├── .DS_Store ├── .gitignore ├── 2023-Labs ├── .DS_Store ├── .gitignore ├── Lab1 │ ├── answer_key │ │ ├── data.tf │ │ ├── provider.tf │ │ └── vpc.tf │ ├── data.tf │ ├── provider.tf │ └── vpc.tf ├── Lab2 │ ├── answer_key │ │ ├── main.tf │ │ ├── provider.tf │ │ ├── sg.tf │ │ └── vpc.tf │ ├── main.tf │ ├── provider.tf │ ├── sg.tf │ └── vpc.tf ├── Lab3 │ ├── answer_key │ │ ├── main.tf │ │ ├── output.tf │ │ ├── provider.tf │ │ ├── sg.tf │ │ ├── vars.tf │ │ └── vpc.tf │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── sg.tf │ ├── vars.tf │ └── vpc.tf ├── Lab4 │ ├── answer_key │ │ ├── main.tf │ │ ├── output.tf │ │ ├── provider.tf │ │ ├── sg.tf │ │ ├── templates │ │ │ └── cloud-init-c2.tmpl │ │ ├── vars.tf │ │ └── vpc.tf │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── sg.tf │ ├── templates │ │ └── cloud-init-c2.tmpl │ ├── vars.tf │ └── vpc.tf ├── Lab5 │ ├── answer_key │ │ ├── locals.tf │ │ ├── main.tf │ │ ├── modules │ │ │ └── servers │ │ │ │ ├── main.tf │ │ │ │ ├── output.tf │ │ │ │ ├── templates │ │ │ │ └── cloud-init.tmpl │ │ │ │ └── vars.tf │ │ ├── output.tf │ │ ├── provider.tf │ │ ├── sg.tf │ │ ├── templates │ │ │ └── cloud-init.tmpl │ │ ├── vars.tf │ │ └── vpc.tf │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── sg.tf │ ├── templates │ │ └── cloud-init-c2.tmpl │ ├── vars.tf │ └── vpc.tf └── TCS Slides │ └── Texas CyberSummit - 2023 Building Infra.pdf ├── LICENSE ├── RTV_2024 ├── .gitignore ├── LAB1-Readme.md ├── LAB2-Readme.md ├── LAB3-Readme.md ├── Lab1 │ ├── .terraform.lock.hcl │ ├── provider.tf │ ├── quickstart.sh │ └── vpc.tf ├── Lab2 │ ├── ec2.tf │ ├── ec2.tmpl copy.example │ ├── provider.tf │ ├── vars.tf │ └── vpc.tf └── Lab3 │ ├── data.tf │ ├── ec2.tf │ ├── modules │ ├── networking │ │ ├── main.tf │ │ ├── output.tf │ │ └── vars.tf │ └── servers │ │ ├── main.tf │ │ ├── output.tf │ │ ├── templates │ │ └── ec2.tmpl │ │ └── vars.tf │ ├── provider.tf │ ├── sg.tf │ ├── vars.tf │ └── vpc.tf └── modules └── networking ├── main.tf ├── output.tf └── vars.tf 
/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neuvik/neuvik-terraform-workshop/3050b0a31b4bad37c6c08d6686edaed9e7e73923/.DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | # Ignore CLI configuration files 33 | .terraformrc 34 | terraform.rc 35 | 36 | Workshop 37 | 38 | .infracost 39 | .infracost/* -------------------------------------------------------------------------------- /2023-Labs/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neuvik/neuvik-terraform-workshop/3050b0a31b4bad37c6c08d6686edaed9e7e73923/2023-Labs/.DS_Store -------------------------------------------------------------------------------- /2023-Labs/.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform 
directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | # Ignore CLI configuration files 33 | .terraformrc 34 | terraform.rc 35 | 36 | Workshop 37 | 38 | .infracost 39 | .infracost/* -------------------------------------------------------------------------------- /2023-Labs/Lab1/answer_key/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" { 2 | state = "available" 3 | } -------------------------------------------------------------------------------- /2023-Labs/Lab1/answer_key/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 5.0" 7 | } 8 | } 9 | } 10 | 11 | # Configure this provider to the region that you are assigned. By default we are going ot use us-east-1. 
12 | provider "aws" { 13 | region = "us-east-1" 14 | } -------------------------------------------------------------------------------- /2023-Labs/Lab1/answer_key/vpc.tf: -------------------------------------------------------------------------------- 1 | # This is required to exposed things behind a NAT 2 | resource "aws_eip" "nat_eip" { 3 | domain = "vpc" 4 | } 5 | 6 | # Remember to use the VPC from the pervious exercise. 7 | resource "aws_vpc" "main" { 8 | cidr_block = "10.0.0.0/16" 9 | enable_dns_hostnames = true 10 | enable_dns_support = true 11 | } 12 | 13 | # Remember to use the Subnet from the pervious exercise. 14 | resource "aws_subnet" "main" { 15 | vpc_id = aws_vpc.main.id 16 | cidr_block = "10.0.1.0/24" 17 | availability_zone = data.aws_availability_zones.available.names[0] 18 | } 19 | 20 | # Create an Internet Gateway, and attach it to the VPC. 21 | resource "aws_internet_gateway" "gw" { 22 | vpc_id = aws_vpc.main.id 23 | } 24 | 25 | # Any resources that are not needing to be on the internet but do need to access the internet need the system below. 26 | resource "aws_nat_gateway" "nat" { 27 | allocation_id = aws_eip.nat_eip.id 28 | subnet_id = aws_subnet.main.id 29 | } 30 | 31 | # This is the routing table for the public subnet, this builds the objects and the routes are added to this table. 32 | resource "aws_route_table" "public" { 33 | vpc_id = aws_vpc.main.id 34 | } 35 | 36 | # This is the route table for the public subnet, only a generic 0/0 route is needed. 
37 | resource "aws_route" "public" { 38 | route_table_id = aws_route_table.public.id 39 | destination_cidr_block = "0.0.0.0/0" 40 | gateway_id = aws_internet_gateway.gw.id 41 | } 42 | 43 | # This associates the route table to the subnet 44 | resource "aws_route_table_association" "public" { 45 | subnet_id = aws_subnet.main.id 46 | route_table_id = aws_route_table.public.id 47 | } -------------------------------------------------------------------------------- /2023-Labs/Lab1/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" { 2 | } -------------------------------------------------------------------------------- /2023-Labs/Lab1/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going to use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { } 4 | } 5 | 6 | # Configure this provider to the region that you are assigned. By default we are going to use us-east-1. 7 | provider "aws" { } -------------------------------------------------------------------------------- /2023-Labs/Lab1/vpc.tf: -------------------------------------------------------------------------------- 1 | # Create a VPC, use 10.0.0.0/16 as the CIDR block for the VPC (AWS VPCs allow netmasks between /16 and /28, so /8 is not valid) 2 | resource "aws_vpc" "main" { } 3 | 4 | # Create a subnet for resources. 5 | resource "aws_subnet" "main" { } 6 | 7 | # Create an aws_eip resource that includes the VPC, this will be for the NAT gateway. 8 | resource "aws_eip" "nat_eip" { } 9 | 10 | # Create an Internet Gateway, and attach it to the VPC. 11 | resource "aws_internet_gateway" "gw" { } 12 | 13 | # Create a NAT Gateway, and attach it to the subnet. 14 | resource "aws_nat_gateway" "nat" { } 15 | 16 | # Create a routing table for the public subnet. 
17 | resource "aws_route_table" "public" { } 18 | 19 | # Add a route for the default subnet of 0.0.0.0 and attach it to the routing table 20 | resource "aws_route" "public" { } 21 | 22 | # Associate the routing table to the subnet 23 | resource "aws_route_table_association" "public" { } 24 | -------------------------------------------------------------------------------- /2023-Labs/Lab2/answer_key/main.tf: -------------------------------------------------------------------------------- 1 | # Use a Data element to find the correct AMI for Ubuntu 20.04. This will be used in the resource. 2 | data "aws_ami" "ubuntu" { 3 | most_recent = true 4 | 5 | filter { 6 | name = "name" 7 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] 8 | } 9 | 10 | filter { 11 | name = "virtualization-type" 12 | values = ["hvm"] 13 | } 14 | 15 | owners = ["099720109477"] # Canonical 16 | } 17 | 18 | # Create a resource for the first instance, use the AMI from the data element. 19 | resource "aws_instance" "c2" { 20 | ami = data.aws_ami.ubuntu.id 21 | instance_type = "t3.nano" 22 | 23 | tags = { 24 | Name = "c2_server" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /2023-Labs/Lab2/answer_key/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 5.0" 7 | } 8 | } 9 | } 10 | 11 | # Configure this provider to the region that you are assigned. By default we are going ot use us-east-1. 
12 | provider "aws" { 13 | region = "us-east-1" 14 | } -------------------------------------------------------------------------------- /2023-Labs/Lab2/answer_key/sg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "default" { 2 | name = "main-allow-defaults" 3 | description = "Allow SSH inbound traffic, from servers within the environment." 4 | vpc_id = aws_vpc.main.id 5 | 6 | ingress { 7 | from_port = 22 8 | to_port = 22 9 | protocol = "tcp" 10 | cidr_blocks = [ 11 | "0.0.0.0/0" 12 | ] 13 | } 14 | 15 | egress { 16 | from_port = 0 17 | to_port = 0 18 | protocol = "-1" 19 | cidr_blocks = [ 20 | "0.0.0.0/0" 21 | ] 22 | } 23 | } -------------------------------------------------------------------------------- /2023-Labs/Lab2/answer_key/vpc.tf: -------------------------------------------------------------------------------- 1 | # This is required to exposed things behind a NAT 2 | resource "aws_eip" "nat_eip" { 3 | domain = "vpc" 4 | } 5 | 6 | # Remember to use the VPC from the pervious exercise. 7 | resource "aws_vpc" "main" { 8 | cidr_block = "10.0.0.0/16" 9 | enable_dns_hostnames = true 10 | enable_dns_support = true 11 | } 12 | 13 | # Remember to use the Subnet from the pervious exercise. 14 | resource "aws_subnet" "main" { 15 | vpc_id = aws_vpc.main.id 16 | cidr_block = "10.0.1.0/24" 17 | availability_zone = data.aws_availability_zones.available.names[0] 18 | } 19 | 20 | # Create an Internet Gateway, and attach it to the VPC. 21 | resource "aws_internet_gateway" "gw" { 22 | vpc_id = aws_vpc.main.id 23 | } 24 | 25 | # Any resources that are not needing to be on the internet but do need to access the internet need the system below. 26 | resource "aws_nat_gateway" "nat" { 27 | allocation_id = aws_eip.nat_eip.id 28 | subnet_id = aws_subnet.main.id 29 | } 30 | 31 | # This is the routing table for the public subnet, this builds the objects and the routes are added to this table. 
32 | resource "aws_route_table" "public" { 33 | vpc_id = aws_vpc.main.id 34 | } 35 | 36 | # This is the route table for the public subnet, only a generic 0/0 route is needed. 37 | resource "aws_route" "public" { 38 | route_table_id = aws_route_table.public.id 39 | destination_cidr_block = "0.0.0.0/0" 40 | gateway_id = aws_internet_gateway.gw.id 41 | } 42 | 43 | #This associates the route table to the subnet 44 | resource "aws_route_table_association" "public" { 45 | subnet_id = aws_subnet.main.id 46 | route_table_id = aws_route_table.public.id 47 | } -------------------------------------------------------------------------------- /2023-Labs/Lab2/main.tf: -------------------------------------------------------------------------------- 1 | # Use a Data element to find the correct AMI for Ubuntu 20.04. This will be used in the resource. 2 | # Use the Ubuntu 20.04 AMD64 Server AMI from the Canonical Account.check "name". 3 | data "aws_ami" "ubuntu" { } 4 | 5 | # Create a resource for the first instance, use the AMI from the data element. 6 | # The instance Type to t3.nano 7 | # Give it a name of c2_server 8 | resource "aws_instance" "c2" { } 9 | 10 | -------------------------------------------------------------------------------- /2023-Labs/Lab2/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { } 4 | } 5 | 6 | # Configure this provider to the region that you are assigned. By default we are going ot use us-east-1. 7 | provider "aws" { } -------------------------------------------------------------------------------- /2023-Labs/Lab2/sg.tf: -------------------------------------------------------------------------------- 1 | # Create an AWS Security Group make sure the security group allows ingress port 22 and egress all ports. 
2 | 3 | resource "aws_security_group" "default" { } -------------------------------------------------------------------------------- /2023-Labs/Lab2/vpc.tf: -------------------------------------------------------------------------------- 1 | # Create a VPC, use 10.0.0.0/8 as the main resource for the PVPC 2 | resource "aws_vpc" "main" { } 3 | 4 | # Create a subnet for resources. 5 | resource "aws_subnet" "main" { } 6 | 7 | # Create an aws_eip resource that includes the VPC, this will be for the nat gateway. 8 | resource "aws_eip" "nat_eip" { } 9 | 10 | # Create an Internet Gateway, and attach it to the VPC. 11 | resource "aws_internet_gateway" "gw" { } 12 | 13 | # Create a NAT Gateway, and attach it to the subnet. 14 | resource "aws_nat_gateway" "nat" { } 15 | 16 | # Create a routing table for the public subnet. 17 | resource "aws_route_table" "public" { } 18 | 19 | # Add a route for the default subnet of 0.0.0.0 and attach it to the routing table 20 | resource "aws_route" "public" { } 21 | 22 | # Associate the routing table to the subnet 23 | resource "aws_route_table_association" "public" { } 24 | -------------------------------------------------------------------------------- /2023-Labs/Lab3/answer_key/main.tf: -------------------------------------------------------------------------------- 1 | # Use a Data element to find the correct AMI for Ubuntu 20.04. This will be used in the resource. 2 | # Use the Ubuntu 20.04 AMD64 Server AMI from the Canonical Account.check "name". 3 | data "aws_ami" "ubuntu" { 4 | most_recent = true 5 | 6 | filter { 7 | name = "name" 8 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] 9 | } 10 | 11 | filter { 12 | name = "virtualization-type" 13 | values = ["hvm"] 14 | } 15 | 16 | owners = ["099720109477"] # Canonical 17 | } 18 | 19 | # Create a resource for the first instance, use the AMI from the data element. 
20 | # The instance Type to t3.nano 21 | # Give it a name of c2_server 22 | resource "aws_instance" "c2" { 23 | ami = data.aws_ami.ubuntu.id # This is the ID number of the Data element generated above 24 | instance_type = "t3.nano" # This is a small instance type 25 | 26 | subnet_id = aws_subnet.main.id # This is the aws subnet ID for the public subnet, meant to be a DMZ this is how a "Public IP" is attached to an instance. 27 | 28 | root_block_device { 29 | volume_size = 40 # This a 40GB instance size, it can be larger 30 | } 31 | 32 | tags = { 33 | Name = "c2_server" 34 | } 35 | } 36 | 37 | resource "aws_eip" "c2" { 38 | instance = aws_instance.c2.id 39 | domain = "vpc" 40 | } -------------------------------------------------------------------------------- /2023-Labs/Lab3/answer_key/output.tf: -------------------------------------------------------------------------------- 1 | output "c2_public_ip" { 2 | value = "${aws_instance.c2.public_ip}" 3 | } 4 | 5 | output "c2_private_ip" { 6 | value = "${aws_instance.c2.private_ip}" 7 | } -------------------------------------------------------------------------------- /2023-Labs/Lab3/answer_key/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 5.0" 7 | } 8 | } 9 | } 10 | 11 | # Configure this provider to the region that you are assigned. By default we are going ot use us-east-1. 
12 | provider "aws" { 13 | region = "us-east-1" 14 | } -------------------------------------------------------------------------------- /2023-Labs/Lab3/answer_key/sg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "default" { 2 | name = "main-allow-defaults" 3 | description = "Allow SSH inbound traffic, from servers within the environment." 4 | vpc_id = aws_vpc.main.id 5 | 6 | ingress { 7 | from_port = 22 8 | to_port = 22 9 | protocol = "tcp" 10 | cidr_blocks = [ 11 | "0.0.0.0/0" 12 | ] 13 | } 14 | 15 | egress { 16 | from_port = 0 17 | to_port = 0 18 | protocol = "-1" 19 | cidr_blocks = [ 20 | "0.0.0.0/0" 21 | ] 22 | } 23 | } -------------------------------------------------------------------------------- /2023-Labs/Lab3/answer_key/vars.tf: -------------------------------------------------------------------------------- 1 | variable "users_list" { 2 | description = "In here you need to use your own SSH private key, this is just an example." 3 | type = map(any) 4 | 5 | default = { 6 | "moses" = "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPyOdTJ3mXw4X0XRSGxlrllvUw1chX4uk1FPerUcJtEo+RSKR1OIRFoXwohk3D+7jfY+6FS6qd+QfKYWg0A7HqU= moses", 7 | } 8 | } 9 | 10 | variable "c2_hostname" { 11 | description = "This is a list that is fed to build local users in the VMs. This uses the username / ssh key pairing to fill out the cloud-init templates" 12 | type = string 13 | default = "c2server" 14 | } 15 | -------------------------------------------------------------------------------- /2023-Labs/Lab3/answer_key/vpc.tf: -------------------------------------------------------------------------------- 1 | # This is required to exposed things behind a NAT 2 | resource "aws_eip" "nat_eip" { 3 | domain = "vpc" 4 | } 5 | 6 | # Remember to use the VPC from the pervious exercise. 
7 | resource "aws_vpc" "main" { 8 | cidr_block = "10.0.0.0/16" 9 | enable_dns_hostnames = true 10 | enable_dns_support = true 11 | } 12 | 13 | # Remember to use the Subnet from the pervious exercise. 14 | resource "aws_subnet" "main" { 15 | vpc_id = aws_vpc.main.id 16 | cidr_block = "10.0.1.0/24" 17 | availability_zone = data.aws_availability_zones.available.names[0] 18 | } 19 | 20 | # Create an Internet Gateway, and attach it to the VPC. 21 | resource "aws_internet_gateway" "gw" { 22 | vpc_id = aws_vpc.main.id 23 | } 24 | 25 | # Any resources that are not needing to be on the internet but do need to access the internet need the system below. 26 | resource "aws_nat_gateway" "nat" { 27 | allocation_id = aws_eip.nat_eip.id 28 | subnet_id = aws_subnet.main.id 29 | } 30 | 31 | # This is the routing table for the public subnet, this builds the objects and the routes are added to this table. 32 | resource "aws_route_table" "public" { 33 | vpc_id = aws_vpc.main.id 34 | } 35 | 36 | # This is the route table for the public subnet, only a generic 0/0 route is needed. 37 | resource "aws_route" "public" { 38 | route_table_id = aws_route_table.public.id 39 | destination_cidr_block = "0.0.0.0/0" 40 | gateway_id = aws_internet_gateway.gw.id 41 | } 42 | 43 | #This associates the route table to the subnet 44 | resource "aws_route_table_association" "public" { 45 | subnet_id = aws_subnet.main.id 46 | route_table_id = aws_route_table.public.id 47 | } -------------------------------------------------------------------------------- /2023-Labs/Lab3/main.tf: -------------------------------------------------------------------------------- 1 | # Use a Data element to find the correct AMI for Ubuntu 20.04. This will be used in the resource. 2 | # Use the Ubuntu 20.04 AMD64 Server AMI from the Canonical Account.check "name". 3 | data "aws_ami" "ubuntu" { } 4 | 5 | # Create a resource for the first instance, use the AMI from the data element. 
6 | # Set the instance type to t3.nano 7 | # Give it a name of c2_server 8 | resource "aws_instance" "c2" { } 9 | 10 | # Provide an EIP for the C2 Server 11 | resource "aws_eip" "c2" { } 12 | -------------------------------------------------------------------------------- /2023-Labs/Lab3/output.tf: -------------------------------------------------------------------------------- 1 | # Create an output to output the following: 2 | 3 | # The ec2 instance Public IP (if it exists) 4 | output "c2_public_ip" {} 5 | 6 | # The ec2 instance Private IP (if it exists) 7 | output "c2_private_ip" { } -------------------------------------------------------------------------------- /2023-Labs/Lab3/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going to use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { 4 | } 5 | } 6 | 7 | # Configure this provider to the region that you are assigned. By default we are going to use us-east-1. 8 | provider "aws" { 9 | } -------------------------------------------------------------------------------- /2023-Labs/Lab3/sg.tf: -------------------------------------------------------------------------------- 1 | # Create an AWS Security Group; make sure the security group allows ingress on port 22 and egress on all ports. 2 | 3 | resource "aws_security_group" "default" { } -------------------------------------------------------------------------------- /2023-Labs/Lab3/vars.tf: -------------------------------------------------------------------------------- 1 | variable "users_list" { } 2 | 3 | variable "c2_hostname" { } 4 | -------------------------------------------------------------------------------- /2023-Labs/Lab3/vpc.tf: -------------------------------------------------------------------------------- 1 | # Remember to use the VPC from the previous exercise. 
2 | resource "aws_vpc" "main" { 3 | } 4 | 5 | # Remember to use the Subnet from the pervious exercise. 6 | resource "aws_subnet" "main" { 7 | } -------------------------------------------------------------------------------- /2023-Labs/Lab4/answer_key/main.tf: -------------------------------------------------------------------------------- 1 | # Use a Data element to find the correct AMI for Ubuntu 20.04. This will be used in the resource. 2 | # Use the Ubuntu 20.04 AMD64 Server AMI from the Canonical Account.check "name". 3 | data "aws_availability_zones" "available" { 4 | state = "available" 5 | } 6 | 7 | data "aws_ami" "ubuntu" { 8 | most_recent = true 9 | 10 | filter { 11 | name = "name" 12 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] 13 | } 14 | 15 | filter { 16 | name = "virtualization-type" 17 | values = ["hvm"] 18 | } 19 | 20 | owners = ["099720109477"] # Canonical 21 | } 22 | 23 | # Create a file from a template to be used for userdata, specifically cloud-init 24 | # This is the Template File that can be suitable for building c2. 25 | resource "local_file" "cloud_init_c2_template" { 26 | content = templatefile("${path.module}/templates/cloud-init-c2.tmpl", { 27 | users = var.users_list 28 | hostname = var.c2_hostname 29 | }) 30 | 31 | filename = "${path.module}/files/cloud-init-c2.yaml" 32 | } 33 | 34 | data "local_file" "cloud_init_c2_yaml" { 35 | filename = local_file.cloud_init_c2_template.filename 36 | depends_on = [local_file.cloud_init_c2_template] 37 | } 38 | 39 | # Create a resource for the first instance, use the AMI from the data element. 
40 | # The instance Type to t3.nano 41 | # Give it a name of c2_server 42 | resource "aws_instance" "c2" { 43 | ami = data.aws_ami.ubuntu.id # This is the ID number of the Data element generated above 44 | instance_type = "t3.micro" # This is a small instance type 45 | 46 | subnet_id = aws_subnet.main.id # This is the aws subnet ID for the public subnet, meant to be a DMZ this is how a "Public IP" is attached to an instance. 47 | user_data = data.local_file.cloud_init_c2_yaml.content 48 | 49 | vpc_security_group_ids = [ 50 | aws_security_group.default.id 51 | ] 52 | 53 | root_block_device { 54 | volume_size = 40 # This a 40GB instance size, it can be larger 55 | } 56 | 57 | tags = { 58 | Name = "c2_server" 59 | } 60 | } 61 | 62 | resource "aws_eip" "c2" { 63 | instance = aws_instance.c2.id 64 | domain = "vpc" 65 | } 66 | -------------------------------------------------------------------------------- /2023-Labs/Lab4/answer_key/output.tf: -------------------------------------------------------------------------------- 1 | output "c2_public_ip" { 2 | value = aws_eip.c2.public_ip 3 | } 4 | 5 | output "c2_private_ip" { 6 | value = aws_instance.c2.private_ip 7 | } -------------------------------------------------------------------------------- /2023-Labs/Lab4/answer_key/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 5.0" 7 | } 8 | local = { 9 | version = "~> 2.4.0" 10 | } 11 | } 12 | } 13 | 14 | # Configure this provider to the region that you are assigned. By default we are going ot use us-east-1. 
15 | provider "aws" { 16 | region = "us-west-1" 17 | } -------------------------------------------------------------------------------- /2023-Labs/Lab4/answer_key/sg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "default" { 2 | name = "main-allow-defaults" 3 | description = "Allow SSH inbound traffic, from servers within the environment." 4 | vpc_id = aws_vpc.main.id 5 | 6 | ingress { 7 | from_port = 22 8 | to_port = 22 9 | protocol = "tcp" 10 | cidr_blocks = [ 11 | "0.0.0.0/0" 12 | ] 13 | } 14 | 15 | egress { 16 | from_port = 0 17 | to_port = 0 18 | protocol = "-1" 19 | cidr_blocks = [ 20 | "0.0.0.0/0" 21 | ] 22 | } 23 | } -------------------------------------------------------------------------------- /2023-Labs/Lab4/answer_key/templates/cloud-init-c2.tmpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | # Adds the users to the system 3 | 4 | fqdn: ${hostname} 5 | write_files: 6 | - content: | 7 | #!/bin/bash 8 | export DEBIAN_FRONTEND=noninteractive 9 | apt-get update && \ 10 | apt-get -o Dpkg::Options::="--force-confold" upgrade -q -y --allow-remove-essential --allow-downgrades && \ 11 | apt-get -o Dpkg::Options::="--force-confold" dist-upgrade -q -y --allow-remove-essential --allow-downgrades && \ 12 | apt autoremove -y 13 | sudo reboot 14 | path: /root/update.sh 15 | permissions: '0700' 16 | 17 | - content: | 18 | 0 5 * * 1 root /root/updates.sh >/dev/null 2>&1 19 | path: /etc/cron.d/updates 20 | permissions: '0644' 21 | 22 | users: 23 | - default 24 | %{ for user_key, user_value in users ~} 25 | - name: ${user_key} 26 | lock_passwd: true 27 | shell: /bin/bash 28 | ssh_authorized_keys: 29 | - ${user_value} 30 | sudo: ALL=(ALL) NOPASSWD:ALL 31 | %{ endfor ~} 32 | 33 | # These packaages are just placeholders 34 | packages: 35 | - apache2 36 | - apt-transport-https 37 | - ca-certificates 38 | - curl 39 | - gnupg-agent 40 | - p7zip-full 
41 | - screen 42 | - software-properties-common 43 | - unattended-upgrades 44 | - wget 45 | 46 | runcmd: 47 | - hostnamectl set-hostname ${hostname} 48 | - sed -i 's/127.0.0.1 localhost/127.0.0.1 localhost ${hostname}/g' /etc/hosts 49 | 50 | package_update: true 51 | package_upgrade: true 52 | package_reboot_if_required: true 53 | 54 | power_state: 55 | mode: reboot 56 | delay: 1 57 | message: Rebooting after installation 58 | -------------------------------------------------------------------------------- /2023-Labs/Lab4/answer_key/vars.tf: -------------------------------------------------------------------------------- 1 | variable "users_list" { 2 | description = "In here you need to use your own SSH private key, this is just an example." 3 | type = map(any) 4 | 5 | default = { 6 | "moses" = "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPyOdTJ3mXw4X0XRSGxlrllvUw1chX4uk1FPerUcJtEo+RSKR1OIRFoXwohk3D+7jfY+6FS6qd+QfKYWg0A7HqU= moses", 7 | } 8 | } 9 | 10 | variable "c2_hostname" { 11 | description = "This is a list that is fed to build local users in the VMs. This uses the username / ssh key pairing to fill out the cloud-init templates" 12 | type = string 13 | default = "c2server" 14 | } 15 | -------------------------------------------------------------------------------- /2023-Labs/Lab4/answer_key/vpc.tf: -------------------------------------------------------------------------------- 1 | # This is required to exposed things behind a NAT 2 | resource "aws_eip" "nat_eip" { 3 | domain = "vpc" 4 | } 5 | 6 | # Remember to use the VPC from the pervious exercise. 7 | resource "aws_vpc" "main" { 8 | cidr_block = "10.0.0.0/16" 9 | enable_dns_hostnames = true 10 | enable_dns_support = true 11 | } 12 | 13 | # Remember to use the Subnet from the pervious exercise. 
14 | resource "aws_subnet" "main" { 15 | vpc_id = aws_vpc.main.id 16 | cidr_block = "10.0.1.0/24" 17 | availability_zone = data.aws_availability_zones.available.names[0] 18 | } 19 | 20 | # Create an Internet Gateway, and attach it to the VPC. 21 | resource "aws_internet_gateway" "gw" { 22 | vpc_id = aws_vpc.main.id 23 | } 24 | 25 | # Any resources that are not needing to be on the internet but do need to access the internet need the system below. 26 | resource "aws_nat_gateway" "nat" { 27 | allocation_id = aws_eip.nat_eip.id 28 | subnet_id = aws_subnet.main.id 29 | } 30 | 31 | # This is the routing table for the public subnet, this builds the objects and the routes are added to this table. 32 | resource "aws_route_table" "public" { 33 | vpc_id = aws_vpc.main.id 34 | } 35 | 36 | # This is the route table for the public subnet, only a generic 0/0 route is needed. 37 | resource "aws_route" "public" { 38 | route_table_id = aws_route_table.public.id 39 | destination_cidr_block = "0.0.0.0/0" 40 | gateway_id = aws_internet_gateway.gw.id 41 | } 42 | 43 | #This associates the route table to the subnet 44 | resource "aws_route_table_association" "public" { 45 | subnet_id = aws_subnet.main.id 46 | route_table_id = aws_route_table.public.id 47 | } -------------------------------------------------------------------------------- /2023-Labs/Lab4/main.tf: -------------------------------------------------------------------------------- 1 | # Use a Data element to find the correct AMI for Ubuntu 20.04. This will be used in the resource. 2 | # Use the Ubuntu 20.04 AMD64 Server AMI from the Canonical Account.check "name". 3 | data "aws_ami" "ubuntu" { } 4 | 5 | # Create a resource for the first instance, use the AMI from the data element. 6 | # The instance Type to t3.nano 7 | # Give it a name of c2_server 8 | # Don't forget to include user_data here which is going to be a data element. 
9 | resource "aws_instance" "c2" { } 10 | 11 | # Provide an EIP for the C2 Server 12 | resource "aws_eip" "c2" { } 13 | 14 | # Create a file from a template to be used for userdata, specifically cloud-init 15 | # This is the Template File that can be suitable for building c2. 16 | # Note there are two variables listed here: 17 | # 18 | # hostname 19 | # users 20 | # 21 | # Hostname is going to come from c2_hostname 22 | # Users is going to be a mapped list from users 23 | # 24 | 25 | resource "local_file" "cloud_init_c2_template" { } 26 | 27 | # This will leverage the data file above to create the cloud-init yaml file. 28 | data "local_file" "cloud_init_c2_yaml" { } 29 | -------------------------------------------------------------------------------- /2023-Labs/Lab4/output.tf: -------------------------------------------------------------------------------- 1 | # Create an output to output the following: 2 | # The ec2 instance Public IP (if it exists) 3 | # The ec2 instance Private IP (if it exists) 4 | 5 | -------------------------------------------------------------------------------- /2023-Labs/Lab4/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going to use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { 4 | } 5 | } 6 | 7 | # Configure this provider to the region that you are assigned. By default we are going to use us-east-1. 8 | provider "aws" { 9 | } -------------------------------------------------------------------------------- /2023-Labs/Lab4/sg.tf: -------------------------------------------------------------------------------- 1 | # Create an AWS Security Group make sure the security group allows ingress port 22 and egress all ports.
2 | 3 | resource "aws_security_group" "default" { } -------------------------------------------------------------------------------- /2023-Labs/Lab4/templates/cloud-init-c2.tmpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | # Adds the users to the system 3 | 4 | fqdn: ${hostname} 5 | write_files: 6 | - content: | 7 | #!/bin/bash 8 | export DEBIAN_FRONTEND=noninteractive 9 | apt-get update && \ 10 | apt-get -o Dpkg::Options::="--force-confold" upgrade -q -y --allow-remove-essential --allow-downgrades && \ 11 | apt-get -o Dpkg::Options::="--force-confold" dist-upgrade -q -y --allow-remove-essential --allow-downgrades && \ 12 | apt autoremove -y 13 | sudo reboot 14 | path: /root/update.sh 15 | permissions: '0700' 16 | 17 | - content: | 18 | 0 5 * * 1 root /root/update.sh >/dev/null 2>&1 19 | path: /etc/cron.d/updates 20 | permissions: '0644' 21 | 22 | users: 23 | - default 24 | %{ for user_key, user_value in users ~} 25 | - name: ${user_key} 26 | lock_passwd: true 27 | shell: /bin/bash 28 | ssh_authorized_keys: 29 | - ${user_value} 30 | sudo: ALL=(ALL) NOPASSWD:ALL 31 | %{ endfor ~} 32 | 33 | # These packages are just placeholders 34 | packages: 35 | - apache2 36 | - apt-transport-https 37 | - ca-certificates 38 | - curl 39 | - gnupg-agent 40 | - p7zip-full 41 | - screen 42 | - software-properties-common 43 | - unattended-upgrades 44 | - wget 45 | 46 | runcmd: 47 | - hostnamectl set-hostname ${hostname} 48 | - sed -i 's/127.0.0.1 localhost/127.0.0.1 localhost ${hostname}/g' /etc/hosts 49 | 50 | package_update: true 51 | package_upgrade: true 52 | package_reboot_if_required: true 53 | 54 | power_state: 55 | mode: reboot 56 | delay: 1 57 | message: Rebooting after installation 58 | -------------------------------------------------------------------------------- /2023-Labs/Lab4/vars.tf: -------------------------------------------------------------------------------- 1 | variable "users_list" { } 2 | 3 | variable
"c2_hostname" { } 4 | -------------------------------------------------------------------------------- /2023-Labs/Lab4/vpc.tf: -------------------------------------------------------------------------------- 1 | # Remember to use the VPC from the previous exercise. 2 | resource "aws_vpc" "main" { 3 | } 4 | 5 | # Remember to use the Subnet from the previous exercise. 6 | resource "aws_subnet" "main" { 7 | } -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | c2_server_name = "c2_server" 3 | c2_cloud_init_yaml_name = "c2_cloud_init.yaml" 4 | redirector_server_name = "redirector_server" 5 | redirector_cloud_init_yaml_name = "redirector_cloud_init.yaml" 6 | } -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" { 2 | state = "available" 3 | } 4 | 5 | # Create a resource for the first instance, use the AMI from the data element.
6 | # The instance Type to t3.nano 7 | # Give it a name of c2_server 8 | module "c2_server" { 9 | source = "./modules/servers" 10 | 11 | server_name = local.c2_server_name 12 | main_subnet = aws_subnet.main.id 13 | security_group_id = aws_security_group.default.id 14 | cloud_init_yaml_name = local.c2_cloud_init_yaml_name 15 | users_list = var.users_list 16 | } 17 | 18 | module "redirector_server" { 19 | source = "./modules/servers" 20 | 21 | server_name = local.redirector_server_name 22 | main_subnet = aws_subnet.main.id 23 | security_group_id = aws_security_group.default.id 24 | cloud_init_yaml_name = local.redirector_cloud_init_yaml_name 25 | users_list = var.users_list 26 | } -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/modules/servers/main.tf: -------------------------------------------------------------------------------- 1 | # Use a Data element to find the correct AMI for Ubuntu 20.04. This will be used in the resource. 2 | # Use the Ubuntu 20.04 AMD64 Server AMI from the Canonical Account.check "name". 3 | data "aws_availability_zones" "available" { 4 | state = "available" 5 | } 6 | 7 | data "aws_ami" "ubuntu" { 8 | most_recent = true 9 | 10 | filter { 11 | name = "name" 12 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] 13 | } 14 | 15 | filter { 16 | name = "virtualization-type" 17 | values = ["hvm"] 18 | } 19 | 20 | owners = ["099720109477"] # Canonical 21 | } 22 | 23 | resource "local_file" "cloud_init_template" { 24 | content = templatefile("${path.module}/templates/cloud-init.tmpl", { 25 | users = var.users_list 26 | hostname = var.server_name 27 | }) 28 | 29 | filename = "${path.module}/files/${var.cloud_init_yaml_name}" 30 | #filename = "${path.module}/files/cloud-init.yaml" 31 | } 32 | 33 | # Create a resource for the first instance, use the AMI from the data element. 
34 | # The instance Type to t3.nano 35 | # Give it a name of c2_server 36 | resource "aws_instance" "server" { 37 | ami = data.aws_ami.ubuntu.id # This is the ID number of the Data element generated above 38 | instance_type = "t3.micro" # This is a small instance type 39 | 40 | subnet_id = var.main_subnet # This is a variable that is fed into the module 41 | user_data = local_file.cloud_init_template.content 42 | 43 | vpc_security_group_ids = [ 44 | var.security_group_id 45 | ] 46 | 47 | root_block_device { 48 | volume_size = 40 # This a 40GB instance size, it can be larger 49 | } 50 | 51 | tags = { 52 | Name = var.server_name 53 | } 54 | } 55 | 56 | resource "aws_eip" "server" { 57 | instance = aws_instance.server.id 58 | domain = "vpc" 59 | } 60 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/modules/servers/output.tf: -------------------------------------------------------------------------------- 1 | output "c2_public_ip" { 2 | value = aws_eip.server.public_ip 3 | } 4 | 5 | output "c2_private_ip" { 6 | value = aws_instance.server.private_ip 7 | } 8 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/modules/servers/templates/cloud-init.tmpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | # Adds the users to the system 3 | 4 | fqdn: ${hostname} 5 | write_files: 6 | - content: | 7 | #!/bin/bash 8 | export DEBIAN_FRONTEND=noninteractive 9 | apt-get update && \ 10 | apt-get -o Dpkg::Options::="--force-confold" upgrade -q -y --allow-remove-essential --allow-downgrades && \ 11 | apt-get -o Dpkg::Options::="--force-confold" dist-upgrade -q -y --allow-remove-essential --allow-downgrades && \ 12 | apt autoremove -y 13 | sudo reboot 14 | path: /root/update.sh 15 | permissions: '0700' 16 | 17 | - content: | 18 | 0 5 * * 1 root /root/updates.sh >/dev/null 2>&1 19 | path: /etc/cron.d/updates 20 | 
permissions: '0644' 21 | 22 | users: 23 | - default 24 | %{ for user_key, user_value in users ~} 25 | - name: ${user_key} 26 | lock_passwd: true 27 | shell: /bin/bash 28 | ssh_authorized_keys: 29 | - ${user_value} 30 | sudo: ALL=(ALL) NOPASSWD:ALL 31 | %{ endfor ~} 32 | 33 | # These packaages are just placeholders 34 | packages: 35 | - apache2 36 | - apt-transport-https 37 | - ca-certificates 38 | - curl 39 | - gnupg-agent 40 | - p7zip-full 41 | - screen 42 | - software-properties-common 43 | - unattended-upgrades 44 | - wget 45 | 46 | runcmd: 47 | - hostnamectl set-hostname ${hostname} 48 | - sed -i 's/127.0.0.1 localhost/127.0.0.1 localhost ${hostname}/g' /etc/hosts 49 | 50 | package_update: true 51 | package_upgrade: true 52 | package_reboot_if_required: true 53 | 54 | power_state: 55 | mode: reboot 56 | delay: 1 57 | message: Rebooting after installation 58 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/modules/servers/vars.tf: -------------------------------------------------------------------------------- 1 | variable "server_name" { 2 | description = "The Server Name" 3 | type = string 4 | default = "" 5 | } 6 | 7 | variable "main_subnet" { 8 | description = "The subnet for this server" 9 | type = string 10 | default = "" 11 | } 12 | 13 | variable "security_group_id" { 14 | description = "The security group for this server" 15 | type = string 16 | default = "" 17 | } 18 | 19 | variable user_data { 20 | type = list 21 | description = "This is the userdata for ther server" 22 | default = [""] 23 | } 24 | 25 | variable "users_list" { 26 | type = map(any) 27 | description = "A list of users." 28 | default = { "" = "" } 29 | } 30 | 31 | variable "cloud_init_yaml_name" { 32 | type = string 33 | description = "What would you like to name this yaml file?" 
34 | } 35 | 36 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/output.tf: -------------------------------------------------------------------------------- 1 | output "c2_public_ip" { 2 | description = "The IP of the C2 Server" 3 | value = module.c2_server.c2_public_ip 4 | } 5 | 6 | output "c2_private_ip" { 7 | description = "The IP of the C2 Server" 8 | value = module.c2_server.c2_private_ip 9 | } 10 | 11 | ### Can we make it prettier? 12 | 13 | output "final_text" { 14 | value = </dev/null 2>&1 19 | path: /etc/cron.d/updates 20 | permissions: '0644' 21 | 22 | users: 23 | - default 24 | %{ for user_key, user_value in users ~} 25 | - name: ${user_key} 26 | lock_passwd: true 27 | shell: /bin/bash 28 | ssh_authorized_keys: 29 | - ${user_value} 30 | sudo: ALL=(ALL) NOPASSWD:ALL 31 | %{ endfor ~} 32 | 33 | # These packaages are just placeholders 34 | packages: 35 | - apache2 36 | - apt-transport-https 37 | - ca-certificates 38 | - curl 39 | - gnupg-agent 40 | - p7zip-full 41 | - screen 42 | - software-properties-common 43 | - unattended-upgrades 44 | - wget 45 | 46 | runcmd: 47 | - hostnamectl set-hostname ${hostname} 48 | - sed -i 's/127.0.0.1 localhost/127.0.0.1 localhost ${hostname}/g' /etc/hosts 49 | 50 | package_update: true 51 | package_upgrade: true 52 | package_reboot_if_required: true 53 | 54 | power_state: 55 | mode: reboot 56 | delay: 1 57 | message: Rebooting after installation 58 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/vars.tf: -------------------------------------------------------------------------------- 1 | variable "users_list" { 2 | description = "In here you need to use your own SSH private key, this is just an example." 
3 | type = map(any) 4 | 5 | default = { 6 | "moses" = "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPyOdTJ3mXw4X0XRSGxlrllvUw1chX4uk1FPerUcJtEo+RSKR1OIRFoXwohk3D+7jfY+6FS6qd+QfKYWg0A7HqU= moses", 7 | } 8 | } 9 | 10 | variable "c2_hostname" { 11 | description = "This is a list that is fed to build local users in the VMs. This uses the username / ssh key pairing to fill out the cloud-init templates" 12 | type = string 13 | default = "c2server" 14 | } 15 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/answer_key/vpc.tf: -------------------------------------------------------------------------------- 1 | # This is required to exposed things behind a NAT 2 | resource "aws_eip" "nat_eip" { 3 | domain = "vpc" 4 | } 5 | 6 | # Remember to use the VPC from the pervious exercise. 7 | resource "aws_vpc" "main" { 8 | cidr_block = "10.0.0.0/16" 9 | enable_dns_hostnames = true 10 | enable_dns_support = true 11 | } 12 | 13 | # Remember to use the Subnet from the pervious exercise. 14 | resource "aws_subnet" "main" { 15 | vpc_id = aws_vpc.main.id 16 | cidr_block = "10.0.1.0/24" 17 | availability_zone = data.aws_availability_zones.available.names[0] 18 | } 19 | 20 | # Create an Internet Gateway, and attach it to the VPC. 21 | resource "aws_internet_gateway" "gw" { 22 | vpc_id = aws_vpc.main.id 23 | } 24 | 25 | # Any resources that are not needing to be on the internet but do need to access the internet need the system below. 26 | resource "aws_nat_gateway" "nat" { 27 | allocation_id = aws_eip.nat_eip.id 28 | subnet_id = aws_subnet.main.id 29 | } 30 | 31 | # This is the routing table for the public subnet, this builds the objects and the routes are added to this table. 32 | resource "aws_route_table" "public" { 33 | vpc_id = aws_vpc.main.id 34 | } 35 | 36 | # This is the route table for the public subnet, only a generic 0/0 route is needed. 
37 | resource "aws_route" "public" { 38 | route_table_id = aws_route_table.public.id 39 | destination_cidr_block = "0.0.0.0/0" 40 | gateway_id = aws_internet_gateway.gw.id 41 | } 42 | 43 | #This associates the route table to the subnet 44 | resource "aws_route_table_association" "public" { 45 | subnet_id = aws_subnet.main.id 46 | route_table_id = aws_route_table.public.id 47 | } -------------------------------------------------------------------------------- /2023-Labs/Lab5/main.tf: -------------------------------------------------------------------------------- 1 | # Use a Data element to find the correct AMI for Ubuntu 20.04. This will be used in the resource. 2 | # Use the Ubuntu 20.04 AMD64 Server AMI from the Canonical Account.check "name". 3 | data "aws_ami" "ubuntu" { } 4 | 5 | # Create a resource for the first instance, use the AMI from the data element. 6 | # The instance Type to t3.nano 7 | # Give it a name of c2_server 8 | # Don't forget to include user_data here which is going to be a data element. 9 | resource "aws_instance" "c2" { } 10 | 11 | # Provide an EIP for the C2 Server 12 | resource "aws_eip" "c2" { } 13 | 14 | # Create a file from a template to be used for userdata, specifically cloud-init 15 | # This is the Template File that can be suitable for building c2. 16 | # Note there are two variables listed here: 17 | # 18 | # hostname 19 | # users 20 | # 21 | # Hostname is going to come from c2_hostname 22 | # Users is going to be a mapped list from users 23 | # 24 | 25 | resource "local_file" "cloud_init_c2_template" { } 26 | 27 | # This will leverage teh dat afile above to create the cloud-init yaml file. 
28 | data "local_file" "cloud_init_c2_yaml" { } 29 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/output.tf: -------------------------------------------------------------------------------- 1 | # Create an output to output the following: 2 | # The ec2 instance Public IP (if it exists) 3 | # The ec2 instance Private IP (if it exists) 4 | 5 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/provider.tf: -------------------------------------------------------------------------------- 1 | # First, require the latest providers for terraform, specifically we are going use the AWS Provider, and a version of 5.0 or greater. 2 | terraform { 3 | required_providers { 4 | } 5 | } 6 | 7 | # Configure this provider to the region that you are assigned. By default we are going ot use us-east-1. 8 | provider "aws" { 9 | } -------------------------------------------------------------------------------- /2023-Labs/Lab5/sg.tf: -------------------------------------------------------------------------------- 1 | # Create an AWS Security Group make sure the security group allows ingress port 22 and egress all ports. 
2 | 3 | resource "aws_security_group" "default" { } -------------------------------------------------------------------------------- /2023-Labs/Lab5/templates/cloud-init-c2.tmpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | # Adds the users to the system 3 | 4 | fqdn: ${hostname} 5 | write_files: 6 | - content: | 7 | #!/bin/bash 8 | export DEBIAN_FRONTEND=noninteractive 9 | apt-get update && \ 10 | apt-get -o Dpkg::Options::="--force-confold" upgrade -q -y --allow-remove-essential --allow-downgrades && \ 11 | apt-get -o Dpkg::Options::="--force-confold" dist-upgrade -q -y --allow-remove-essential --allow-downgrades && \ 12 | apt autoremove -y 13 | sudo reboot 14 | path: /root/update.sh 15 | permissions: '0700' 16 | 17 | - content: | 18 | 0 5 * * 1 root /root/updates.sh >/dev/null 2>&1 19 | path: /etc/cron.d/updates 20 | permissions: '0644' 21 | 22 | users: 23 | - default 24 | %{ for user_key, user_value in users ~} 25 | - name: ${user_key} 26 | lock_passwd: true 27 | shell: /bin/bash 28 | ssh_authorized_keys: 29 | - ${user_value} 30 | sudo: ALL=(ALL) NOPASSWD:ALL 31 | %{ endfor ~} 32 | 33 | # These packaages are just placeholders 34 | packages: 35 | - apache2 36 | - apt-transport-https 37 | - ca-certificates 38 | - curl 39 | - gnupg-agent 40 | - p7zip-full 41 | - screen 42 | - software-properties-common 43 | - unattended-upgrades 44 | - wget 45 | 46 | runcmd: 47 | - hostnamectl set-hostname ${hostname} 48 | - sed -i 's/127.0.0.1 localhost/127.0.0.1 localhost ${hostname}/g' /etc/hosts 49 | 50 | package_update: true 51 | package_upgrade: true 52 | package_reboot_if_required: true 53 | 54 | power_state: 55 | mode: reboot 56 | delay: 1 57 | message: Rebooting after installation 58 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/vars.tf: -------------------------------------------------------------------------------- 1 | variable "users_list" { } 2 | 3 | variable 
"c2_hostname" { } 4 | -------------------------------------------------------------------------------- /2023-Labs/Lab5/vpc.tf: -------------------------------------------------------------------------------- 1 | # Remember to use the VPC from the previous exercise. 2 | resource "aws_vpc" "main" { 3 | } 4 | 5 | # Remember to use the Subnet from the previous exercise. 6 | resource "aws_subnet" "main" { 7 | } -------------------------------------------------------------------------------- /2023-Labs/TCS Slides/Texas CyberSummit - 2023 Building Infra.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neuvik/neuvik-terraform-workshop/3050b0a31b4bad37c6c08d6686edaed9e7e73923/2023-Labs/TCS Slides/Texas CyberSummit - 2023 Building Infra.pdf -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Neuvik 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /RTV_2024/.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | # Ignore CLI configuration files 33 | .terraformrc 34 | terraform.rc 35 | 36 | Workshop 37 | 38 | .infracost 39 | .infracost/* -------------------------------------------------------------------------------- /RTV_2024/LAB1-Readme.md: -------------------------------------------------------------------------------- 1 | # Welcome to Lab 1 2 | 3 | To get started have one of the items available to you: 4 | 5 | 1. Lucky enough to be one of the first few that we can provide an account. 6 | 2. Have an existing AWS Account. 
7 | 8 | Let's also discuss what you need to get going. 9 | 10 | 1. [Terraform](https://developer.hashicorp.com/terraform/install) version 1.9.0 or higher 11 | 2. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) version 2 12 | 3. AWS Access Keys on your local system OR use AWS CloudShell. 13 | 14 | ## AWS CloudShell 15 | 16 | For those that have AWS CloudShell (which we do recommend), you can use the following installation script. 17 | 18 | **Step 1.** Log into AWS. 19 | 20 | **Step 2.** Open CloudShell 21 | 22 | **Step 3.** Type the following into the CloudShell: 23 | 24 | ``` 25 | git clone https://github.com/neuvik/neuvik-terraform-workshop 26 | ``` 27 | 28 | ``` 29 | cd neuvik-terraform-workshop/RTV_2024/Lab1 30 | ``` 31 | 32 | ``` 33 | bash quickstart.sh 34 | ``` 35 | 36 | **Step 4.** You should now have the Terraform Binary and the required items to work. Unfortunately if you are going to use CloudShell you will need to use one of the console text editors to do your work. This would include: `nano`, `vi`, `emacs`. Since my editor of choice is `emacs` I'll default to `nano` to save you from the pain. 37 | 38 | **Step 5.** Let's also make sure you have the appropriate aws credentials, please type this command: 39 | 40 | ``` 41 | aws sts get-caller-identity 42 | ``` 43 | 44 | You should now have something similar to the following: 45 | 46 | ``` 47 | { 48 | "UserId": "AROA[REDACTED]F:mo[REDACTED]m", 49 | "Account": "02[REDACTED]73", 50 | "Arn": "arn:aws:sts::022[REDACTED]73:assumed-role/AW[REDACTED]bc1/mo[REDACTED]m" 51 | } 52 | ``` 53 | 54 | If these steps are done we can continue. 55 | 56 | ## Our First Terraform Run! 57 | 58 | **Step 6.** The first thing we need to do is download terraform modules, we will also want to validate the items in our current Lab1. We can do this using the `terraform validate` command. 59 | 60 | ``` 61 | terraform validate 62 | ``` 63 | 64 | We get no errors, everything appears great! 
Our terraform validates. Now let's open the vpc.tf file. 65 | 66 | ``` 67 | nano vpc.tf 68 | ``` 69 | 70 | **Step 7.** Let's add the following logic into the VPC file which will create a VPC (Virtual Private Cloud). This will contain the network and the components required to have virtual machines operate 71 | 72 | ``` 73 | resource "aws_vpc" "main" { 74 | cidr_block = "10.0.0.0/16" 75 | } 76 | ``` 77 | 78 | **Step 8.** What we are doing here is creating a VPC Resource, the name of the Resource (which is only relevant to terraform) is called "main". The only item that we are setting is: 79 | 80 | `cidr_block = "10.0.0.0/16"` 81 | 82 | **Step 9.** Let's run `terraform validate` to ensure things will operate correctly. 83 | 84 | ``` 85 | terraform validate 86 | ``` 87 | 88 | As you can see we are not operating correctly. 89 | 90 | 91 | ``` 92 | │ Error: Reference to undeclared input variable 93 | │ 94 | │ on provider.tf line 13, in provider "aws": 95 | │ 13: region = var.region 96 | │ 97 | │ An input variable with the name "region" has not been declared. This variable can be declared with a variable "region" {} block. 98 | ╵ 99 | ``` 100 | 101 | The issue here is that in our original provider.tf file the "region" is not filled in. 102 | 103 | **Step 10.** Let's go ahead and create a region variable to help us correct things. 104 | 105 | ``` 106 | nano vars.tf 107 | ``` 108 | 109 | If you have your own AWS account you are free to choose the appropriate region for yourself. If you are given an account then look at the region assigned to you. In the example below it's us-east-1 but this is just an example, it's not real. 110 | 111 | ``` 112 | variable "region" { 113 | type = string 114 | description = "Please what Datacenter you wish to use, such as us-east-1" 115 | default = "us-test-1" 116 | } 117 | ``` 118 | 119 | **Step 11.** Once you save the file, we can run `terraform validate` and it should work. 
120 | 121 | ``` 122 | terraform validate 123 | ``` 124 | 125 | It should give you this message. 126 | 127 | ``` 128 | Success! The configuration is valid. 129 | ``` 130 | 131 | **Step 12.** Let's now run the following command: 132 | 133 | ``` 134 | terraform plan -out run.plan 135 | ``` 136 | 137 | The output displayed below should also be of note, look them over first. 138 | 139 | 140 | ``` 141 | Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following 142 | symbols: 143 | + create 144 | 145 | Terraform will perform the following actions: 146 | 147 | # aws_vpc.main will be created 148 | + resource "aws_vpc" "main" { 149 | + arn = (known after apply) 150 | + cidr_block = "10.0.0.0/16" 151 | + default_network_acl_id = (known after apply) 152 | + default_route_table_id = (known after apply) 153 | + default_security_group_id = (known after apply) 154 | + dhcp_options_id = (known after apply) 155 | + enable_dns_hostnames = (known after apply) 156 | + enable_dns_support = true 157 | + enable_network_address_usage_metrics = (known after apply) 158 | + id = (known after apply) 159 | + instance_tenancy = "default" 160 | + ipv6_association_id = (known after apply) 161 | + ipv6_cidr_block = (known after apply) 162 | + ipv6_cidr_block_network_border_group = (known after apply) 163 | + main_route_table_id = (known after apply) 164 | + owner_id = (known after apply) 165 | + tags_all = (known after apply) 166 | } 167 | 168 | Plan: 1 to add, 0 to change, 0 to destroy. 169 | 170 | ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── 171 | 172 | Saved the plan to: /tmp/run.plan 173 | 174 | To perform exactly these actions, run the following command to apply: 175 | terraform apply "run.plan" 176 | ``` 177 | 178 | A few things. To note. 179 | 180 | 1. 
These items can all be manipulated, for example, dhcp_options_id can be set to a DHCP Options ID Number. This can be pulled in statically or as a data object. 181 | 2. Notice that you have a Plan output below, 1 item to add, 0 to change 182 | 3. Finally you can run execute this plan by running: `terraform apply "run.plan"` 183 | 184 | **Step 13.** Next, you can apply the terraform: 185 | 186 | ``` 187 | terraform apply "run.plan" 188 | ``` 189 | 190 | Hopefully you see the following output below 191 | 192 | ``` 193 | aws_vpc.main: Creating... 194 | aws_vpc.main: Creation complete after 1s [id=vpc-0f2f9cc1c8ebd7347] 195 | 196 | Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 197 | ``` 198 | 199 | We now have a created resource. We can see this resource in AWS. There are a few ways that we can view it. 200 | 201 | 1. We can use `terraform show` 202 | 2. We can use `terraform output` 203 | 204 | The problem with using terraform output is we have specified no outputs. Let's skip that idea for now. What does `terraform show` do? It allows us to inspect every resource in it's full scope that is in the state file. 
205 | 206 | Run the following command to see it: 207 | 208 | ``` 209 | terraform show 210 | ``` 211 | 212 | The output should be similar to: 213 | 214 | ``` 215 | # aws_vpc.main: 216 | resource "aws_vpc" "main" { 217 | arn = "arn:aws:ec2:us-east-1:[redacted]]:vpc/vpc-0d86541f771f258c1" 218 | assign_generated_ipv6_cidr_block = false 219 | cidr_block = "10.0.0.0/16" 220 | default_network_acl_id = "acl-00ec4fc28abfcb1d6" 221 | default_route_table_id = "rtb-0f3426a904b3fa90a" 222 | default_security_group_id = "sg-0b36c67520c216a8d" 223 | dhcp_options_id = "dopt-bad978c0" 224 | enable_dns_hostnames = false 225 | enable_dns_support = true 226 | enable_network_address_usage_metrics = false 227 | id = "vpc-0d86541f771f258c1" 228 | instance_tenancy = "default" 229 | ipv6_association_id = null 230 | ipv6_cidr_block = null 231 | ipv6_cidr_block_network_border_group = null 232 | ipv6_ipam_pool_id = null 233 | ipv6_netmask_length = 0 234 | main_route_table_id = "rtb-0f3426a904b3fa90a" 235 | owner_id = "170441420683" 236 | tags_all = {} 237 | } 238 | ``` 239 | 240 | **Step 14.** We can use each one of these items in other resources. Looking at these values at the top: 241 | 242 | ``` 243 | # aws_vpc.main: 244 | resource "aws_vpc" "main" { 245 | ``` 246 | 247 | If we want to insert this value `"vpc-0d86541f771f258c1"` into a *different* resource let's talk about a few things. First, we want to look for the value in the output: 248 | 249 | ``` 250 | id = "vpc-0d86541f771f258c1" 251 | ``` 252 | 253 | To leverage these values we could call it by calling `aws_vpc.main.id`, which would replace the the string and insert the vpc- value in the output. We will be using this in a future part of this lab. Let's continue. 254 | 255 | **Step 15.** Let's now run `terraform destroy`. 256 | 257 | ``` 258 | terraform destroy 259 | aws_vpc.main: Refreshing state... [id=vpc-0f2f9cc1c8ebd7347] 260 | 261 | Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: 262 | - destroy 263 | 264 | Terraform will perform the following actions: 265 | 266 | # aws_vpc.main will be destroyed 267 | - resource "aws_vpc" "main" { 268 | - arn = "arn:aws:ec2:us-east-1:[redacted]:vpc/vpc-0f2f9cc1c8ebd7347" -> null 269 | - assign_generated_ipv6_cidr_block = false -> null 270 | - cidr_block = "10.0.0.0/16" -> null 271 | - default_network_acl_id = "acl-0b1fe86cf986fdbd5" -> null 272 | - default_route_table_id = "rtb-05d0d82807d58bc88" -> null 273 | - default_security_group_id = "sg-0ed60e9c328176dd8" -> null 274 | - dhcp_options_id = "dopt-05b58dc3d2a6e6e80" -> null 275 | - enable_dns_hostnames = false -> null 276 | - enable_dns_support = true -> null 277 | - enable_network_address_usage_metrics = false -> null 278 | - id = "vpc-0f2f9cc1c8ebd7347" -> null 279 | - instance_tenancy = "default" -> null 280 | - ipv6_netmask_length = 0 -> null 281 | - main_route_table_id = "rtb-05d0d82807d58bc88" -> null 282 | - owner_id = "022499029073" -> null 283 | - tags = {} -> null 284 | - tags_all = {} -> null 285 | # (4 unchanged attributes hidden) 286 | } 287 | 288 | Plan: 0 to add, 0 to change, 1 to destroy. 289 | 290 | Do you really want to destroy all resources? 291 | Terraform will destroy all your managed infrastructure, as shown above. 292 | There is no undo. Only 'yes' will be accepted to confirm. 293 | 294 | Enter a value: yes 295 | ``` 296 | 297 | Please make sure you type yes in the string to destroy all items. 298 | 299 | ``` 300 | aws_vpc.main: Destroying... [id=vpc-0f2f9cc1c8ebd7347] 301 | aws_vpc.main: Destruction complete after 0s 302 | 303 | Destroy complete! Resources: 1 destroyed. 
## We are now done with Lab 1 — please proceed to the Lab 2 directory
The data element in this case comes from the `aws_availability_zones` data source declared at the top of the file, and `names[0]` selects the first availability zone it returns.
The linter is going to throw out a warning: 100 | 101 | ``` 102 | 1 issue(s) found: 103 | 104 | Warning: terraform "required_version" attribute is required (terraform_required_version) 105 | 106 | on provider.tf line 2: 107 | 2: terraform { 108 | 109 | Reference: https://github.com/terraform-linters/tflint-ruleset-terraform/blob/v0.8.0/docs/rules/terraform_required_version.md 110 | ``` 111 | 112 | This tells us that we have a rule violation. Let's fix that by opening up `provider.tf`. Make the file look like the following: 113 | 114 | ``` 115 | #First, require the latest providers for terraform, specifically we are going use the AWS Provider, and a version of 5.0 or greater. 116 | terraform { 117 | required_providers { 118 | aws = { 119 | source = "hashicorp/aws" 120 | version = "~> 5.0" 121 | } 122 | } 123 | required_version = "~> 1.9.0" 124 | } 125 | 126 | # Configure this provider to the region that you are assigned. By default we are going ot use us-east-1. 127 | provider "aws" { 128 | region = var.region 129 | } 130 | ``` 131 | 132 | This now corrected the linter if you run `tflint` again it will not return an error. 133 | 134 | ``` 135 | terraform fmt 136 | ``` 137 | 138 | The terraform fmt command will make each terraform file come out in a pretty well-defined format. 139 | 140 | ``` 141 | terraform apply 142 | ``` 143 | 144 | Terraform apply will then run and provide us with a question, make sure you answer `yes` to run the entire job. 145 | 146 | ## Setting up EC2 147 | 148 | **Step 3.** We are now going to be putting an EC2 server in the environment, to do this we are going to need to first get an AMI in each datacenter. We will use an Ubuntu image for now to simplify things. 
Create the following file: 149 | 150 | ``` 151 | nano data.tf 152 | ``` 153 | 154 | Insert the following lines: 155 | 156 | ``` 157 | data "aws_ami" "ubuntu" { 158 | most_recent = true 159 | 160 | filter { 161 | name = "name" 162 | values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"] 163 | } 164 | 165 | filter { 166 | name = "virtualization-type" 167 | values = ["hvm"] 168 | } 169 | 170 | owners = ["099720109477"] # Canonical 171 | } 172 | ``` 173 | 174 | This will select the AWS AMI for the 22.04 release of Ubuntu. 175 | 176 | **Step 4.** Next you may be tempted to use a premade SSH Key, however we can use cloud-init scripts to build our machine. Let's create a templatefile that we will use to build our templates. 177 | 178 | The first thing we want to do is setup an ssh-key 179 | 180 | ``` 181 | ssh-keygen 182 | ``` 183 | 184 | Create a Key in the default directory, if you wish to use a password that is up to you as this is a lab. Passwords are always recommended to protect your private key. 185 | 186 | **Step 5.** We are now going to create a template for our first build. As this will be a simple bastion first, let's see how we can build it. 
187 | 188 | ``` 189 | nano ec2.tmpl 190 | ``` 191 | 192 | ``` 193 | #cloud-config 194 | 195 | package_update: true 196 | package_upgrade: true 197 | package_reboot_if_required: true 198 | 199 | fqdn: ${hostname} 200 | 201 | users: 202 | %{ for user_key, user_value in users ~} 203 | - name: ${user_key} 204 | lock_passwd: true 205 | shell: /bin/bash 206 | ssh_authorized_keys: 207 | - ${user_value} 208 | sudo: ALL=(ALL) NOPASSWD:ALL 209 | %{ endfor ~} 210 | 211 | packages: 212 | - apt-transport-https 213 | - build-essential 214 | - ca-certificates 215 | - certbot 216 | - curl 217 | - gnupg 218 | - gnupg-agent 219 | - make 220 | - software-properties-common 221 | - sudo 222 | 223 | power_state: 224 | mode: reboot 225 | delay: 1 226 | message: Rebooting after installation 227 | ``` 228 | 229 | The above is a yaml file and these YAML files are SPACE sensitive. As such we have included a copy of it in the Lab2 folder. 230 | 231 | **Step 6.** To use this, you will need to modify your vars.tf file to add a new map based attribute that will parse names and SSH Keys. 232 | 233 | 234 | ``` 235 | nano vars.tf 236 | ``` 237 | 238 | At the end of the file add the following: 239 | 240 | ``` 241 | variable "operators" { 242 | description = "This is a list that is fed to build local users in the VMs. This uses the username / ssh key pairing to fill out the cloud-init templates" 243 | type = map(any) 244 | default = { 245 | "operator" = "THIS IS YOUR SSH KEY" 246 | } 247 | } 248 | ``` 249 | 250 | Notice that in this string: 251 | 252 | ``` 253 | "operator" = "THIS IS YOUR SSH KEY" 254 | ``` 255 | 256 | Your username will be "operator" and the string that says "THIS IS YOUR SSH KEY" should be filled int with your public key. 
For example: 257 | 258 | ``` 259 | cat ~/.ssh/id_rsa.pub 260 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCvbXx45/1SwFukLruG5Y6vsrWOaFtRTxUakd1HFxas4IGXXJCwoLtJqPPqrvAgPtSFG6Ad4NOdIiGaBV8fypLR+ECZqdmgr1/sp7iFGovVfV1S8eHNlHr6q/Aewo1uYNwJ2ERAqYn57U01C/G5hfyGVTMSaZZ0gQOFo/HYbA/1Yo8sFRNRctw2uArlf3P8v6RZ7Rf7oOK3MGOVwdbcMQna88r9ljM4tA0dAoXu8+wqGWfXkBkTeIOipz+vK5u/NwzJrb8bg6BjYZv41Ws6fXI1eVyxcwJAFrUv2xdMHHHwoGbNNKk9348hjF7aE/u491WCDDpudGUZkxng0JRwpVBL+A5Wb6r+ngb2v3PpjTjs/sg2HIIUB2c6i0iO44LgoavYgdXl4p5F7WQS69hZtmIYj2Q+UV0FLNSMNXh7GiG6pJAF9lcPArg7LbjQWSH1958CGJj3lCMsAjKFF5YqGH9AXAlu0z8L6KIwWmsI6VjWGTyCR2AeYKZ9miGiTnZv/hk= cloudshell-user@ip-10-130-82-117.ec2.internal 261 | ``` 262 | 263 | Then my operator still will look like: 264 | 265 | ``` 266 | "operator" = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCvbXx45/1SwFukLruG5Y6vsrWOaFtRTxUakd1HFxas4IGXXJCwoLtJqPPqrvAgPtSFG6Ad4NOdIiGaBV8fypLR+ECZqdmgr1/sp7iFGovVfV1S8eHNlHr6q/Aewo1uYNwJ2ERAqYn57U01C/G5hfyGVTMSaZZ0gQOFo/HYbA/1Yo8sFRNRctw2uArlf3P8v6RZ7Rf7oOK3MGOVwdbcMQna88r9ljM4tA0dAoXu8+wqGWfXkBkTeIOipz+vK5u/NwzJrb8bg6BjYZv41Ws6fXI1eVyxcwJAFrUv2xdMHHHwoGbNNKk9348hjF7aE/u491WCDDpudGUZkxng0JRwpVBL+A5Wb6r+ngb2v3PpjTjs/sg2HIIUB2c6i0iO44LgoavYgdXl4p5F7WQS69hZtmIYj2Q+UV0FLNSMNXh7GiG6pJAF9lcPArg7LbjQWSH1958CGJj3lCMsAjKFF5YqGH9AXAlu0z8L6KIwWmsI6VjWGTyCR2AeYKZ9miGiTnZv/hk= cloudshell-user@ip-10-130-82-117.ec2.internal" 267 | ``` 268 | 269 | Save the vars.tf file. 270 | 271 | **Step 7.** We also need to make sure we have a security group that lets us in. 
272 | 273 | ``` 274 | nano sg.tf 275 | ``` 276 | 277 | Insert the following: 278 | 279 | ``` 280 | # Security Groups 281 | resource "aws_security_group" "main_sg" { 282 | name = "Private East Servers" 283 | description = "Private East Servers" 284 | vpc_id = aws_vpc.main.id 285 | 286 | ingress { 287 | from_port = 22 288 | to_port = 22 289 | protocol = "tcp" 290 | cidr_blocks = ["0.0.0.0/0"] 291 | } 292 | 293 | ingress { 294 | from_port = 443 295 | to_port = 443 296 | protocol = "tcp" 297 | cidr_blocks = ["0.0.0.0/0"] 298 | } 299 | 300 | egress { 301 | from_port = 0 302 | to_port = 0 303 | protocol = "-1" 304 | cidr_blocks = ["0.0.0.0/0"] 305 | } 306 | 307 | tags = { 308 | Name = "Public Allow" 309 | } 310 | } 311 | ``` 312 | 313 | **Step 8.** Now that all of this is done, we can move on to building our Machine. This would be a way of doing it. 314 | 315 | ``` 316 | nano ec2.tf 317 | ``` 318 | 319 | Insert the following 320 | 321 | ``` 322 | # Ubuntu Default 323 | resource "local_file" "cloud_init_ubuntu" { 324 | content = templatefile("${path.module}/ec2.tmpl", { 325 | hostname = "bastion", 326 | users = var.operators 327 | }) 328 | filename = "./ec2.yaml" 329 | } 330 | 331 | data "local_file" "cloud_init_ubuntu" { 332 | filename = local_file.cloud_init_ubuntu.filename 333 | depends_on = [ 334 | local_file.cloud_init_ubuntu 335 | ] 336 | } 337 | 338 | resource "aws_instance" "bastion" { 339 | ami = data.aws_ami.ubuntu.id 340 | instance_type = "t3.small" 341 | subnet_id = aws_subnet.main.id # This is the aws subnet ID for the public subnet, meant to be a DMZ this is how a "Public IP" is attached to an instance. 
342 | source_dest_check = false 343 | 344 | vpc_security_group_ids = [ 345 | aws_security_group.main_sg.id # Default security group 346 | ] 347 | 348 | user_data = data.local_file.cloud_init_ubuntu.content 349 | 350 | metadata_options { 351 | http_endpoint = "enabled" 352 | http_put_response_hop_limit = "1" 353 | http_tokens = "required" 354 | } 355 | 356 | tags = { 357 | Name = "Bastion Host" 358 | } 359 | 360 | lifecycle { 361 | #ignore_changes = [ 362 | # ami, # Do not remove this, any changes to the Ubuntu AMI will cause this repo to redeploy the machine 363 | # user_data, # Do not remove this, any changes to the User Data will cause this machine to be rebuilt! 364 | #] 365 | #prevent_destroy = true 366 | } 367 | } 368 | ``` 369 | 370 | **Step 9.** Now we need to run a few commands to clean things up and initialize the template module that we have just added. 371 | 372 | ``` 373 | terraform init 374 | ``` 375 | 376 | ``` 377 | terraform fmt 378 | ``` 379 | 380 | ``` 381 | terraform validate 382 | ``` 383 | 384 | ``` 385 | terraform apply 386 | ``` 387 | 388 | **Step 10.** Did you notice that you really don't have any connectivity information? Let's create an output file to help us out. Outputs can be whatever we want. 389 | 390 | ``` 391 | nano output.tf 392 | ``` 393 | 394 | Insert the following text: 395 | 396 | ``` 397 | output "final_text" { 398 | value = <