├── .gitignore
├── ImportCommands.txt
├── JuniorAdminIssue.ps1
├── README.md
├── backend.tf
├── image
└── terraform-import.webp
├── junior_admin.sh
├── m3_commands.txt
├── outputs.tf
├── part01-import-consul
├── config
│ └── m4_commands.txt
├── consul.tfplan
├── data
│ ├── checkpoint-signature
│ ├── node-id
│ ├── raft
│ │ ├── peers.info
│ │ └── raft.db
│ └── serf
│   ├── local.snapshot
│   └── remote.snapshot
├── m4_commands.txt
└── main.tf
├── resources.tf
├── terraform.tfvars
├── terraform.tfvars.example
└── variables.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform
2 | *.tfstate
3 | *.tfstate.backup
4 | m3.tfplan
5 | # NOTE: '*.hcl' alone also ignores config/consul-config.hcl referenced by m4_commands.txt
6 | *.hcl
7 | !consul-config.hcl
--------------------------------------------------------------------------------
/ImportCommands.txt:
--------------------------------------------------------------------------------
1 | #Use the values output by the JuniorAdminIssue.ps1 or junior_admin.sh script
2 |
3 | terraform import --var-file="terraform.tfvars" "module.vpc.aws_route_table.private[2]" rtb-07465504d32ffd748
4 | terraform import --var-file="terraform.tfvars" "module.vpc.aws_route_table_association.private[2]" subnet-01b7a7db16e4b9c22/rtb-07465504d32ffd748
5 | terraform import --var-file="terraform.tfvars" "module.vpc.aws_subnet.private[2]" subnet-01b7a7db16e4b9c22
6 | terraform import --var-file="terraform.tfvars" "module.vpc.aws_route_table_association.public[2]" subnet-0ec4ac3f16023547b/rtb-05e943ee5ee7bbc4d
7 | terraform import --var-file="terraform.tfvars" "module.vpc.aws_subnet.public[2]" subnet-0ec4ac3f16023547b
8 |
--------------------------------------------------------------------------------
/JuniorAdminIssue.ps1:
--------------------------------------------------------------------------------
# Simulates a junior admin ("Jimmy") creating two subnets and route-table
# associations by hand, outside of Terraform. The IDs printed at the end are
# the import IDs consumed by the terraform import commands in ImportCommands.txt.
# Requires the AWSPowerShell.NetCore module and a configured "deep-dive" profile.

# Import the AWS module
Import-Module AWSPowerShell.NetCore

#Select the AWS profile deep-dive
Set-AWSCredential -ProfileName "deep-dive"

#Set the default region as applicable
# NOTE(review): terraform.tfvars currently sets region = "eu-west-1" -- confirm
# this script and the Terraform config act on the same region.
$region = "us-east-1"
Set-DefaultAWSRegion -Region $region

#Get the VPC and AZs
#This assumes you used globo-primary for the name of the VPC
$vpc = Get-EC2Vpc -Filter @{Name="tag:Name"; Values="globo-primary"}
$azs = Get-EC2AvailabilityZone
# Index [2] after sorting by ZoneName selects the third AZ in the region
$az = ($azs | Sort-Object -Property ZoneName)[2]

#Create two new subnets in the third AZ
# CIDRs match the third entries of private_subnets/public_subnets in
# terraform.tfvars so the later import produces no diff
$privateSubnet = New-EC2Subnet -AvailabilityZone $az.ZoneName `
    -CidrBlock "10.0.12.0/24" -VpcId $vpc.VpcId
$publicSubnet = New-EC2Subnet -AvailabilityZone $az.ZoneName `
    -CidrBlock "10.0.2.0/24" -VpcId $vpc.VpcId

#Get the Public route table for all public subnets and associate the new public subnet
$publicRouteTable = Get-EC2RouteTable `
    -Filter @{ Name="tag:Name"; values="globo-primary-public"} -Region $region
$publicRouteTableAssociation = Register-EC2RouteTable `
    -RouteTableId $publicRouteTable.RouteTableId -SubnetId $publicSubnet.SubnetId

#Create a route table for the new private subnet and send traffic through the NAT Gateway
$privateRouteTable = New-EC2RouteTable -VpcId $vpc.VpcId
Register-EC2RouteTable -RouteTableId $privateRouteTable.RouteTableId `
    -SubnetId $privateSubnet.SubnetId

Write-Output "Oh Jimmy, what did you do?"

# Collect the manually created resource IDs. The association values use the
# "subnetId/routeTableId" format that terraform import expects for
# aws_route_table_association resources.
$JimmysResources = @{}
$JimmysResources.Add("privateSubnet",$privateSubnet.SubnetId)
$JimmysResources.Add("publicSubnet",$publicSubnet.SubnetId)
$JimmysResources.Add("privateRouteTable",$privateRouteTable.RouteTableId)
$JimmysResources.Add("privateRouteTableAssoc","$($privateSubnet.SubnetId)/$($privateRouteTable.RouteTableId)")
$JimmysResources.Add("publicRouteTableAssoc","$($publicSubnet.SubnetId)/$($publicRouteTable.RouteTableId)")


Write-Output ($JimmysResources.GetEnumerator() | sort -Property Name)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Terraform Import Tutorial
2 |
3 |
4 |
5 |
6 |
7 | **This repo contains many code samples with explanations (Linux configuration management) to help you find what you need for Terraform import easily and in a structured manner.**
8 |
9 | ## Published articles:
10 |
11 | - [Terraform import Tutorial - Part 0 - Getting Ready to Terraform import]()
12 |
13 | - [Terraform import Tutorial - Part 1 - User Management]()
14 |
15 |
16 | ## Contributions:
17 |
18 | All contributions are welcomed. Help me to enrich this repository.
19 |
20 | If you find any **bugs** in the examples, please file an issue.
21 |
22 | ### TODO:
23 |
24 | - [ ] Adding Terraform import ...
25 | - [ ] Adding Terraform import ...
26 |
27 |
--------------------------------------------------------------------------------
/backend.tf:
--------------------------------------------------------------------------------
## Move this backend file to m3 when migrating state
## (see m4_commands.txt; run `terraform init` afterwards to migrate).
terraform {
  backend "consul" {
    # Local single-node Consul agent started per m4_commands.txt.
    # This is a partial backend config: the KV path for the state is
    # supplied at init time, e.g.
    #   terraform init -backend-config="path=networking/state/globo-primary"
    # Authentication comes from the CONSUL_HTTP_TOKEN environment variable.
    address = "127.0.0.1:8500"
    scheme = "http"
  }
}
--------------------------------------------------------------------------------
/image/terraform-import.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/babakDoraniArab/terraform-import-tutorial/5d563b1531a9947fa45b40e991fe330bfbefb3cf/image/terraform-import.webp
--------------------------------------------------------------------------------
/junior_admin.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Simulates a junior admin ("Jimmy") creating two subnets and route-table
# associations by hand, outside of Terraform, so they can later be brought
# under management with `terraform import` (see ImportCommands.txt).
#
# Requires: aws CLI with a configured "deep-dive" profile, and jq.
set -euo pipefail

# Set AWS profile to use deep-dive
export AWS_PROFILE=deep-dive

# If you don't have jq installed, you're going to need it
#sudo apt install jq -y

# We're going to manually create two new subnets

# First let's get the existing VPC id
vpc_id=$(aws ec2 describe-vpcs --filters Name="tag:Name",Values="globo-primary" \
  --query 'Vpcs[0].VpcId' --output text)

# Get the third AZ in the region
az=$(aws ec2 describe-availability-zones --query 'AvailabilityZones[2].ZoneId' --output text)

# Create a new public subnet in the VPC
# NOTE: CIDRs match the third entries in terraform.tfvars and the PowerShell
# script (JuniorAdminIssue.ps1) so the later import produces no diff.
# (Previously 10.0.3.0/24 / 10.0.13.0/24, which disagreed with the config.)
pub_subnet=$(aws ec2 create-subnet --availability-zone-id "$az" \
  --cidr-block "10.0.2.0/24" --vpc-id "$vpc_id")

# Create a new private subnet in the VPC
priv_subnet=$(aws ec2 create-subnet --availability-zone-id "$az" \
  --cidr-block "10.0.12.0/24" --vpc-id "$vpc_id")

# Create a private route table for priv_subnet
priv_rt=$(aws ec2 create-route-table --vpc-id "$vpc_id")
priv_rt_id=$(printf '%s' "$priv_rt" | jq -r .RouteTable.RouteTableId)

# Get the subnet ID for the private subnet
priv_subnet_id=$(printf '%s' "$priv_subnet" | jq -r .Subnet.SubnetId)

# Associate route table with private subnet
aws ec2 associate-route-table --route-table-id "$priv_rt_id" --subnet-id "$priv_subnet_id"

# Get the public route table
pub_rt_id=$(aws ec2 describe-route-tables --filters Name="vpc-id",Values="$vpc_id" \
  Name="tag:Name",Values="globo-primary-public" \
  --query 'RouteTables[0].RouteTableId' --output text)

# Get the subnet ID for the public subnet
pub_subnet_id=$(printf '%s' "$pub_subnet" | jq -r .Subnet.SubnetId)

# Associate the public route table with pub_subnet
aws ec2 associate-route-table --route-table-id "$pub_rt_id" --subnet-id "$pub_subnet_id"

# Print the IDs in the "name: importId" form expected by ImportCommands.txt
# (associations use the subnetId/routeTableId format terraform import needs).
echo "privateRouteTable: $priv_rt_id"
echo "privateRouteTableAssoc: $priv_subnet_id/$priv_rt_id"
echo "privateSubnet: $priv_subnet_id"
echo "publicRouteTableAssoc: $pub_subnet_id/$pub_rt_id"
echo "publicSubnet: $pub_subnet_id"
--------------------------------------------------------------------------------
/m3_commands.txt:
--------------------------------------------------------------------------------
1 | # Configure an AWS profile with proper credentials
2 | aws configure --profile deep-dive
3 |
4 | # Linux or MacOS
5 | export AWS_PROFILE=deep-dive
6 |
7 | # Windows
8 | $env:AWS_PROFILE="deep-dive"
9 |
10 | # Deploy the current environment
11 | terraform init
12 | terraform validate
13 | terraform plan -out m3.tfplan
14 | terraform apply "m3.tfplan"
15 |
16 | # Now Jimmy ruins things
17 |
18 | # Linux and MacOS: Run the junior_admin.sh script
19 | ./junior_admin.sh
20 |
21 | # Windows: Install the AWS PowerShell module
22 | Install-Module AWSPowerShell.NetCore -Scope CurrentUser
23 |
24 | # Windows: Run the JuniorAdminIssue.ps1 script
25 | .\JuniorAdminIssue.ps1
26 |
27 | # Update your terraform.tfvars file to comment out the current
28 | # private_subnets, public_subnets, and subnet_count values and
29 | # uncomment the updated values
30 |
31 | # Run the import commands in ImportCommands.txt
32 |
33 | terraform plan -out m3.tfplan
34 |
35 | # There should be 3 changes where tags are added
36 |
37 | terraform apply "m3.tfplan"
38 |
39 | terraform destroy
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
1 | ##################################################################################
2 | # OUTPUT
3 | ##################################################################################
4 |
--------------------------------------------------------------------------------
/part01-import-consul/config/m4_commands.txt:
--------------------------------------------------------------------------------
1 | ## First let's try out some terraform state commands
2 | ## Go to the m3 folder and run the state commands
3 |
4 | # View all the Terraform resources
5 | terraform state list
6 |
7 | # Now let's look at a specific resource
8 | terraform state show module.vpc.aws_vpc.this[0]
9 |
10 | # We can also view all the state data
11 | terraform state pull
12 |
13 | ## Now it's time to deploy our local Consul server node
14 | ## Download the consul executable from https://www.consul.io/downloads
15 |
16 | # Go into the consul subfolder in m4
17 | cd ../m4/consul
18 |
19 | # Create a data subdirectory
20 | mkdir data
21 |
22 | # Launch consul server instance
23 | consul agent -bootstrap -config-file="config/consul-config.hcl" -bind="127.0.0.1"
24 |
25 | # Open a separate terminal window to run the rest of the commands
26 | # Make sure you are back in the m4/consul directory
27 | cd m4/consul
28 |
29 | # Generate the bootstrap token
30 | consul acl bootstrap
31 |
32 | # Set CONSUL_TOKEN to SecretID
33 |
34 | # Linux and MacOS
35 | export CONSUL_HTTP_TOKEN=SECRETID_VALUE
36 | # e.g. export CONSUL_HTTP_TOKEN=<SecretID from 'consul acl bootstrap' output> -- do not commit real tokens
37 |
38 |
39 | # Windows
40 | $env:CONSUL_HTTP_TOKEN="SECRETID_VALUE"
41 |
42 | ## Now we're going to configure Consul using Terraform
43 | # Set up paths, policies, and tokens
44 | terraform init
45 | terraform plan -out consul.tfplan
46 | terraform apply consul.tfplan
47 |
48 | # Get token values for Mary and Sally and record them for later
49 | consul acl token read -id ACCESSOR_ID_MARY
50 | consul acl token read -id ACCESSOR_ID_SALLY
51 |
52 | # Go back to the main m4 folder
53 | cd ..
54 |
55 | ## Now let's set up the Consul backend and migrate the state
56 |
57 | # Copy the backend.tf file to m3
58 |
59 | cp backend.tf ../m3/backend.tf
60 |
61 | # Move to the m3 folder
62 | cd ../m3
63 |
64 | # Now let's set the Consul token to Mary Moe
65 | # Replace SECRETID_VALUE with Mary Moe's secret ID
66 | # Linux and MacOS
67 | export CONSUL_HTTP_TOKEN=SECRETID_VALUE
68 |
69 | # Windows
70 | $env:CONSUL_HTTP_TOKEN="SECRETID_VALUE"
71 |
72 | # Now we can initialize the backend config
73 | terraform init -backend-config="path=networking/state/globo-primary"
74 |
75 | # Change the enable_nat_gateway to true in the resources.tf file
76 |
77 | # Now run terraform plan and apply
78 | terraform plan -out nat.tfplan
79 | terraform apply nat.tfplan
80 |
81 | # Open a second terminal
82 | # Export the Consul token again
83 | # Try to run a terraform plan
84 | terraform plan
85 |
86 | ## You can stop your Consul instance if you want now, or leave it running
87 | ## for the next module
88 |
89 | ## We are going to keep using the infrastructure in AWS for m5, so don't destroy it!
--------------------------------------------------------------------------------
/part01-import-consul/consul.tfplan:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/babakDoraniArab/terraform-import-tutorial/5d563b1531a9947fa45b40e991fe330bfbefb3cf/part01-import-consul/consul.tfplan
--------------------------------------------------------------------------------
/part01-import-consul/data/checkpoint-signature:
--------------------------------------------------------------------------------
1 | 0e54f9d0-ce43-b56a-e71f-453b6ec4b88f
2 |
3 |
4 | This signature is a randomly generated UUID used to de-duplicate
5 | alerts and version information. This signature is random, it is
6 | not based on any personally identifiable information. To create
7 | a new signature, you can simply delete this file at any time.
8 | See the documentation for the software using Checkpoint for more
9 | information on how to disable it.
10 |
11 |
--------------------------------------------------------------------------------
/part01-import-consul/data/node-id:
--------------------------------------------------------------------------------
1 | c42b57a1-365d-0db7-1797-16762127ebac
--------------------------------------------------------------------------------
/part01-import-consul/data/raft/peers.info:
--------------------------------------------------------------------------------
1 |
2 | As of Consul 0.7.0, the peers.json file is only used for recovery
3 | after an outage. The format of this file depends on what the server has
4 | configured for its Raft protocol version. Please see the agent configuration
5 | page at https://www.consul.io/docs/agent/options.html#_raft_protocol for more
6 | details about this parameter.
7 |
8 | For Raft protocol version 2 and earlier, this should be formatted as a JSON
9 | array containing the address and port of each Consul server in the cluster, like
10 | this:
11 |
12 | [
13 | "10.1.0.1:8300",
14 | "10.1.0.2:8300",
15 | "10.1.0.3:8300"
16 | ]
17 |
18 | For Raft protocol version 3 and later, this should be formatted as a JSON
19 | array containing the node ID, address:port, and suffrage information of each
20 | Consul server in the cluster, like this:
21 |
22 | [
23 | {
24 | "id": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
25 | "address": "10.1.0.1:8300",
26 | "non_voter": false
27 | },
28 | {
29 | "id": "8b6dda82-3103-11e7-93ae-92361f002671",
30 | "address": "10.1.0.2:8300",
31 | "non_voter": false
32 | },
33 | {
34 | "id": "97e17742-3103-11e7-93ae-92361f002671",
35 | "address": "10.1.0.3:8300",
36 | "non_voter": false
37 | }
38 | ]
39 |
40 | The "id" field is the node ID of the server. This can be found in the logs when
41 | the server starts up, or in the "node-id" file inside the server's data
42 | directory.
43 |
44 | The "address" field is the address and port of the server.
45 |
46 | The "non_voter" field controls whether the server is a non-voter, which is used
47 | in some advanced Autopilot configurations, please see
48 | https://www.consul.io/docs/guides/autopilot.html for more information. If
49 | "non_voter" is omitted it will default to false, which is typical for most
50 | clusters.
51 |
52 | Under normal operation, the peers.json file will not be present.
53 |
54 | When Consul starts for the first time, it will create this peers.info file and
55 | delete any existing peers.json file so that recovery doesn't occur on the first
56 | startup.
57 |
58 | Once this peers.info file is present, any peers.json file will be ingested at
59 | startup, and will set the Raft peer configuration manually to recover from an
60 | outage. It's crucial that all servers in the cluster are shut down before
61 | creating the peers.json file, and that all servers receive the same
62 | configuration. Once the peers.json file is successfully ingested and applied, it
63 | will be deleted.
64 |
65 | Please see https://www.consul.io/docs/guides/outage.html for more information.
66 |
--------------------------------------------------------------------------------
/part01-import-consul/data/raft/raft.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/babakDoraniArab/terraform-import-tutorial/5d563b1531a9947fa45b40e991fe330bfbefb3cf/part01-import-consul/data/raft/raft.db
--------------------------------------------------------------------------------
/part01-import-consul/data/serf/local.snapshot:
--------------------------------------------------------------------------------
1 | alive: Babaks-MacBook-Pro.local 127.0.0.1:8301
2 | event-clock: 1
3 |
--------------------------------------------------------------------------------
/part01-import-consul/data/serf/remote.snapshot:
--------------------------------------------------------------------------------
1 | alive: Babaks-MacBook-Pro.local.dc1 127.0.0.1:8302
2 |
--------------------------------------------------------------------------------
/part01-import-consul/m4_commands.txt:
--------------------------------------------------------------------------------
1 | ## First let's try out some terraform state commands
2 | ## Go to the m3 folder and run the state commands
3 |
4 | # View all the Terraform resources
5 | terraform state list
6 |
7 | # Now let's look at a specific resource
8 | terraform state show module.vpc.aws_vpc.this[0]
9 |
10 | # We can also view all the state data
11 | terraform state pull
12 |
13 | ## Now it's time to deploy our local Consul server node
14 | ## Download the consul executable from https://www.consul.io/downloads
15 |
16 | # Go into the consul subfolder in m4
17 | cd ../m4/consul
18 |
19 | # Create a data subdirectory
20 | mkdir data
21 |
22 | # Launch consul server instance
23 | consul agent -bootstrap -config-file="config/consul-config.hcl" -bind="127.0.0.1"
24 |
25 | # Open a separate terminal window to run the rest of the commands
26 | # Make sure you are back in the m4/consul directory
27 | cd m4/consul
28 |
29 | # Generate the bootstrap token
30 | consul acl bootstrap
31 |
32 | # Set CONSUL_TOKEN to SecretID
33 |
34 | # Linux and MacOS
35 | export CONSUL_HTTP_TOKEN=SECRETID_VALUE
36 |
37 | # Windows
38 | $env:CONSUL_HTTP_TOKEN="SECRETID_VALUE"
39 |
40 | ## Now we're going to configure Consul using Terraform
41 | # Set up paths, policies, and tokens
42 | terraform init
43 | terraform plan -out consul.tfplan
44 | terraform apply consul.tfplan
45 |
46 | # Get token values for Mary and Sally and record them for later
47 | consul acl token read -id ACCESSOR_ID_MARY
48 | consul acl token read -id ACCESSOR_ID_SALLY
49 |
50 | # Go back to the main m4 folder
51 | cd ..
52 |
53 | ## Now let's set up the Consul backend and migrate the state
54 |
55 | # Copy the backend.tf file to m3
56 | cp backend.tf ../m3/backend.tf
57 |
58 | # Move to the m3 folder
59 | cd ../m3
60 |
61 | # Now let's set the Consul token to Mary Moe
62 | # Replace SECRETID_VALUE with Mary Moe's secret ID
63 | # Linux and MacOS
64 | export CONSUL_HTTP_TOKEN=SECRETID_VALUE
65 | # e.g. export CONSUL_HTTP_TOKEN=<Mary Moe's SecretID> -- do not commit real tokens
66 |
67 | # Windows
68 | $env:CONSUL_HTTP_TOKEN="SECRETID_VALUE"
69 |
70 | # Now we can initialize the backend config
71 | terraform init -backend-config="path=networking/state/globo-primary"
72 |
73 | # Change the enable_nat_gateway to true in the resources.tf file
74 |
75 | # Now run terraform plan and apply
76 | terraform plan -out nat.tfplan
77 | terraform apply nat.tfplan
78 |
79 | # Open a second terminal
80 | # Export the Consul token again
81 | # Try to run a terraform plan
82 | terraform plan
83 |
84 | ## You can stop your Consul instance if you want now, or leave it running
85 | ## for the next module
86 |
87 | ## We are going to keep using the infrastructure in AWS for m5, so don't destroy it!
--------------------------------------------------------------------------------
/part01-import-consul/main.tf:
--------------------------------------------------------------------------------
##################################################################################
# CONFIGURATION - added for Terraform 0.14
##################################################################################

terraform {
  required_providers {
    consul = {
      source = "hashicorp/consul"
      version = "~>2.0"
    }
  }
}

##################################################################################
# PROVIDERS
##################################################################################

# Talks to the local single-node Consul agent started in m4_commands.txt.
# Authentication comes from the CONSUL_HTTP_TOKEN environment variable
# (the bootstrap token when first applying this config).
provider "consul" {
  address = "127.0.0.1:8500"
  datacenter = "dc1"
}

##################################################################################
# RESOURCES
##################################################################################

# Pre-create the KV paths that will hold Terraform configuration data and
# remote state for the networking team.
resource "consul_keys" "networking" {

  key {
    path = "networking/configuration/"
    value = ""
  }

  key {
    path = "networking/state/"
    value = ""
  }
}

# Same KV layout for the applications team.
resource "consul_keys" "applications" {

  key {
    path = "applications/configuration/"
    value = ""
  }

  key {
    path = "applications/state/"
    value = ""
  }
}

# Networking team policy: write access to its own KV prefix, plus session
# write (sessions are required for Terraform's Consul-backend state locking).
resource "consul_acl_policy" "networking" {
  name = "networking"
  rules = <<-RULE
    key_prefix "networking" {
      policy = "write"
    }

    session_prefix "" {
      policy = "write"
    }
  RULE
}

# Applications team policy: write access to its own prefix, read-only access
# to the networking state (so app configs can consume network outputs), and
# session write for state locking.
resource "consul_acl_policy" "applications" {
  name = "applications"
  rules = <<-RULE
    key_prefix "applications" {
      policy = "write"
    }

    key_prefix "networking/state" {
      policy = "read"
    }

    session_prefix "" {
      policy = "write"
    }

  RULE
}

# Per-user tokens bound to the team policies above.
resource "consul_acl_token" "mary" {
  description = "token for Mary Moe"
  policies = [consul_acl_policy.networking.name]
}

resource "consul_acl_token" "sally" {
  description = "token for Sally Sue"
  policies = [consul_acl_policy.applications.name]
}

##################################################################################
# OUTPUTS
##################################################################################

# NOTE: the token resource id is the AccessorID, not the SecretID. Retrieve
# the usable secret with: consul acl token read -id <accessor_id>
output "mary_token_accessor_id" {
  value = consul_acl_token.mary.id
}

output "sally_token_accessor_id" {
  value = consul_acl_token.sally.id
}
--------------------------------------------------------------------------------
/resources.tf:
--------------------------------------------------------------------------------
##################################################################################
# CONFIGURATION - added for Terraform 0.14
##################################################################################

terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      version = "~>3.0"
    }
  }
}

##################################################################################
# PROVIDERS
##################################################################################

provider "aws" {
  # Use the profile the rest of the tutorial configures and exports
  # (m3_commands.txt: `aws configure --profile deep-dive`). Was hard-coded
  # to a personal profile ("babak"), which no other file references.
  profile = "deep-dive"
  region = var.region
}

##################################################################################
# DATA
##################################################################################

# AZs available in var.region; sliced below to take the first subnet_count.
data "aws_availability_zones" "available" {}

##################################################################################
# RESOURCES
##################################################################################

# NETWORKING #
# VPC with one public and one private subnet per selected AZ, built from the
# community terraform-aws-modules VPC module.
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"
  version = "~>2.0"

  name = "globo-primary"

  cidr = var.cidr_block
  azs = slice(data.aws_availability_zones.available.names, 0, var.subnet_count)
  private_subnets = var.private_subnets
  public_subnets = var.public_subnets

  # Flipped to true later in the tutorial (see m4_commands.txt step
  # "Change the enable_nat_gateway to true").
  enable_nat_gateway = false

  create_database_subnet_group = false

  tags = {
    Environment = "Production"
    Team = "Network"
  }
}
--------------------------------------------------------------------------------
/terraform.tfvars:
--------------------------------------------------------------------------------
1 | # private_subnets = ["10.0.10.0/24", "10.0.11.0/24"]
2 |
3 | # public_subnets = ["10.0.0.0/24", "10.0.1.0/24"]
4 |
5 | # subnet_count = 2
6 |
7 | region = "eu-west-1" # NOTE(review): JuniorAdminIssue.ps1 hard-codes us-east-1 -- confirm the regions match
8 |
9 |
10 | # Use these for the import update
11 |
12 | private_subnets = ["10.0.10.0/24", "10.0.11.0/24", "10.0.12.0/24"]
13 |
14 | public_subnets = ["10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24"]
15 |
16 | subnet_count = 3
--------------------------------------------------------------------------------
/terraform.tfvars.example:
--------------------------------------------------------------------------------
1 | private_subnets = ["10.0.10.0/24", "10.0.11.0/24"]
2 |
3 | public_subnets = ["10.0.0.0/24", "10.0.1.0/24"]
4 |
5 | subnet_count = 2
6 |
7 | # Use these for the import update
8 |
9 | #private_subnets = ["10.0.10.0/24", "10.0.11.0/24", "10.0.12.0/24"]
10 |
11 | #public_subnets = ["10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24"]
12 |
13 | #subnet_count = 3
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
##################################################################################
# VARIABLES
##################################################################################

# NOTE: terraform.tfvars overrides this with eu-west-1 while the admin
# scripts act on us-east-1 -- keep them consistent.
variable "region" {
  type        = string
  description = "AWS region to deploy the VPC into"
  default     = "us-east-1"
}

variable "subnet_count" {
  type        = number
  description = "Number of availability zones (and subnets per tier) to use"
  default     = 2
}

variable "cidr_block" {
  type        = string
  description = "CIDR block for the VPC"
  default     = "10.0.0.0/16"
}

variable "private_subnets" {
  # Tightened from list(any): every supplied value is a CIDR string.
  type        = list(string)
  description = "CIDR blocks for the private subnets, one per AZ"
}

variable "public_subnets" {
  type        = list(string)
  description = "CIDR blocks for the public subnets, one per AZ"
}
--------------------------------------------------------------------------------