├── m2
│   └── README.md
├── m8
│   ├── space_coyote
│   │   ├── terraform.tfvars
│   │   ├── terraform.tf
│   │   ├── outputs.tf
│   │   ├── networking
│   │   │   ├── outputs.tf
│   │   │   ├── variables.tf
│   │   │   └── main.tf
│   │   ├── files
│   │   │   └── deploy_app.sh
│   │   ├── variables.tf
│   │   └── main.tf
│   ├── commands.sh
│   ├── datasource.tf
│   └── README.md
├── m5
│   ├── commands.sh
│   └── README.md
├── m4
│   ├── commands.sh
│   ├── terraform.tf
│   ├── outputs.tf
│   ├── files
│   │   └── deploy_app.sh
│   ├── variables.tf
│   ├── README.md
│   └── main.tf
├── .gitignore
├── LICENSE
├── m6
│   ├── enforce-mandatory-tags.sentinel
│   ├── README.md
│   ├── restrict-ingress-sg-rule-ssh.sentinel
│   └── restrict-ec2-instance-type.sentinel
├── m9
│   ├── NetworkREADME.md
│   └── README.md
├── m3
│   └── README.md
└── README.md
/m2/README.md:
--------------------------------------------------------------------------------
1 | ## Demo
2 |
3 | No demos in this module
4 |
--------------------------------------------------------------------------------
/m8/space_coyote/terraform.tfvars:
--------------------------------------------------------------------------------
1 | prefix = "sc"
2 | project = "space_coyote"
3 | environment = "development"
4 | billing = "314159"
--------------------------------------------------------------------------------
/m5/commands.sh:
--------------------------------------------------------------------------------
1 | # Clone diamond-dogs repo and copy files over
2 | # Running from the root of exercise files
3 | cp -r ./diamond-dogs-app/* ../diamond-dogs/
--------------------------------------------------------------------------------
/m4/commands.sh:
--------------------------------------------------------------------------------
1 | # Copy the files to a new directory
2 | # From the root of the example files run this command:
3 | cp ./m4 ./diamond-dogs-app -r
4 |
5 |
6 |
--------------------------------------------------------------------------------
/m4/terraform.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | aws = {
4 | source = "hashicorp/aws"
5 | version = "~>5.0"
6 | }
7 | }
8 | }
--------------------------------------------------------------------------------
/m8/commands.sh:
--------------------------------------------------------------------------------
1 | # Copy the files to a new directory
2 | # From the root of the example files run this command:
3 | cp ./m8/space_coyote ./space-coyote-app -r
--------------------------------------------------------------------------------
/m8/space_coyote/terraform.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | aws = {
4 | source = "hashicorp/aws"
5 | version = "~>5.0"
6 | }
7 | }
8 | }
--------------------------------------------------------------------------------
/m4/outputs.tf:
--------------------------------------------------------------------------------
1 | output "diamond_dogs_url" {
2 | value = "http://${aws_eip.diamond_dogs.public_dns}"
3 | }
4 |
5 | output "diamond_dogs_ip" {
6 | value = "http://${aws_eip.diamond_dogs.public_ip}"
7 | }
8 |
--------------------------------------------------------------------------------
/m8/space_coyote/outputs.tf:
--------------------------------------------------------------------------------
1 | output "space_coyote_url" {
2 | value = "http://${aws_eip.space_coyote.public_dns}"
3 | }
4 |
5 | output "space_coyote_ip" {
6 | value = "http://${aws_eip.space_coyote.public_ip}"
7 | }
--------------------------------------------------------------------------------
/m8/space_coyote/networking/outputs.tf:
--------------------------------------------------------------------------------
1 | output "vpc_id" {
2 | description = "VPC id for Space Coyote"
3 | value = aws_vpc.space_coyote.id
4 | }
5 |
6 | output "subnet_id" {
7 | description = "Subnet id for Space Coyote"
8 | value = aws_subnet.space_coyote.id
9 | }
--------------------------------------------------------------------------------
/m8/datasource.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | tfe = {
4 | source = "hashicorp/tfe"
5 | version = "~>0.50"
6 | }
7 | }
8 | }
9 |
10 | data "tfe_outputs" "diamond_dogs" {
11 | organization = "YOUR_ORGANIZATION"
12 | workspace = "DIAMOND_DOGS_WORKSPACE_NAME"
13 | }
14 |
15 | output "diamond_dogs_url" {
16 | value = data.tfe_outputs.diamond_dogs.nonsensitive_values["diamond_dogs_url"]
17 | }
--------------------------------------------------------------------------------
/m8/space_coyote/files/deploy_app.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Script to deploy a very simple web application.
3 | # The web app has a customizable image and some text.
4 |
5 | sudo apt -y update
6 | sudo apt -y install apache2 cowsay unzip
7 | sudo systemctl start apache2
8 |
9 | wget https://github.com/ned1313/FlappyBird-JavaScript/archive/refs/heads/master.zip
10 | unzip master.zip
11 | sudo chown -R ubuntu:ubuntu /var/www/html
12 |
13 | cp -r FlappyBird-JavaScript-master/* /var/www/html/
14 |
15 | cowsay Space Coyote - Curses!!!
--------------------------------------------------------------------------------
/m4/files/deploy_app.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Script to deploy a very simple web application.
3 | # The web app has a customizable image and some text.
4 |
5 | sudo apt -y update
6 | sudo apt -y install apache2 cowsay
7 | sudo systemctl start apache2
8 | sudo chown -R ubuntu:ubuntu /var/www/html
9 |
10 | cat << EOM > /var/www/html/index.html
11 | <html>
12 | <head><title>Howl!!!</title></head>
13 | <body>
14 | <div style="width:800px;margin: 0 auto">
15 |
16 | <center><img src="http://${placeholder}/${width}/${height}"></center>
17 |
18 | <center><h2>Diamond Dogs</h2></center>
19 | Welcome to ${project}'s app.
20 |
21 | </div>
22 | </body>
23 | </html>
24 |
25 | EOM
26 |
27 | cowsay Diamond Dogs - Howl!!!
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Crash log files
9 | crash.log
10 |
11 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most
12 | # .tfvars files are managed as part of configuration and so should be included in
13 | # version control.
14 | #
15 | # example.tfvars
16 |
17 | # Ignore override files as they are usually used to override resources locally and so
18 | # are not checked in
19 | override.tf
20 | override.tf.json
21 | *_override.tf
22 | *_override.tf.json
23 |
24 | # Include override files you do wish to add to version control using negated pattern
25 | #
26 | # !example_override.tf
27 |
28 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
29 | # example: *tfplan*
30 |
31 | # Ignore diamond dogs and space coyote folders
32 | diamond-dogs-app/*
33 | space-coyote-app/*
34 | policy_testing/*
35 | terraform-aws-networking/*
--------------------------------------------------------------------------------
/m8/space_coyote/networking/variables.tf:
--------------------------------------------------------------------------------
1 | variable "prefix" {
2 | type = string
3 | description = "(Required) This prefix will be included in the name of most resources."
4 | }
5 |
6 | variable "environment" {
7 | type = string
8 | description = "(Required) Application environment for deployment, defaults to development."
9 | }
10 |
11 | variable "region" {
12 | type = string
13 | description = "(Optional) The region where the resources are created. Defaults to us-east-1."
14 | default = "us-east-1"
15 | }
16 |
17 | variable "address_space" {
18 | type = string
19 | description = "(Required) The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created. Example: 10.0.0.0/16."
20 | }
21 |
22 | variable "subnet_prefix" {
23 | type = string
24 | description = "(Optional) The address prefix to use for the subnet. Defaults to 10.0.10.0/24."
25 | default = "10.0.10.0/24"
26 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Ned Bellavance
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/m6/enforce-mandatory-tags.sentinel:
--------------------------------------------------------------------------------
1 | # Enforcing Mandatory Tags for aws ec2 instances
2 |
3 | # This Policy uses tfplan/v2 as the data source
4 | import "tfplan/v2" as tfplan
5 |
6 | # The list of mandatory tags
7 | param mandatory_tags default ["Environment","Billing"]
8 |
9 | print("Checking for mandatory tags:", mandatory_tags)
10 |
11 | # Filtering all of 'aws_instance' resources from the plan file
12 | ec2_instances = filter tfplan.resource_changes as _, resource {
13 | resource.mode == "managed" and
14 | resource.type == "aws_instance" and
15 | (resource.change.actions contains "create" or resource.change.actions == ["update"])
16 | }
17 |
18 | # Verify each instance has the mandatory tags
19 | deny_resources_without_mandatory_tags = func(required_tags, instances) {
20 | validated = true
21 |
22 | for instances as address, r {
23 | for required_tags as mtag {
24 | if r.change.after.tags_all not contains mtag {
25 | print("Resource", address, "is missing the mandatory tag", mtag)
26 | validated = false
27 | }
28 | }
29 | }
30 |
31 | return validated
32 | }
33 |
34 |
35 |
36 | # Main Policy
37 | main = rule {
38 | deny_resources_without_mandatory_tags(mandatory_tags, ec2_instances)
39 | }
40 |
--------------------------------------------------------------------------------
/m8/space_coyote/networking/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">1.0.0"
3 | required_providers {
4 | aws = {
5 | source = "hashicorp/aws"
6 | version = "~>5.0"
7 | }
8 | }
9 | }
10 |
11 | resource "aws_vpc" "space_coyote" {
12 | cidr_block = var.address_space
13 | enable_dns_hostnames = true
14 |
15 | tags = {
16 | name = "${var.prefix}-vpc-${var.region}"
17 | environment = var.environment
18 | }
19 | }
20 |
21 | resource "aws_subnet" "space_coyote" {
22 | vpc_id = aws_vpc.space_coyote.id
23 | cidr_block = var.subnet_prefix
24 |
25 | tags = {
26 | name = "${var.prefix}-subnet"
27 | }
28 | }
29 |
30 | resource "aws_internet_gateway" "space_coyote" {
31 | vpc_id = aws_vpc.space_coyote.id
32 |
33 | tags = {
34 | Name = "${var.prefix}-internet-gateway"
35 | }
36 | }
37 |
38 | resource "aws_route_table" "space_coyote" {
39 | vpc_id = aws_vpc.space_coyote.id
40 |
41 | route {
42 | cidr_block = "0.0.0.0/0"
43 | gateway_id = aws_internet_gateway.space_coyote.id
44 | }
45 | }
46 |
47 | resource "aws_route_table_association" "space_coyote" {
48 | subnet_id = aws_subnet.space_coyote.id
49 | route_table_id = aws_route_table.space_coyote.id
50 | }
--------------------------------------------------------------------------------
/m8/space_coyote/variables.tf:
--------------------------------------------------------------------------------
1 | variable "prefix" {
2 | type = string
3 | description = "(Required) This prefix will be included in the name of most resources."
4 | }
5 |
6 | variable "project" {
7 | type = string
8 | description = "(Required) Application project name."
9 | }
10 |
11 | variable "billing" {
12 | type = string
13 | description = "(Required) Billing code for project."
14 | }
15 |
16 | variable "environment" {
17 | type = string
18 | description = "(Required) Application environment for deployment."
19 | }
20 |
21 | variable "region" {
22 | type = string
23 | description = "(Optional) The region where the resources are created. Defaults to us-east-1."
24 | default = "us-east-1"
25 | }
26 |
27 | variable "address_space" {
28 | type = string
29 | description = "(Optional) The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created. Defaults to 10.0.0.0/16."
30 | default = "10.0.0.0/16"
31 | }
32 |
33 | variable "subnet_prefix" {
34 | type = string
35 | description = "(Optional) The address prefix to use for the subnet. Defaults to 10.0.10.0/24."
36 | default = "10.0.10.0/24"
37 | }
38 |
39 | variable "instance_type" {
40 | type = string
41 | description = "(Optional) Specifies the AWS instance type. Defaults to t3.micro."
42 | default = "t3.micro"
43 | }
--------------------------------------------------------------------------------
/m4/variables.tf:
--------------------------------------------------------------------------------
1 | variable "prefix" {
2 | type = string
3 | description = "(Required) This prefix will be included in the name of most resources."
4 | }
5 |
6 | variable "project" {
7 | type = string
8 | description = "(Required) Application project name."
9 | }
10 |
11 | variable "environment" {
12 | type = string
13 | description = "(Required) Application environment for deployment."
14 | }
15 |
16 | variable "region" {
17 | type = string
18 | description = "(Optional) The region where the resources are created. Defaults to us-east-1."
19 | default = "us-east-1"
20 | }
21 |
22 | variable "address_space" {
23 | type = string
24 | description = "(Optional) The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created. Defaults to 10.0.0.0/16."
25 | default = "10.0.0.0/16"
26 | }
27 |
28 | variable "subnet_prefix" {
29 | type = string
30 | description = "(Optional) The address prefix to use for the subnet. Defaults to 10.0.10.0/24."
31 | default = "10.0.10.0/24"
32 | }
33 |
34 | variable "instance_type" {
35 | type = string
36 | description = "(Optional) Specifies the AWS instance type. Defaults to t3.micro."
37 | default = "t3.micro"
38 | }
39 |
40 | variable "height" {
41 | type = string
42 | default = "400"
43 | description = "(Optional) Image height in pixels. Defaults to 400"
44 | }
45 |
46 | variable "width" {
47 | type = string
48 | default = "600"
49 | description = "(Optional) Image width in pixels. Defaults to 600"
50 | }
51 |
52 | variable "placeholder" {
53 | type = string
54 | default = "placedog.net"
55 | description = "(Optional) Diamond Dog URL. Defaults to placedog.net."
56 | }
57 |
--------------------------------------------------------------------------------
/m9/NetworkREADME.md:
--------------------------------------------------------------------------------
1 | # AWS Networking Terraform module
2 |
3 | Terraform module which creates a VPC, Subnets, Internet Gateway and Route Table associations on AWS with all (or almost all) features provided by Terraform AWS provider.
4 |
5 | ## Usage
6 |
7 | ### VPC with Subnets
8 |
9 | Example:
10 |
11 | ```hcl
12 | module "networking" {
13 | source = "app.terraform.io/example-org-6cde13/networking/aws"
14 | version = "1.0.0"
15 | region = "us-east-1"
16 | prefix = "sc"
17 | environment = "development"
18 | address_space = "10.0.0.0/16"
19 | subnet_prefix = "10.0.10.0/24"
20 | }
21 | ```
22 |
23 | ## Requirements
24 |
25 | | Name | Version |
26 | |------|---------|
27 | | terraform | >1.0.0 |
28 | | aws | ~>5.0 |
29 |
30 | ## Providers
31 |
32 | | Name | Version |
33 | |------|---------|
34 | | aws | ~>5.0 |
35 |
36 | ## Inputs
37 |
38 | | Name | Description | Type | Default | Required |
39 | |------|-------------|------|---------|:--------:|
40 | | address\_space | (Required) The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created. Example: 10.0.0.0/16. | `string` | n/a | yes |
41 | | environment | (Required) Application environment for deployment, defaults to development. | `string` | n/a | yes |
42 | | prefix | (Required) This prefix will be included in the name of most resources. | `string` | n/a | yes |
43 | | region | (Optional) The region where the resources are created. Defaults to us-east-1. | `string` | `"us-east-1"` | no |
44 | | subnet\_prefix | (Optional) The address prefix to use for the subnet. Defaults to 10.0.10.0/24. | `string` | `"10.0.10.0/24"` | no |
45 |
46 | ## Outputs
47 |
48 | | Name | Description |
49 | |------|-------------|
50 | | subnet\_id | Subnet id for application |
51 | | vpc\_id | VPC id for application |
--------------------------------------------------------------------------------
/m6/README.md:
--------------------------------------------------------------------------------
1 | # Applying Policy as Code
2 |
3 | A major benefit of TFC is access to Sentinel, and your security team is pumped to put some guardrails on what application teams can do. We are going to create a few policies, apply them to the workspace, and show how the checks can fail.
4 |
5 | ## Demos
6 |
7 | - Create Sentinel policies and policy sets
8 | - Apply policy sets to the organization and application workspace
9 | - Test the policies by updating the application code
10 |
11 | ### Create Sentinel policies and policy sets
12 |
13 | First we need a place to store our policies, namely a Policy Set. Sentinel is a policy as code framework, so just like Terraform code we should store Sentinel code in a Version Control System like GitHub.
14 |
15 | The policies to be created are in the `m6` directory. Create a policy set in Terraform Cloud by going to the organization settings and clicking on **Policy Sets**. Choose to apply the policy set globally.
16 |
17 | Then create policies for each of the files in the `m6` directory, and associate them with the policy set you just created.
18 |
19 | ### Test the policies by updating the application code
20 |
21 | ### Manually Run a Plan
22 |
23 | Navigate to your diamond dogs development workspace and queue a plan.
24 |
25 | You will see that the plan was successful, but there was a policy failure. We need to fix our code!
26 |
27 | Now, add the **Billing** tag to the default tags for the AWS provider in your main.tf file (a sketch of the updated provider block follows this README). You can do so directly in GitHub or through the local copy of your repository. Be sure to have the `development` branch checked out.
28 |
29 | This time, the Sentinel policy should pass because your EC2 instance now has both tags. Each time you push a change to the development branch, you'll trigger a new Terraform run. Keep trying until you pass the Sentinel policy check.
30 |
31 | ## More Policy as Code resources
32 |
33 | You can find policy libraries on the [Terraform Registry](https://registry.terraform.io/browse/policies) and [GitHub](https://github.com/hashicorp/terraform-sentinel-policies).
34 |
--------------------------------------------------------------------------------
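For reference, the **Billing** tag fix described in m6/README.md amounts to one extra entry in the AWS provider's `default_tags` block in the diamond-dogs `main.tf`. A minimal sketch, assuming the literal billing code "314159" used elsewhere in these files; you could just as well introduce a `billing` variable the way the space_coyote configuration does, since `enforce-mandatory-tags.sentinel` only checks that the tag keys are present:

```hcl
provider "aws" {
  region = var.region
  default_tags {
    tags = {
      Owner       = "Globomantics"
      Project     = var.project
      Environment = var.environment
      # Added to satisfy enforce-mandatory-tags.sentinel, which requires
      # both Environment and Billing on every aws_instance.
      Billing     = "314159"
    }
  }
}
```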
/m6/restrict-ingress-sg-rule-ssh.sentinel:
--------------------------------------------------------------------------------
1 | import "tfplan/v2" as tfplan
2 |
3 | aws_security_groups = filter tfplan.resource_changes as _, resource_changes {
4 | resource_changes.mode is "managed" and
5 | resource_changes.type is "aws_security_group" and
6 | (resource_changes.change.actions contains "create" or
7 | resource_changes.change.actions is ["update"]) and
8 | (resource_changes.change.after.ingress else []) is not empty
9 | }
10 |
11 | aws_security_group_rules = filter tfplan.resource_changes as _, resource_changes {
12 | resource_changes.mode is "managed" and
13 | resource_changes.type is "aws_security_group_rule" and
14 | (resource_changes.change.actions contains "create" or
15 | resource_changes.change.actions is ["update"]) and
16 | resource_changes.change.after.type is "ingress"
17 | }
18 |
19 | ssh_security_groups = filter aws_security_groups as _, asg {
20 | any asg.change.after.ingress as _, ingress {
21 | ingress.to_port is 22 or
22 | (ingress.from_port <= 22 and
23 | ingress.to_port >= 22)
24 | }
25 | }
26 |
27 | ssh_security_group_rules = filter aws_security_group_rules as _, asgr {
28 | asgr.change.after.to_port is 22 or
29 | (asgr.change.after.from_port <= 22 and
30 | asgr.change.after.to_port >= 22)
31 | }
32 |
33 | protocol_security_groups = filter aws_security_groups as _, asg {
34 | all asg.change.after.ingress as _, ingress {
35 | ingress.protocol is "-1"
36 | }
37 | }
38 |
39 | protocol_security_group_rules = filter aws_security_group_rules as _, asgr {
40 | asgr.change.after.protocol is "-1"
41 | }
42 |
43 | deny_public_ssh_security_groups = rule {
44 | all ssh_security_groups as _, ssg {
45 | all ssg.change.after.ingress as _, ingress {
46 | ingress.cidr_blocks not contains "0.0.0.0/0"
47 | }
48 | }
49 | }
50 |
51 | deny_public_ssh_security_group_rules = rule {
52 | all ssh_security_group_rules as _, ssgr {
53 | ssgr.change.after.cidr_blocks not contains "0.0.0.0/0"
54 | }
55 | }
56 |
57 | deny_all_open_protocol_security_groups = rule {
58 | all protocol_security_groups as _, psg {
59 | all psg.change.after.ingress as _, ingress {
60 | ingress.cidr_blocks not contains "0.0.0.0/0"
61 | }
62 | }
63 | }
64 |
65 | deny_all_open_protocol_security_group_rules = rule {
66 | all protocol_security_group_rules as _, psgr {
67 | psgr.change.after.cidr_blocks not contains "0.0.0.0/0"
68 | }
69 | }
70 |
71 | // Ensure no security groups allow ingress from 0.0.0.0/0 to port 22.
72 | main = rule {
73 | deny_public_ssh_security_groups and
74 | deny_public_ssh_security_group_rules and
75 | deny_all_open_protocol_security_groups and
76 | deny_all_open_protocol_security_group_rules
77 | }
--------------------------------------------------------------------------------
/m3/README.md:
--------------------------------------------------------------------------------
1 | # Configuring Your Organization and Workspaces
2 |
3 | We need to sign up for an HCP account, connect it to Terraform Cloud, and create an organization and workspace for the new application. Along the way we can take a brief tour and get our local Terraform client configured to use TFC.
4 |
5 | ## Demos
6 |
7 | - Signing up a new organization for Terraform Cloud
8 | - Tour of the user interface.
9 | - Configuring the Terraform CLI to work with TFC.
10 |
11 | ## Signing up a new organization for Terraform Cloud
12 |
13 | Let's start with setting up an HCP Account and use that to connect to Terraform Cloud. We recommend not using any production Terraform Cloud accounts for this course.
14 |
15 | Click on the link below and create an account if you do not already have one. Please be sure to validate your email address.
16 |
17 | [https://portal.cloud.hashicorp.com/sign-up](https://portal.cloud.hashicorp.com/sign-up)
18 |
19 | After you have validated your email you will be taken to the HCP portal. From there, you can click on the Terraform Cloud service and link your HCP account to Terraform Cloud. Then you can create an organization.
20 |
21 | The organization name needs to be unique so be creative.
22 |
23 | ## Configuring the Terraform CLI to work with TFC
24 |
25 | Now we are going to create an API token for Terraform Cloud. From your terminal run the following command
26 |
27 | ```
28 | terraform login
29 | ```
30 |
31 | When prompted, enter `yes` to proceed. Click on the URL provided. This will forward you to Terraform Cloud to create an API token.
32 |
33 | Copy the token string and paste that at the "Token for app.terraform.io" prompt.
34 |
35 | If the token was entered successfully you should see the following:
36 |
37 | ```bash
38 | Retrieved token for user tfcuser
39 |
40 |    [Terraform Cloud ASCII art logo]
41 |
42 |    Welcome to Terraform Cloud!
43 |
44 |    Documentation: terraform.io/docs/cloud
45 |
46 | New to TFC? Follow these steps to instantly apply an example configuration:
47 |
48 |    $ git clone https://github.com/hashicorp/tfc-getting-started.git
49 |    $ cd tfc-getting-started
50 |    $ scripts/setup.sh
51 | ```
52 |
--------------------------------------------------------------------------------
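If you prefer not to use `terraform login`, the same token can be supplied by hand through a `credentials` block in the Terraform CLI configuration file (`~/.terraformrc` on Linux/macOS, `terraform.rc` in `%APPDATA%` on Windows). A minimal sketch; the token value is a placeholder you would replace with the API token generated above:

```hcl
credentials "app.terraform.io" {
  # Paste the user API token generated in the Terraform Cloud UI here.
  token = "REPLACE_WITH_YOUR_TFC_API_TOKEN"
}
```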
/m8/space_coyote/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | aws = {
4 | source = "hashicorp/aws"
5 | version = "~>5.0"
6 | }
7 | }
8 | }
9 |
10 | provider "aws" {
11 | region = var.region
12 | default_tags {
13 | tags = {
14 | Owner = "Globomantics"
15 | Project = var.project
16 | Environment = var.environment
17 | Billing = var.billing
18 | }
19 | }
20 | }
21 |
22 | module "networking" {
23 | source = "./networking"
24 | region = var.region
25 | prefix = var.prefix
26 | environment = var.environment
27 | address_space = "10.0.0.0/16"
28 | subnet_prefix = "10.0.10.0/24"
29 | }
30 |
31 | module "s3_bucket" {
32 | source = "terraform-aws-modules/s3-bucket/aws"
33 | version = "3.15.1"
34 | bucket_prefix = "${var.prefix}-${var.environment}"
35 |
36 | versioning = {
37 | enabled = true
38 | }
39 |
40 | }
41 |
42 | resource "aws_security_group" "space_coyote" {
43 | name = "${var.prefix}-security-group"
44 |
45 | vpc_id = module.networking.vpc_id
46 |
47 | ingress {
48 | from_port = 80
49 | to_port = 80
50 | protocol = "tcp"
51 | cidr_blocks = ["0.0.0.0/0"]
52 | }
53 |
54 | ingress {
55 | from_port = 443
56 | to_port = 443
57 | protocol = "tcp"
58 | cidr_blocks = ["0.0.0.0/0"]
59 | }
60 |
61 | egress {
62 | from_port = 0
63 | to_port = 0
64 | protocol = "-1"
65 | cidr_blocks = ["0.0.0.0/0"]
66 | }
67 |
68 | tags = {
69 | Name = "${var.prefix}-security-group"
70 | }
71 |
72 | lifecycle {
73 | create_before_destroy = true
74 | }
75 | }
76 |
77 |
78 | data "aws_ami" "ubuntu" {
79 | most_recent = true
80 |
81 | filter {
82 | name = "name"
83 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
84 | }
85 |
86 | filter {
87 | name = "virtualization-type"
88 | values = ["hvm"]
89 | }
90 |
91 | owners = ["099720109477"] # Canonical
92 | }
93 |
94 | resource "aws_eip" "space_coyote" {
95 | instance = aws_instance.space_coyote.id
96 | }
97 |
98 | resource "aws_eip_association" "space_coyote" {
99 | instance_id = aws_instance.space_coyote.id
100 | allocation_id = aws_eip.space_coyote.id
101 | }
102 |
103 | resource "aws_instance" "space_coyote" {
104 | ami = data.aws_ami.ubuntu.id
105 | instance_type = var.instance_type
106 | associate_public_ip_address = true
107 | subnet_id = module.networking.subnet_id
108 | vpc_security_group_ids = [aws_security_group.space_coyote.id]
109 |
110 | user_data = templatefile("${path.module}/files/deploy_app.sh", {})
111 |
112 | tags = {
113 | Name = "${var.prefix}-space_coyote-instance"
114 | }
115 | }
--------------------------------------------------------------------------------
/m4/README.md:
--------------------------------------------------------------------------------
1 | # Deploying with the CLI and Terraform Cloud
2 |
3 | Let's talk about the various options for deployment with Terraform Cloud. The new application you're using is not in source control at the moment, so we are going to deploy it using the CLI. We'll change that momentarily.
4 |
5 | ## Demos
6 |
7 | - Creating a workspace on TFC with the CLI workflow
8 | - Configuring variable values and environment variables
9 | - Updating the Terraform configuration
10 | - Deploying the configuration with the CLI
11 |
12 | ### Creating a workspace on TFC with the CLI workflow
13 |
14 | Before you deploy your configuration, make a copy of the contents of the `m4` directory to a new directory named `diamond-dogs-app`. This will be the directory we use for the rest of the course.
15 |
16 | Workspaces can be created using the UI, CLI, or API. To create a new workspace for the configuration, add a `cloud` block to the `terraform.tf` file with your organization name and a workspace name, as shown below. Save the file and run `terraform init`.
17 |
18 | `terraform.tf`
19 |
20 | ```hcl
21 | terraform {
22 | cloud {
23 | organization = ""
24 |
25 | workspaces {
26 | name = "diamonddogs-app-useast1-dev"
27 | }
28 | }
29 | }
30 | ```
31 |
32 | While this will create your workspace, it **will not** configure any variables or environment variables. We can do that next.
33 |
34 | ### Configuring variable values and environment variables
35 |
36 | Terraform Cloud uses the workspace's Terraform and Environment variables for all plans and applies in that workspace. Workspaces using Terraform 0.10.0 or later can also load default values from any \*.auto.tfvars files in the configuration. To add multiple variables at once, you may want to use the Terraform Cloud provider or the variables API (a provider-based sketch follows this README).
37 |
38 | Terraform requires credentials in order to communicate with your cloud provider's API. These API keys should never be stored directly in your terraform code. We will use Terraform Cloud environment variables to store our sensitive cloud credentials for AWS.
39 |
40 | - In the `Variable Sets` section of your organization settings, create a new variable set named `aws-creds`
41 | - Add an Environment variable named `AWS_ACCESS_KEY_ID` with your AWS Access Key
42 | - Add a second Environment variable named `AWS_SECRET_ACCESS_KEY` with your AWS Secret Key. Be sure to mark this variable as sensitive. Sensitive variables are not displayed in the UI and can only be overwritten, not read.
43 |
44 | ### Configuring variables in the workspace
45 |
46 | You will also need to go into the workspace and configure the variables. You can do this by clicking on the `Variables` section in the workspace. You will need to configure the `prefix`, `environment`, and `project` variables.
47 |
48 | ### Deploying the configuration with the CLI
49 |
50 | We can first run a `terraform plan` and then a `terraform apply` to generate our infrastructure. The output of the remote operations will be streamed to our local console.
51 |
52 | ```bash
53 | terraform plan
54 | ```
55 |
56 | ```bash
57 | terraform apply
58 | ```
59 |
--------------------------------------------------------------------------------
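The variable set and workspace variables described in m4/README.md can also be managed as code with the Terraform Cloud (tfe) provider, which is the "Terraform Cloud Provider" route the README mentions. A hedged sketch, not part of the course files: `YOUR_ORGANIZATION` is a placeholder, the resource names are illustrative, and it assumes the AWS keys are passed in as input variables.

```hcl
terraform {
  required_providers {
    tfe = {
      source  = "hashicorp/tfe"
      version = "~>0.50"
    }
  }
}

variable "aws_access_key_id" {
  type      = string
  sensitive = true
}

variable "aws_secret_access_key" {
  type      = string
  sensitive = true
}

# A variable set holding the shared AWS credentials
resource "tfe_variable_set" "aws_creds" {
  name         = "aws-creds"
  organization = "YOUR_ORGANIZATION"
}

resource "tfe_variable" "access_key" {
  key             = "AWS_ACCESS_KEY_ID"
  value           = var.aws_access_key_id
  category        = "env"
  variable_set_id = tfe_variable_set.aws_creds.id
}

resource "tfe_variable" "secret_key" {
  key             = "AWS_SECRET_ACCESS_KEY"
  value           = var.aws_secret_access_key
  category        = "env"
  sensitive       = true
  variable_set_id = tfe_variable_set.aws_creds.id
}

# Look up the workspace created by `terraform init` and attach the set to it
data "tfe_workspace" "diamond_dogs_dev" {
  name         = "diamonddogs-app-useast1-dev"
  organization = "YOUR_ORGANIZATION"
}

resource "tfe_workspace_variable_set" "aws_creds" {
  workspace_id    = data.tfe_workspace.diamond_dogs_dev.id
  variable_set_id = tfe_variable_set.aws_creds.id
}

# One of the workspace-level Terraform variables the README asks you to set
resource "tfe_variable" "environment" {
  key          = "environment"
  value        = "development"
  category     = "terraform"
  workspace_id = data.tfe_workspace.diamond_dogs_dev.id
}
```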
/m4/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.region
3 | default_tags {
4 | tags = {
5 | Owner = "Globomantics"
6 | Project = var.project
7 | Environment = var.environment
8 | }
9 | }
10 | }
11 |
12 |
13 | resource "aws_vpc" "diamond_dogs" {
14 | cidr_block = var.address_space
15 | enable_dns_hostnames = true
16 |
17 | tags = {
18 | name = "${var.prefix}-vpc-${var.region}"
19 | environment = var.environment
20 | }
21 | }
22 |
23 | resource "aws_subnet" "diamond_dogs" {
24 | vpc_id = aws_vpc.diamond_dogs.id
25 | cidr_block = var.subnet_prefix
26 |
27 | tags = {
28 | name = "${var.prefix}-subnet"
29 | }
30 | }
31 |
32 | resource "aws_security_group" "diamond_dogs" {
33 | name = "${var.prefix}-security-group"
34 |
35 | vpc_id = aws_vpc.diamond_dogs.id
36 |
37 | ingress {
38 | from_port = 80
39 | to_port = 80
40 | protocol = "tcp"
41 | cidr_blocks = ["0.0.0.0/0"]
42 | }
43 |
44 | ingress {
45 | from_port = 443
46 | to_port = 443
47 | protocol = "tcp"
48 | cidr_blocks = ["0.0.0.0/0"]
49 | }
50 |
51 | egress {
52 | from_port = 0
53 | to_port = 0
54 | protocol = "-1"
55 | cidr_blocks = ["0.0.0.0/0"]
56 | }
57 |
58 | tags = {
59 | Name = "${var.prefix}-security-group"
60 | }
61 |
62 | lifecycle {
63 | create_before_destroy = true
64 | }
65 | }
66 |
67 | resource "aws_internet_gateway" "diamond_dogs" {
68 | vpc_id = aws_vpc.diamond_dogs.id
69 |
70 | tags = {
71 | Name = "${var.prefix}-internet-gateway"
72 | }
73 | }
74 |
75 | resource "aws_route_table" "diamond_dogs" {
76 | vpc_id = aws_vpc.diamond_dogs.id
77 |
78 | route {
79 | cidr_block = "0.0.0.0/0"
80 | gateway_id = aws_internet_gateway.diamond_dogs.id
81 | }
82 | }
83 |
84 | resource "aws_route_table_association" "diamond_dogs" {
85 | subnet_id = aws_subnet.diamond_dogs.id
86 | route_table_id = aws_route_table.diamond_dogs.id
87 | }
88 |
89 | data "aws_ami" "ubuntu" {
90 | most_recent = true
91 |
92 | filter {
93 | name = "name"
94 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
95 | }
96 |
97 | filter {
98 | name = "virtualization-type"
99 | values = ["hvm"]
100 | }
101 |
102 | owners = ["099720109477"] # Canonical
103 | }
104 |
105 | resource "aws_instance" "diamond_dogs" {
106 | ami = data.aws_ami.ubuntu.id
107 | instance_type = var.instance_type
108 | associate_public_ip_address = true
109 | subnet_id = aws_subnet.diamond_dogs.id
110 | vpc_security_group_ids = [aws_security_group.diamond_dogs.id]
111 |
112 | user_data_replace_on_change = true
113 | user_data = templatefile("${path.module}/files/deploy_app.sh", {
114 | placeholder = var.placeholder
115 | width = var.width
116 | height = var.height
117 | project = var.project
118 | })
119 |
120 | tags = {
121 | Name = "${var.prefix}-diamond_dogs-instance"
122 | }
123 | }
124 |
125 | resource "aws_eip" "diamond_dogs" {
126 | instance = aws_instance.diamond_dogs.id
127 | }
128 |
129 | resource "aws_eip_association" "diamond_dogs" {
130 | instance_id = aws_instance.diamond_dogs.id
131 | allocation_id = aws_eip.diamond_dogs.id
132 | }
--------------------------------------------------------------------------------
/m9/README.md:
--------------------------------------------------------------------------------
1 | # Using the Private Module Registry
2 |
3 | While migrating Team Space Coyote, you found some useful modules you'd like to make available to other groups. You're going to create a private module registry, add the modules, and update Team Space Coyote's deployment to use the new module location.
4 |
5 | ## Demos
6 |
7 | - Add public modules and providers to the registry
8 | - Create a module with the private registry
9 | - Update the Space Coyote configuration
10 |
11 | ### Add public modules and providers to the registry
12 |
13 | Click on the "Registry" tab at the top of the page. From the providers tab, click on the "Search public registry" button. Search for the AWS provider and add it to the registry.
14 |
15 | Click on the modules tab, and click on "Search public registry". Search for the `s3-bucket` module. Add the module to the private registry.
16 |
17 | ### Add private networking module to the registry
18 |
19 | The Space Coyote configuration utilizes a local `networking` module for building out the application's networking components including vpc, subnets, route tables, gateways, etc. This code is not only relevant for the Space Coyote team, but is common networking configuration that will prove valuable for other teams as well. Therefore the Space Coyote team has offered to publish this as a private module to the registry for other teams to be able to use.
20 |
21 | Create a new private github repository named `terraform-aws-networking`:
22 |
23 | - Private
24 | - Add a README.md file
25 | - Add a .gitignore template for Terraform
26 |
27 | Add the `main.tf`, `variables.tf` and `outputs.tf` of the local networking module to this repository. Create a `README.md` for the repository that contains the contents of the NetworkREADME.md file in the m9 directory.
28 |
29 | After adding the files to the repository, create a new version for the module by creating a Tag with the value `1.0.0`.
30 |
31 | In the Terraform Cloud UI, you will need to create an OAuth based VCS connection to GitHub, if you haven't already done so. The GitHub App connection will not work with the private registry.
32 |
33 | In the organization settings, go to the **Providers** area under Version Control and add a VCS provider. Select *GitHub.com (Custom)* from the list of providers and walk through the steps to create the connection via OAuth.
34 |
35 | Click on the "Registry" tab at the top of the page, then click the "Publish" button followed by "Module". Click on the "GitHub" button and select the `terraform-aws-networking` repository that you just created.
36 |
37 | Click on the "Publish module" button.
38 |
39 | After the module is completely published, please select version `1.0.0` of the module by clicking `Change` under **Version** in the upper left section of your screen.
40 |
41 | ### Update the Space Coyote configuration
42 |
43 | Update the Space Coyote configuration `main.tf` file to remove the local `networking` module and start using the new `networking` module in the Terraform Cloud private module registry. The current local module block looks like this:
44 |
45 | ```hcl
46 | module "networking" {
47 | source = "./networking"
48 | region = var.region
49 | prefix = var.prefix
50 | environment = var.environment
51 | address_space = "10.0.0.0/16"
52 | subnet_prefix = "10.0.10.0/24"
53 | }
54 | ```
55 |
56 | You can copy the module creation code from the "Usage Instructions" section of the module's page in your Private Module Registry.
57 |
58 | ```hcl
59 | module "networking" {
60 | source = "app.terraform.io/example-org-6cde13/networking/aws"
61 | version = "1.0.0"
62 | region = "us-east-1"
63 | prefix = "sc"
64 | environment = "development"
65 | address_space = "10.0.0.0/16"
66 | subnet_prefix = "10.0.10.0/24"
67 | }
68 | ```
69 |
70 | You can then initialize, validate, and apply the change to start using the new centralized module from Terraform Cloud in place of the local one.
71 |
72 | ```bash
73 | terraform init
74 | terraform validate
75 | terraform apply
76 | ```
77 |
--------------------------------------------------------------------------------
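Publishing the networking module could also be scripted with the tfe provider rather than clicked through the UI, as a rough sketch only: it assumes the OAuth VCS connection from m9/README.md already exists, and `<GITHUB_USER>` and `<OAUTH_TOKEN_ID>` are placeholders you would fill in yourself.

```hcl
resource "tfe_registry_module" "networking" {
  vcs_repo {
    # <GITHUB_USER> is your GitHub account; <OAUTH_TOKEN_ID> is the ID of the
    # OAuth VCS connection configured in the organization settings.
    display_identifier = "<GITHUB_USER>/terraform-aws-networking"
    identifier         = "<GITHUB_USER>/terraform-aws-networking"
    oauth_token_id     = "<OAUTH_TOKEN_ID>"
  }
}
```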
/m6/restrict-ec2-instance-type.sentinel:
--------------------------------------------------------------------------------
1 | # This policy uses the Sentinel tfplan import to require that
2 | # all EC2 instances have instance types from an allowed list
3 |
4 | ##### Imports #####
5 |
6 | import "tfplan"
7 | import "strings"
8 |
9 | ##### Functions #####
10 |
11 | # Find all resources of a specific type from all modules using the tfplan import
12 | find_resources_from_plan = func(type) {
13 |
14 | resources = {}
15 |
16 | # Iterate over all modules in the tfplan import
17 | for tfplan.module_paths as path {
18 | # Iterate over the named resources of desired type in the module
19 | for tfplan.module(path).resources[type] else {} as name, instances {
20 | # Iterate over resource instances
21 | for instances as index, r {
22 |
23 | # Get the address of the instance
24 | if length(path) == 0 {
25 | # root module
26 | address = type + "." + name + "[" + string(index) + "]"
27 | } else {
28 | # non-root module
29 | address = "module." + strings.join(path, ".module.") + "." +
30 | type + "." + name + "[" + string(index) + "]"
31 | }
32 |
33 | # Add the instance to resources map, setting the key to the address
34 | resources[address] = r
35 | }
36 | }
37 | }
38 |
39 | return resources
40 | }
41 |
42 | find_resources_from_environment = func(resources, environments) {
43 | resources_in_environment = {}
44 | for resources as address, r {
45 | if environments contains r.applied.tags_all.Environment {
46 | resources_in_environment[address] = r
47 | }
48 | }
49 | return resources_in_environment
50 | }
51 |
52 | # Validate that all instances of a specified resource type being modified have
53 | # a specified top-level attribute in a given list
54 | validate_attribute_in_list = func(type, attribute, allowed_values, environments) {
55 |
56 | validated = true
57 |
58 | # Get all resource instances of the specified type
59 | resource_instances_type = find_resources_from_plan(type)
60 |
61 | # Get all resource instances of the specified type in the specified environment
62 | resource_instances = find_resources_from_environment(resource_instances_type, environments)
63 |
64 | # If there are no instances of the specified type, return true
65 | if length(resource_instances) == 0 {
66 | print("No instances of type", type, "in environments", environments, "found.")
67 | return true
68 | }
69 | # Loop through the resource instances
70 | for resource_instances as address, r {
71 |
72 | # Skip resource instances that are being destroyed
73 | # to avoid unnecessary policy violations.
74 | # Used to be: if length(r.diff) == 0
75 | if r.destroy and not r.requires_new {
76 | print("Skipping resource", address, "that is being destroyed.")
77 | continue
78 | }
79 |
80 | # Determine if the attribute is computed
81 | if r.diff[attribute].computed else false is true {
82 | print("Resource", address, "has attribute", attribute,
83 | "that is computed.")
84 | # If you want computed values to cause the policy to fail,
85 | # uncomment the next line.
86 | # validated = false
87 | } else {
88 | # Validate that each instance has allowed value
89 | if r.applied[attribute] else "" not in allowed_values {
90 | print("Resource", address, "has attribute", attribute, "with value",
91 | r.applied[attribute] else "",
92 | "that is not in the allowed list:", allowed_values)
93 | validated = false
94 | }
95 | }
96 |
97 | }
98 | return validated
99 | }
100 |
101 | ##### Lists #####
102 |
103 | # Allowed EC2 Instance Types
104 | # We don't include t2.nano or t2.micro to illustrate overriding failed policy
105 | allowed_types = [
106 | "t2.small",
107 | "t2.medium",
108 | "t2.large",
109 | ]
110 |
111 | environment_tags = ["development"]
112 |
113 | ##### Rules #####
114 |
115 | # Call the validation function
116 | instances_validated = validate_attribute_in_list("aws_instance",
117 | "instance_type", allowed_types, environment_tags)
118 |
119 | # Main rule
120 | main = rule {
121 | instances_validated
122 | }
123 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Getting-Started-with-Terraform-Cloud
2 |
3 | Exercise files to go with my Pluralsight course [Getting Started with Terraform Cloud](http://www.pluralsight.com/courses/terraform-cloud-getting-started).
4 |
5 | ## Course Description
6 |
7 | As Terraform is adopted across the industry, companies discover they need to collaborate within teams and across the organization. This course will teach you how to use Terraform Cloud to deliver collaboration and automation in a managed environment.
8 |
9 | As the use of Terraform expands in an organization, a need arises for **simple collaboration**, **workflow automation**, and a **reliable** and **consistent execution** environment. In this course, Getting Started with Terraform Cloud, you’ll learn to leverage the services and features available in the Terraform Cloud managed service. First, you’ll explore **what Terraform Cloud is** and **how to onboard your organization**. Next, you’ll discover **how to set up an automated workflow that is integrated with versioned source control** . Finally, you’ll learn how to use integrations like **Sentinel** and the **private module registry**. When you’re finished with this course, you’ll have the skills and knowledge of Terraform Cloud needed to successfully migrate your organization.
10 |
11 | ## Using the exercise files
12 |
13 | Unlike some of my other Terraform courses, this course makes extensive use of the Terraform Cloud UI. As such, there are going to be fewer code examples and more directions on what to do to follow along with the demos in the course modules.
14 |
15 | Each module directory has a `README.md` file that describes the exercises in that module and general directions for following along. In some cases, this is not a step-by-step guide, but a simple overview. You can watch the Pluralsight videos to see the actual steps.
16 |
17 | ## Pre-requisites and Expectations
18 |
19 | This course is **not going to teach you Terraform**. I expect that you are coming to this course with at least a beginner's level of knowledge on using Terraform, and you are curious about leveraging Terraform Cloud for your organization or personal projects. It would also be good to be familiar with some basic Git concepts, as we are going to use GitHub for several of the exercises.
20 |
21 | You should also have the following ready to go:
22 |
23 | * An AWS account with permissions to create networking and compute resources
24 | * Terraform 1.1 or newer and git installed locally
25 | * A code editor like VS Code
26 | * An email you want to use with Terraform Cloud
27 | * A personal GitHub account you can link to Terraform Cloud
28 |
29 | As part of the exercises, you will sign up for a Terraform Cloud account and create a new organization.
30 |
31 | ## AWS Environment
32 |
33 | You will need access to an AWS environment with permissions to create resources in EC2 and S3. I recommend creating a throwaway account just for this course. The exercises have been tested with AWS region `us-east-1`. The input variable `region` has `us-east-1` set as the default, but you can supply a different region if you prefer.
34 |
35 | You will need to generate an AWS access key to run through the exercises. You can do this through the IAM console in a browser (*hint*: it's under security credentials for your user) by following the [official AWS docs](https://aws.amazon.com/premiumsupport/knowledge-center/create-access-key/). I'd recommend assigning the `AdministratorAccess` policy to your user to give you permissions to do everything in the account. Also, enable 2FA for the user account!
36 |
37 | ## Line Endings
38 |
39 | An issue I have discovered from time to time is that Terraform doesn't much like the Windows style of ending a line with both a Carriage Return (CR) and a Line Feed (LF), commonly referred to as CRLF. If you are experiencing strange parsing issues, change the line ending to be Line Feed (LF) only. In VS Code this can be done by clicking on the CRLF in the lower right corner and changing it to LF. This primarily affects template files or scripts being sent to a Linux machine for processing.
40 |
41 | ## Money 💰
42 |
43 |
44 | A gentle reminder about cost. The course will have you creating resources in AWS. I have tried to use the [Free-tier](https://aws.amazon.com/free/) eligible items.
45 |
46 | In Terraform Cloud, you will start a trial of the Teams & Governance plan, but you do not need to provide any payment information. The trial only lasts for 30 days, after which it reverts to the Free plan. I'd recommend completing the course in less than 30 days so you don't lose your progress.
47 |
48 | When you complete your exercises, make sure to queue a destroy plan to delete all the infrastructure you've created. You can do so from the Settings area of each Workspace. Select the **Destruction and Deletion** option and click on *Queue destroy plan*.
49 |
50 | ## Conclusion
51 |
52 | I hope you enjoy taking this course as much as I did creating it. I'd love to hear feedback and suggestions for revisions. Find me on Twitter (@ned1313) or add an issue to this repository.
53 |
54 | Thanks and happy automating!
55 |
56 | Ned
--------------------------------------------------------------------------------
/m8/README.md:
--------------------------------------------------------------------------------
1 | # Migrating to Terraform Cloud
2 |
3 | Team Space Coyote is ready to migrate their application to Terraform Cloud. You are going to help them create workspaces, set up a CLI workflow, and migrate the state of the existing deployment.
4 |
5 | ## Demos
6 |
7 | - Create the Space Coyote deployment to test migration
8 | - Create the necessary resources in Terraform Cloud
9 | - Update and migrate the Space Coyote deployment
10 |
11 | ### Create the Space Coyote deployment to test migration
12 |
13 | First copy the `space_coyote` directory from the `m8` directory to a new directory named `space-coyote-app`. This will be the directory we use for the rest of the course.
14 |
15 | Deploy the Space Coyote application by initializing Terraform, validating the code, and executing an apply.
16 |
17 | ```bash
18 | # Set AWS credentials or use existing profile
19 | terraform init
20 | terraform validate
21 | terraform apply
22 | ```
23 |
24 | You can confirm that the application has been deployed by browsing to the `space_coyote_url` output, and that Terraform is storing the state of the environment locally inside a `terraform.tfstate` file.
25 |
26 | ```bash
27 | terraform show
28 | ```
29 |
30 | ### Create the necessary resources in Terraform Cloud
31 |
32 | Team Space Coyote is ready to migrate their application to Terraform Cloud. You are going to help them create workspaces, set up a CLI workflow, and migrate the state of the existing deployment.
33 |
34 | To migrate the application to Terraform Cloud, create a `backend.tf` file with the following code, replacing the `YOURORGANIZATION` placeholder with your Terraform Cloud organization name.
35 |
36 | `backend.tf`
37 |
38 | ```hcl
39 | terraform {
40 | cloud {
41 | organization = "YOURORGANIZATION"
42 |
43 | workspaces {
44 | tags = ["spacecoyote","apps"]
45 | }
46 | }
47 | }
48 | ```
49 |
50 | Validate the code is correct with a `terraform validate`
51 |
52 | ### Update and migrate the Space Coyote deployment
53 |
54 | Begin the migration to Terraform Cloud by issuing a `terraform init` and proceeding to answer the migration questions presented. The new workspace name should be `spacecoyote-app-useast1-dev`.
55 |
56 | After migration you should see a new state file on the "States" tab of your `spacecoyote-app-useast1-dev` Terraform Cloud workspace.
57 |
58 | You can confirm that the output of a `terraform show` matches the output of the same command before the migration. Once the migration is complete delete the local terraform.tfstate file by running `rm terraform.tfstate`.
59 |
60 | ### Secure Space Coyote variables within Terraform Cloud
61 |
62 | Within Terraform Cloud, create the required variables for the Space Coyote application. These can be found in the `terraform.tfvars` file.
63 |
64 | ```hcl
65 | prefix = "sc"
66 | project = "space_coyote"
67 | environment = "development"
68 | billing = "314159"
69 | ```
70 |
71 | Next, update the variable set from the previous modules to allow the new workspace access.
72 |
73 | Finally, create the space_coyote_devs team and grant it write access to the workspace.
74 |
75 | Validate that all the required migration steps have been satisfied by running a successful `terraform apply`.
76 |
77 | Congratulations!!! Now all future changes to Terraform will be executed against Terraform Cloud, and the features of Terraform Cloud can be leveraged fully by the Space Coyote application.
78 |
79 | ### Query information from Diamond Dogs TFC Workspaces
80 |
81 | Now that both the Diamond Dogs and Space Coyote state is stored within Terraform Cloud, the Space Coyote team has decided that they would like to include a link to the Diamond Dog's application. How can they do this in an automated fashion?
82 |
83 | They can use both the `tfe_outputs` data source and Terraform Cloud Run Triggers.
84 |
85 | ### Allow state access for SC
86 |
87 | We are going to be pulling information from the Diamond Dogs outputs through the `tfe_outputs` data source. Before we trigger a run with our updated configuration, we need to grant the Space Coyote workspace access to the Diamond Dogs dev outputs.
88 |
89 | In the General settings of the Diamond Dogs development workspace, update the **Remote state sharing** setting to share the remote state with the Space Coyote development workspace and click on save settings.
90 |
91 | ### Terraform Cloud Run Triggers
92 |
93 | Terraform Cloud provides a way to connect your workspace to one or more workspaces within your organization, known as "source workspaces". These connections, called run triggers, allow runs to queue automatically in your workspace on successful apply of runs in any of the source workspaces.
94 |
95 | We can now set our Space Coyote development workspace to automatically perform a run and update whenever the Diamond Dogs development workspace has a successful run.
96 |
97 | Select the `Settings` of the Space Coyote dev workspace and choose `Run Triggers`. Specify the `Source Workspace` as the Diamond Dogs dev workspace, and select `Add workspace`. Now anytime the source workspace has a successful apply, the Space Coyote workspace will trigger an automatic run (a tfe provider sketch of this setup follows this README).
98 |
99 | To see this in action, change one of the variable values and trigger a Terraform run in the Diamond Dogs dev workspace. Notice that after the apply is complete, an automatic run is triggered inside the Space Coyote dev workspace. You can view the metadata of this run within the Space Coyote workspace and validate that the run was triggered from the source workspace.
100 |
101 | This ensures that the Diamond Dogs URL output is always up to date within the Space Coyote workspace, because it will pull the remote state of the Diamond Dogs workspace anytime there is a change there.
102 |
--------------------------------------------------------------------------------
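The TFC-side pieces from m8/README.md — the space_coyote_devs team, its workspace access, and the run trigger from the Diamond Dogs dev workspace — could likewise be captured as code with the tfe provider instead of the UI. A hedged sketch with a `YOUR_ORGANIZATION` placeholder and illustrative names; it assumes both workspaces already exist and that the tfe provider is configured as in m8/datasource.tf.

```hcl
data "tfe_workspace" "diamond_dogs_dev" {
  name         = "diamonddogs-app-useast1-dev"
  organization = "YOUR_ORGANIZATION"
}

data "tfe_workspace" "space_coyote_dev" {
  name         = "spacecoyote-app-useast1-dev"
  organization = "YOUR_ORGANIZATION"
}

resource "tfe_team" "space_coyote_devs" {
  name         = "space_coyote_devs"
  organization = "YOUR_ORGANIZATION"
}

# Grant the team write access to the Space Coyote workspace
resource "tfe_team_access" "space_coyote_devs" {
  access       = "write"
  team_id      = tfe_team.space_coyote_devs.id
  workspace_id = data.tfe_workspace.space_coyote_dev.id
}

# Queue a run in Space Coyote whenever Diamond Dogs dev applies successfully
resource "tfe_run_trigger" "diamond_dogs_to_space_coyote" {
  workspace_id  = data.tfe_workspace.space_coyote_dev.id
  sourceable_id = data.tfe_workspace.diamond_dogs_dev.id
}
```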
/m5/README.md:
--------------------------------------------------------------------------------
1 | # Adopting a Git Workflow
2 |
3 | The application is deployed and now you'd like to shift to a VCS-style workflow. The new application will be added to a GitHub repo, and then the workspace will be hooked in. The actual deployment shouldn't change at all.
4 |
5 | ## Demos
6 |
7 | - Move application code to a VCS repository
8 | - Migrate to the VCS workflow
9 | - Use the VCS workflow to deploy an update
10 | - Show how the CLI plan still works
11 |
12 | ### Move application code to a VCS repository
13 |
14 | #### Create a new GitHub repository
15 |
16 | You will need a free GitHub.com account for this exercise. We recommend using a personal account. You can sign up or log in to an existing account at https://github.com/
17 |
18 | Log in to GitHub and create a new repository.
19 |
20 | Use the following settings for the code repository
21 |
22 | - Name: "diamond_dogs"
23 | - Description: Diamond Dogs Wicked Application
24 | - Private repo
25 | - Check "Initialize this repository with a README"
26 | - Add `.gitignore` of type "Terraform"
27 |
28 | Once created, clone the repository to your local machine.
29 |
30 | ```
31 | git clone https://github.com//diamond_dogs.git
32 | cd diamond_dogs
33 | ```
34 |
35 | ### Update your Repository and Commit Changes to GitHub
36 |
37 | In your repository copy the contents of the `diamond-dogs-app` directory to the root of your `diamond_dogs` repository.
38 |
39 | Commit the changes in GitHub.
40 |
41 | ```bash
42 | git add .
43 | git commit -m "terraform code update for diamond_dogs app"
44 | git push
45 | ```
46 |
47 | ### Migrate to the VCS workflow
48 |
49 | #### Create Terraform Cloud VCS Connection
50 |
51 | 1. Login to github in one browser tab.
52 | 2. Login to Terraform Cloud in another browser tab.
53 | 3. Within Terraform Cloud navigate to your Organization Settings.
54 | 4. Click on Providers in Version Control
55 |
56 | For full instructions follow the **Configuring GitHub Access** section of the Terraform Cloud documentation to connect your GitHub account to your Terraform Organization.
57 |
58 | [Configuring GitHub Access](https://www.terraform.io/docs/cloud/vcs/github.html) - https://www.terraform.io/docs/cloud/vcs/github.html
59 |
60 | > Note: The process involves several back and forth changes and is documented well in the link.
61 |
62 | ### Connect Terraform Cloud Workspace to the GitHub `diamond_dogs` repository
63 |
64 | 1. Navigate to and click the diamond dogs workspace
65 | 2. Select Settings >> Version Control
66 | 3. Click the "Connect to version control"
67 | 4. Select the Version Control workflow
68 | 5. Click the VCS Connection in the "Source" section.
69 | 6. Verify you can see repositories and select the `diamond-dogs` github repository.
70 | 7. Select Update VCS Settings
71 | 8. Validate that a new Terraform run will occur on the workspace. Confirm and Apply the Terraform run.
72 |
73 | ### Use the VCS workflow to deploy an update
74 |
75 | #### Create a Development and Staging Branch
76 |
77 | In the `diamond-dogs` github repository, create a `development` branch from the `main` branch. Also create a `staging` and `production` branch from the `main` branch.
78 |
79 | Update your Terraform Cloud `diamonddogs-app-useast1-dev` workspace to point to your `development` branch.
80 |
81 | #### Create Terraform Cloud Production and Staging Workspaces
82 |
83 | Create a new TFC workspace named `diamonddogs-app-useast1-prod` that is tied to the `diamond-dogs` GitHub repo by choosing the Version Control workflow. Set the branch to `production` and enable speculative plans.
84 |
85 | Repeat this step for the staging environment by creating a new TFC workspace named `diamonddogs-app-useast1-staging` that is tied to the `diamond-dogs` GitHub repo and specifying the `staging` branch.
86 |
87 | Each workspace is set up with its respective `environment` variable and watches the corresponding branch of our diamond-dogs repository.
88 |
89 | ### Update environment specific variables for each Terraform Cloud Workspace
90 |
91 | Update the `environment` variable for each of the diamond dogs Terraform Cloud workspaces.
92 |
93 | - Navigate to your Terraform Cloud workspaces in the UI.
94 | - Once there, navigate to the `Variables` tab.
95 | - In the `Variables` tab, you can add variables related to the state file that was previously migrated.
96 | - To do this, first select the `+ Add variable` button
97 | - Let's add a Terraform variable named `environment` with a value of `development`
98 |
99 | Repeat these same steps for all diamond dogs workspaces specifying the correlating environment value for each. Validate that the application deploys with the respective settings by triggering a Manual apply >> `Actions >> Start new plan`
100 |
101 | ### Version Control Branching and Terraform Cloud Workspaces
102 |
103 | We will be using GitHub to promote a change to our app through Development into Stage and then into Production.
104 |
106 |
107 | #### Code Promotion
108 |
109 | Let's look at how a change promotion would work in the configuration we outlined. We are going to start in our "Development" environment and move, or promote, that change to our other environments. Make sure you have both GitHub and Terraform Cloud web pages up. We will start in GitHub.
110 |
111 | 1. Select the `diamond-dogs` repository, if you are not already there.
112 | 2. From the branches list (pulldown menu that says main), choose `development`
113 | 3. Select the `files` folder and then the `deploy_app.sh` script.
114 | 4. Click on the blue `Edit` button. We will just use the GitHub web editor for simplicity.
115 | 5. Find the line with the "Welcome" message and add a message after it: `The Diamond Dogs have struck again.`
116 | 6. Add a "Commit message" below that describes your change.
117 | 7. When you're done click on `Commit Changes`
118 | 8. Navigate back out to Terraform Cloud. You should see that your diamond dogs development workspace is running.
119 | 9. Approve the change by viewing the run.
120 | 10. Refresh your Diamond Dogs Development web page to confirm that your updates are now displayed.
121 |
122 | Now that we have our change in our development environment and we did our testing to confirm our application is functional, let's promote this change to our stage environment.
123 |
124 | 1. Navigate back to GitHub. You should still be on the development branch viewing the `deploy_app.sh` file that we edited. Click on the `Pull requests` option. We will be merging our changes we made in the development branch to our staging branch.
125 | 2. Select `Compare & pull request`, then select `Change branches`. Our source branch will default to "development". Change the target branch to `staging` and select `Create pull request`
126 | 3. Update the Title to `Promote to Stage` and add a short description of your change.
127 | 4. For "Assignee" select `Assign to me`. We currently do not have users and groups setup in our environment but in a real world scenario we can put security controls around this approval process.
128 | 5. On the bottom of the page you can view what files and lines in those files are different between the development and stage branches. These are the changes that will be merged into our target branch.
129 | 6. Select `Create pull request`. We have now opened a pull request. In our lab, approvals are optional but we could require multiple approvers before any changes get applied. We could deny the request and put a comment with details regarding why we denied it.
130 | 7. Click on the `Show all checks` next to the green check-mark.
131 | 8. Open the `Details` link in a new tab. As a pull request reviewer, you can use this to review the Terraform plan that was run by the GitHub pipeline within the diamond dogs staging workspace.
132 |
133 | We peer reviewed the changes and everything looks good. Now go back to the tab we left open with our merge request and select the green `Merge pull request` button and `Confirm merge`.
134 |
135 | Notice that another pipeline was started under where the merge button was. Right click on this new pipeline and open it in a new tab. You can use the external pipeline to link out to Terraform Cloud to review the apply. We could have also been watching the Terraform Cloud workspace list to see our workspaces auto apply from our merge request inside the diamond dogs staging workspace.
136 |
137 | You can open the stage URL of our Diamond Dogs app to confirm that our changes have been added.
138 |
139 | The process to promote our change into our production environment would be identical, and the environments are isolated, so changes in the development branch have no impact on the production / main branch.
140 |
--------------------------------------------------------------------------------
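The additional VCS-backed workspaces created in m5/README.md can also be expressed with the tfe provider. A rough sketch under stated assumptions: `YOUR_ORGANIZATION` is a placeholder, `<GITHUB_USER>/diamond-dogs` identifies your repository, and `<OAUTH_TOKEN_ID>` is the ID of the VCS connection created earlier; the workspace names mirror the ones the README asks you to create.

```hcl
resource "tfe_workspace" "diamond_dogs_staging" {
  name         = "diamonddogs-app-useast1-staging"
  organization = "YOUR_ORGANIZATION"

  vcs_repo {
    identifier     = "<GITHUB_USER>/diamond-dogs"
    branch         = "staging"
    oauth_token_id = "<OAUTH_TOKEN_ID>"
  }

  # Run speculative plans on pull requests targeting the tracked branch
  speculative_enabled = true
}

resource "tfe_workspace" "diamond_dogs_prod" {
  name         = "diamonddogs-app-useast1-prod"
  organization = "YOUR_ORGANIZATION"

  vcs_repo {
    identifier     = "<GITHUB_USER>/diamond-dogs"
    branch         = "production"
    oauth_token_id = "<OAUTH_TOKEN_ID>"
  }

  speculative_enabled = true
}
```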