├── .gitignore ├── LICENSE ├── README.md ├── cloud-management-platform └── README.md ├── governance ├── README.md ├── aws │ ├── README.md │ ├── aws-vpcs-must-have-tags-and-enable-dns-hostnames.sentinel │ ├── enforce-ami-owners.sentinel │ ├── enforce-mandatory-tags.sentinel │ ├── enforce-tag-from-data-source.sentinel │ ├── must_have_remote_exec_provisioner.sentinel │ ├── openshift-aws-cluster-policy.sentinel │ ├── require-private-acl-and-kms-for-s3-buckets.sentinel │ ├── require-vpc-and-kms-for-lambda-functions.sentinel │ ├── restrict-aws-availability-zones.sentinel │ ├── restrict-aws-cidr-blocks.sentinel │ ├── restrict-aws-instance-type.sentinel │ ├── restrict-aws-region.sentinel │ ├── restrict-iam-policy-statement.sentinel │ └── restrict-iam-policy-statement2.sentinel ├── azure │ ├── README.md │ ├── acs-cluster-policy.sentinel │ ├── aks-cluster-policy.sentinel │ ├── block-allow-all-cidr.sentinel │ ├── restrict-current-azure-vms.sentinel │ ├── restrict-vm-image-id.sentinel │ ├── restrict-vm-publisher.sentinel │ └── restrict-vm-size.sentinel ├── cloud-agnostic │ ├── README.md │ └── destroy_limit ├── external │ ├── README.md │ ├── check_account.sh │ ├── check_account.tf │ └── check_account_balance.sentinel ├── gcp │ ├── README.md │ ├── block-allow-all-cidr.sentinel │ ├── gke-cluster-policy.sentinel │ ├── pod_limit.sentinel │ └── restrict-machine-type.sentinel └── vmware │ ├── require-storage-drs.sentinel │ ├── require_nfs41_and_kerberos.sentinel │ ├── restrict-virtual-disk-size-and-type.sentinel │ ├── restrict-vm-cpu-and-memory.sentinel │ ├── restrict-vm-creation-hours │ └── restrict-vm-disk-size.sentinel ├── infrastructure-as-code ├── README.md ├── aws-ec2-instance │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── aws-lambda-ec2-lifecycles │ ├── README.md │ ├── assets │ │ ├── aws_bot.png │ │ ├── dummy_event.png │ │ └── good_morning.png │ ├── data_collectors.tf │ ├── encryption.tf.disabled │ ├── files │ │ ├── checkInstanceTTLs.py │ │ ├── 
checkInstanceTTLs.zip │ │ ├── cleanUntaggedInstances.py │ │ ├── cleanUntaggedInstances.zip │ │ ├── getInstanceReport.py │ │ ├── getInstanceReport.zip │ │ ├── getRunningInstances.py │ │ ├── getRunningInstances.zip │ │ ├── getTaggedInstances.py │ │ ├── getTaggedInstances.zip │ │ ├── getUntaggedInstances.py │ │ ├── getUntaggedInstances.zip │ │ ├── iam_decrypt_kms.tpl │ │ ├── iam_lambda_notify.tpl │ │ ├── iam_lambda_read_instances.tpl │ │ ├── iam_lambda_stop_and_terminate_instances.tpl │ │ ├── notifyInstanceUsage.py │ │ ├── notifyInstanceUsage.zip │ │ ├── notifyUntaggedInstances.py │ │ └── notifyUntaggedInstances.zip │ ├── iam_roles.tf │ ├── instance_reaper.tf │ ├── main.tf │ ├── notify_instance_usage.tf │ ├── notify_untagged.tf │ ├── outputs.tf │ ├── untagged_janitor.tf │ └── variables.tf ├── azure-vm │ ├── README.md │ └── main.tf ├── dynamic-aws-creds-from-vault │ ├── LICENSE │ ├── README.md │ ├── main.tf │ ├── networks-firewalls-ingress.tf │ ├── networks-firewalls.tf │ ├── networks-gateways.tf │ ├── networks-routes.tf │ ├── networks-subnets.tf │ ├── networks.tf │ ├── outputs.tf │ └── variables.tf ├── dynamic-aws-creds │ ├── README.md │ ├── assets │ │ ├── dynamic-iam-creds-iam-ec2-policy.png │ │ ├── dynamic-iam-creds-iam-policy.png │ │ ├── dynamic-iam-creds.png │ │ └── ec2-instance.png │ ├── consumer-workspace │ │ └── main.tf │ └── producer-workspace │ │ └── main.tf ├── gcp-compute-instance │ ├── README.md │ └── main.tf ├── hashistack │ ├── .gitignore │ ├── README.md │ ├── best-practices │ │ └── terraform-aws │ │ │ ├── README.md │ │ │ ├── gitignore.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── terraform.auto.tfvars │ │ │ └── variables.tf │ ├── dev │ │ ├── terraform-aws │ │ │ ├── README.md │ │ │ ├── gitignore.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── terraform.auto.tfvars │ │ │ └── variables.tf │ │ ├── terraform-azure │ │ │ ├── README.md │ │ │ ├── _interface.tf │ │ │ ├── env.sh │ │ │ ├── main.tf │ │ │ ├── modules │ │ │ │ ├── consul-azure │ │ │ │ │ ├── 
_interface.tf │ │ │ │ │ ├── init-cluster.tpl │ │ │ │ │ ├── instances-consul.tf │ │ │ │ │ └── main.tf │ │ │ │ ├── images-azure │ │ │ │ │ └── _interface.tf │ │ │ │ ├── network-azure │ │ │ │ │ ├── README.md │ │ │ │ │ ├── _interface.tf │ │ │ │ │ ├── firewalls-jumphost.tf │ │ │ │ │ ├── instances-jumphost.tf │ │ │ │ │ ├── main.tf │ │ │ │ │ ├── networks.tf │ │ │ │ │ └── subnets.tf │ │ │ │ └── ssh-keypair-data │ │ │ │ │ ├── _interface.tf │ │ │ │ │ └── main.tf │ │ │ └── terraform.tfvars.example │ │ ├── terraform-gcp │ │ │ ├── README.md │ │ │ ├── _interface.tf │ │ │ ├── main.tf │ │ │ └── terraform.tfvars.example │ │ └── vagrant-local │ │ │ ├── README.md │ │ │ └── Vagrantfile │ ├── quick-start │ │ └── terraform-aws │ │ │ ├── README.md │ │ │ ├── gitignore.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── terraform.auto.tfvars │ │ │ └── variables.tf │ └── templates │ │ ├── best-practices-bastion-systemd.sh.tpl │ │ ├── best-practices-hashistack-systemd.sh.tpl │ │ ├── install-base.sh.tpl │ │ ├── install-consul-systemd.sh.tpl │ │ ├── install-docker.sh.tpl │ │ ├── install-java.sh.tpl │ │ ├── install-nomad-systemd.sh.tpl │ │ ├── install-vault-systemd.sh.tpl │ │ ├── quick-start-bastion-systemd.sh.tpl │ │ └── quick-start-hashistack-systemd.sh.tpl ├── k8s-cluster-aks │ ├── README.md │ ├── aks-cluster-policy.sentinel │ ├── ca_certificate │ ├── client_certificate │ ├── client_key │ ├── config │ ├── k8s.tfvars.example │ ├── main.tf │ ├── outputs.tf │ ├── private_key.pem │ ├── sample-policy.hcl │ └── variables.tf ├── k8s-cluster-gke │ ├── README.md │ ├── gke-cluster-policy.sentinel │ ├── main.tf │ ├── outputs.tf │ ├── sample-policy.hcl │ └── variables.tf ├── k8s-cluster-openshift-aws │ ├── README.md │ ├── ca_certificate │ ├── client_certificate │ ├── client_key │ ├── config │ ├── delay-vault-aws │ ├── main.tf │ ├── modules │ │ └── openshift │ │ │ ├── 00-variables.tf │ │ │ ├── 01-amis.tf │ │ │ ├── 02-vpc.tf │ │ │ ├── 03-security-groups.tf │ │ │ ├── 04-roles.tf │ │ │ ├── 05-nodes.tf │ │ │ 
├── 06-dns.tf │ │ │ ├── 07-bastion.tf │ │ │ ├── 08-outputs.tf │ │ │ ├── delay-aws │ │ │ └── files │ │ │ ├── install-from-bastion.sh │ │ │ ├── setup-master.sh │ │ │ └── setup-node.sh │ ├── openshift-aws-cluster-policy.sentinel │ ├── openshift.tfvars.example │ ├── outputs.tf │ ├── sample-policy.hcl │ ├── scripts │ │ ├── postinstall-master.sh │ │ └── postinstall-node.sh │ ├── variables.tf │ ├── vault-reviewer-rbac.yaml │ ├── vault-reviewer-token │ └── vault-reviewer.yaml ├── k8s-vault-config │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── vault-reviewer-token ├── terraform-0.12-examples │ ├── README.md │ ├── dynamic-blocks-and-splat-expressions │ │ ├── README.md │ │ └── main.tf │ ├── first-class-expressions │ │ ├── README.md │ │ └── main.tf │ ├── for-expressions │ │ ├── README.md │ │ ├── lists-and-maps-with-for.tf │ │ └── main.tf │ ├── new-template-syntax │ │ ├── README.md │ │ ├── actual_vote.txt │ │ ├── main.tf │ │ └── rigged_vote.txt │ ├── reliable-json-syntax │ │ ├── README.md │ │ ├── variable-correct.tf.txt │ │ ├── variable-with-comment.tf.txt │ │ ├── variable1.tf.json │ │ ├── variable2.tf.txt │ │ ├── variable3.tf.txt │ │ └── variable4.tf.txt │ └── rich-value-types │ │ ├── README.md │ │ ├── main.tf │ │ └── network │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf └── terraform-gcp-cloudsql │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── examples │ ├── prod-and-dev │ │ ├── README.md │ │ └── main.tf │ └── simple │ │ ├── README.md │ │ └── main.tf │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── operations ├── README.md ├── automation-script │ ├── README.md │ ├── apply.json │ ├── config │ │ └── main.tf │ ├── configversion.json │ ├── deleteWorkspace.sh │ ├── loadAndRunWorkspace.sh │ ├── restrict-name-variable.sentinel │ ├── run.template.json │ ├── variable.template.json │ ├── variables.csv │ └── workspace.template.json └── sentinel-policies-scripts │ ├── README.md │ ├── create-policy.template.json │ ├── delete_policies.sh │ ├── 
export_policies.sh │ └── import_policies.sh └── self-serve-infrastructure ├── README.md ├── cats-and-dogs ├── README.md ├── backend │ ├── Dockerfile │ └── vote-db │ │ └── start_redis.sh └── frontend │ ├── Dockerfile │ └── azure-vote │ ├── config_file.cfg │ ├── main.py │ ├── static │ └── default.css │ └── templates │ └── index.html ├── getting-started ├── README.md ├── terraform-aws │ ├── _interface.tf │ ├── main.tf │ ├── outputs.tf │ └── terraform.auto.tfvars ├── terraform-azure │ ├── _interface.tf │ ├── main.tf │ ├── outputs.tf │ └── terraform.auto.tfvars └── terraform-gcp │ ├── _interface.tf │ ├── main.tf │ ├── outputs.tf │ └── terraform.auto.tfvars ├── k8s-services-openshift ├── README.md ├── cats-and-dogs-secret-name ├── cats-and-dogs.yaml ├── main.tf ├── openshift.tfvars.example ├── outputs.tf └── variables.tf └── k8s-services ├── README.md ├── main.tf ├── outputs.tf └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.tfstate 3 | *.tfstate.backup 4 | *.tfstate.lock.info 5 | 6 | # logs 7 | *.log 8 | 9 | # Directories 10 | .terraform/ 11 | .vagrant/ 12 | 13 | # SSH Keys 14 | *.pem 15 | 16 | # Backup files 17 | *.bak 18 | 19 | # Ignored Terraform files 20 | *gitignore*.tf 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Terraform Guides 2 | This repository contains sample Terraform configurations, Sentinel policies, and automation scripts that can be used with Terraform Enterprise. 3 | 4 | ## infrastructure-as-code 5 | This directory contains sample Terraform configurations to provision VMs into AWS, Azure, and Google Cloud Platform (GCP) as well as Kubernetes clusters into Azure Container Service (ACS) and Google Kubernetes Engine (GKE). 
6 | 7 | ## self-serve-infrastructure 8 | This directory contains sample Terraform configurations to enable self-service infrastructure. In particular, it illustrates how developers can deploy applications to Kubernetes clusters provisioned by an operations team. 9 | 10 | ## governance 11 | This directory contains some sample Sentinel policies for several clouds which ensure that all infrastructure provisioned with Terraform Enterprise complies with an organization's provisioning rules. 12 | 13 | ## operations 14 | This directory provides artifacts that can be used by operations teams using Terraform Enterprise. In particular, it includes a script that shows how the Terraform Enterprise REST API can be used to automate interactions with Terraform Enterprise. 15 | 16 | ## cloud-management-platform 17 | This directory provides samples of how Terraform can be used to support cloud management platforms. 18 | 19 | ## `gitignore.tf` Files 20 | 21 | You may notice some [`gitignore.tf`](infrastructure-as-code/hashistack/best-practices/terraform-aws/gitignore.tf) files in certain directories. `.tf` files that contain the word "gitignore" are ignored by git in the [`.gitignore`](./.gitignore) file. 22 | 23 | If you have local Terraform configuration that you want ignored (like Terraform backend configuration), create a new file in the directory (separate from `gitignore.tf`) that contains the word "gitignore" (e.g. `backend.gitignore.tf`) and it won't be picked up as a change. 24 | -------------------------------------------------------------------------------- /cloud-management-platform/README.md: -------------------------------------------------------------------------------- 1 | # Cloud Management Platform 2 | To be implemented.
3 | -------------------------------------------------------------------------------- /governance/README.md: -------------------------------------------------------------------------------- 1 | # Governance with Terraform Sentinel Policies 2 | 3 | Sentinel gives operations teams the governance capabilities they need to ensure that all infrastructure provisioned with Terraform Enterprise complies with their organization's provisioning rules. The files under this directory provide some sample Sentinel policies for several clouds including AWS, Microsoft Azure, Google Cloud Platform (GCP), and VMware. The external directory also includes an example of an external data source and a Sentinel policy which checks the result of that data source. 4 | 5 | > **See also:** This repo shows how to use Sentinel policies to govern a wide range of specific kinds of infrastructure, but doesn't go into detail about how to use these policies with Terraform Enterprise. If you'd rather see an end-to-end workflow for managing a set of Sentinel policies with TFE (including Sentinel tests and Terraform code for managing policy sets), see [the hashicorp/tfe-policies-example repo](https://github.com/hashicorp/tfe-policies-example). 6 | -------------------------------------------------------------------------------- /governance/aws/README.md: -------------------------------------------------------------------------------- 1 | # Sentinel Policies for AWS 2 | The sample Sentinel policy files in this directory can be used with Terraform Enterprise to ensure that provisioned AWS VPCs, EC2 instances, S3 buckets, Lambda functions, and other resources comply with your organization's provisioning rules. There are also policies that restrict AMI owner IDs and the region or availability zones into which resources can be provisioned. 
3 | -------------------------------------------------------------------------------- /governance/aws/aws-vpcs-must-have-tags-and-enable-dns-hostnames.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | # Get all AWS vpcs from all modules 4 | get_vpcs = func() { 5 | vpcs = [] 6 | for tfplan.module_paths as path { 7 | vpcs += values(tfplan.module(path).resources.aws_vpc) else [] 8 | } 9 | return vpcs 10 | } 11 | 12 | vpcs = get_vpcs() 13 | 14 | vpc_must_have_tags = rule { 15 | all vpcs as name, instances { 16 | all instances as index, r { 17 | (length(r.applied.tags) else 0) > 0 18 | } 19 | } 20 | } 21 | 22 | vpc_hostnames_true = rule { 23 | all vpcs as name, instances { 24 | all instances as index, r { 25 | r.applied.enable_dns_hostnames == true 26 | } 27 | } 28 | } 29 | 30 | main = rule { 31 | (vpc_must_have_tags and vpc_hostnames_true) else true 32 | } 33 | -------------------------------------------------------------------------------- /governance/aws/enforce-mandatory-tags.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | # Warning, this is case sensitive. 4 | # This is on purpose especially for organizations that do cost analysis on tag names. 
5 | # where case sensitivity will cause grouping issues 6 | 7 | mandatory_tags = [ 8 | "TTL", 9 | "Owner", 10 | ] 11 | 12 | # Get all AWS instances contained in all modules being used 13 | get_aws_instances = func() { 14 | instances = [] 15 | for tfplan.module_paths as path { 16 | instances += values(tfplan.module(path).resources.aws_instance) else [] 17 | } 18 | return instances 19 | } 20 | 21 | aws_instances = get_aws_instances() 22 | 23 | # Instance tag rule 24 | instance_tags = rule { 25 | all aws_instances as _, instances { 26 | all instances as index, r { 27 | all mandatory_tags as t { 28 | r.applied.tags contains t 29 | } 30 | } 31 | } 32 | } 33 | 34 | main = rule { 35 | (instance_tags) else true 36 | } 37 | -------------------------------------------------------------------------------- /governance/aws/enforce-tag-from-data-source.sentinel: -------------------------------------------------------------------------------- 1 | # This policy is an example of using a Data Source in Sentinel evaluation 2 | # It restricts the creation of EC2 instances based on "Env" tag value 3 | # The allowable value of the "Env" tag is obtained from an aws_subnet Datasource 4 | # The Env tag from EC2 must match the Env tag of aws_subnet for the policy to pass. 
5 | 6 | import "tfplan" 7 | 8 | # Get all aws_instance resources from all modules 9 | get_aws_instances = func() { 10 | aws_instances = [] 11 | for tfplan.module_paths as path { 12 | aws_instances += values(tfplan.module(path).resources.aws_instance) else [] 13 | } 14 | return aws_instances 15 | } 16 | 17 | aws_instances = get_aws_instances() 18 | 19 | # Search for Env tag value in aws_subnet 20 | get_subnet_env = func() { 21 | # Get all aws_subnet Data Sources 22 | aws_subnets = [] 23 | for tfplan.module_paths as path { 24 | aws_subnets += values(tfplan.state.module(path).data.aws_subnet) else [] 25 | } 26 | 27 | # Iterate through each subnet and return first Env tag value found (if any) 28 | for aws_subnets as _, subnets { 29 | for subnets as index, subnet { 30 | env_tag = subnet.attr.tags["Env"] 31 | if length(env_tag) > 0 { # was ">= 0", which is always true for a defined tag and would return an empty Env value 32 | print("Using subnet environment tag:", env_tag) 33 | return env_tag 34 | } 35 | } 36 | } 37 | 38 | # Return undefined if there were no aws_subnet or none with Env tag.
39 | return undefined 40 | } 41 | 42 | # Store aws_subnet Env tag value as a variable 43 | aws_subnet_env_tag = get_subnet_env() 44 | 45 | #Ensure Env tag from each aws_instance matches that of aws_subnet 46 | validate_vm_tags_from_subnet = rule { 47 | all aws_instances as _, instances { 48 | all instances as index, ec2 { 49 | ec2.applied.tags["Env"] == aws_subnet_env_tag 50 | } 51 | } 52 | } 53 | 54 | main = rule { 55 | (validate_vm_tags_from_subnet) else true 56 | } 57 | -------------------------------------------------------------------------------- /governance/aws/must_have_remote_exec_provisioner.sentinel: -------------------------------------------------------------------------------- 1 | import "tfconfig" 2 | 3 | main = rule { 4 | all tfconfig.resources.aws_instance as _, r { 5 | any r.provisioners as _, p { 6 | p.type == "remote-exec" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /governance/aws/openshift-aws-cluster-policy.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | # Get all master instances from openshift module 4 | get_master_instances = func() { 5 | instances = values(tfplan.module(["openshift"]).resources.aws_instance.master) else [] 6 | return instances 7 | } 8 | 9 | masters = get_master_instances() 10 | 11 | # Rule to restrict Master instance type 12 | master_type_allowed = rule { 13 | all masters as index, r { 14 | r.applied.instance_type is "m4.xlarge" 15 | } 16 | } 17 | 18 | # Get all bastion instances from openshift module 19 | get_bastion_instances = func() { 20 | instances = values(tfplan.module(["openshift"]).resources.aws_instance.bastion) else [] 21 | return instances 22 | } 23 | 24 | bastions = get_bastion_instances() 25 | 26 | # Rule to restrict Bastion instance type 27 | bastion_type_allowed = rule { 28 | all bastions as index, r { 29 | r.applied.instance_type is "t2.micro" 30 | } 31 | } 32 | 33 | # Main 
rule that requires other rules to be true 34 | main = rule { 35 | (master_type_allowed and bastion_type_allowed) else true 36 | } 37 | -------------------------------------------------------------------------------- /governance/aws/require-private-acl-and-kms-for-s3-buckets.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | # Get all S3 buckets from all modules 4 | get_s3_buckets = func() { 5 | buckets = [] 6 | for tfplan.module_paths as path { 7 | buckets += values(tfplan.module(path).resources.aws_s3_bucket) else [] 8 | } 9 | return buckets 10 | } 11 | 12 | s3_buckets = get_s3_buckets() 13 | 14 | # Allowed S3 ACLs 15 | # Don't allow public-read-write 16 | allowed_acls = [ 17 | "private", 18 | ] 19 | 20 | # Rule to restrict S3 bucket ACLs 21 | acl_allowed = rule { 22 | all s3_buckets as _, instances { 23 | all instances as index, r { 24 | r.applied.acl in allowed_acls 25 | } 26 | } 27 | } 28 | 29 | # Rule to require server-side encryption 30 | require_encryption = rule { 31 | all s3_buckets as _, instances { 32 | all instances as index, r { 33 | (length(r.applied.server_side_encryption_configuration) > 0 and r.applied.server_side_encryption_configuration[0]["rule"][0].apply_server_side_encryption_by_default[0].sse_algorithm is "aws:kms") else false 34 | } 35 | } 36 | } 37 | 38 | # Main rule that requires other rules to be true 39 | main = rule { 40 | (acl_allowed and require_encryption) else true 41 | } 42 | 43 | -------------------------------------------------------------------------------- /governance/aws/require-vpc-and-kms-for-lambda-functions.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | # Get all Lambda functions from all modules 4 | get_lambda_functions = func() { 5 | lambdas = [] 6 | for tfplan.module_paths as path { 7 | lambdas += values(tfplan.module(path).resources.aws_lambda_function) else [] 8 | } 9 | return 
lambdas 10 | } 11 | 12 | lambda_functions = get_lambda_functions() 13 | 14 | # Rule to require KMS key 15 | require_kms_key = rule { 16 | all lambda_functions as _, instances { 17 | all instances as index, r { 18 | (r.applied.kms_key_arn is not "") else false 19 | } 20 | } 21 | } 22 | 23 | # Rule to require VPC 24 | require_vpc = rule { 25 | all lambda_functions as _, instances { 26 | all instances as index, r { 27 | (length(r.applied.vpc_config) > 0 and 28 | length(r.applied.vpc_config[0].security_group_ids) > 0 and 29 | length(r.applied.vpc_config[0].subnet_ids) > 0) else false 30 | } 31 | } 32 | } 33 | 34 | # Main rule that requires other rules to be true 35 | main = rule { 36 | (require_kms_key and require_vpc) else true 37 | } 38 | 39 | -------------------------------------------------------------------------------- /governance/aws/restrict-aws-availability-zones.sentinel: -------------------------------------------------------------------------------- 1 | # NOTE that you must explicitly specify availability_zone on all aws_instances 2 | # or this policy will fail since the computed availability_zone is not available 3 | # to plan 4 | 5 | import "tfplan" 6 | 7 | # Get all AWS instances from all modules 8 | get_aws_instances = func() { 9 | instances = [] 10 | for tfplan.module_paths as path { 11 | instances += values(tfplan.module(path).resources.aws_instance) else [] 12 | } 13 | return instances 14 | } 15 | 16 | # Allowed availability zones 17 | allowed_zones = [ 18 | "us-east-1a", 19 | "us-east-1b", 20 | "us-east-1c", 21 | "us-east-1d", 22 | "us-east-1e", 23 | "us-east-1f", 24 | ] 25 | 26 | aws_instances = get_aws_instances() 27 | 28 | # Rule to restrict availability zones and region 29 | region_allowed = rule { 30 | all aws_instances as _, instances { 31 | all instances as index, r { 32 | r.applied.availability_zone in allowed_zones 33 | } 34 | } 35 | } 36 | 37 | # Main rule that requires other rules to be true 38 | main = rule { 39 | (region_allowed) else 
true 40 | } 41 | -------------------------------------------------------------------------------- /governance/aws/restrict-aws-cidr-blocks.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | disallowed_cidr_blocks = [ 4 | "0.0.0.0/0", 5 | ] 6 | 7 | main = rule { 8 | all tfplan.resources.aws_security_group as _, instances { 9 | all instances as _, sg { 10 | all sg.applied.ingress as ingress { 11 | all disallowed_cidr_blocks as block { 12 | ingress.cidr_blocks not contains block 13 | } 14 | } 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /governance/aws/restrict-aws-instance-type.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | # Get all AWS instances from all modules 4 | get_aws_instances = func() { 5 | instances = [] 6 | for tfplan.module_paths as path { 7 | instances += values(tfplan.module(path).resources.aws_instance) else [] 8 | } 9 | return instances 10 | } 11 | 12 | # Allowed Types 13 | allowed_types = [ 14 | "t2.small", 15 | "t2.medium", 16 | "t2.large", 17 | ] 18 | 19 | aws_instances = get_aws_instances() 20 | 21 | # Rule to restrict instance types 22 | instance_type_allowed = rule { 23 | all aws_instances as _, instances { 24 | all instances as index, r { 25 | r.applied.instance_type in allowed_types 26 | } 27 | } 28 | } 29 | 30 | # Main rule that requires other rules to be true 31 | main = rule { 32 | (instance_type_allowed) else true 33 | } 34 | -------------------------------------------------------------------------------- /governance/aws/restrict-aws-region.sentinel: -------------------------------------------------------------------------------- 1 | # This policy restricts the AWS region based on the region set for 2 | # instances of the AWS provider in the root module of the workspace. 3 | # It does not check providers in nested modules. 
4 | 5 | import "tfconfig" 6 | import "tfplan" 7 | import "strings" 8 | 9 | # Initialize array of regions found in AWS providers 10 | region_values = [] 11 | 12 | # Allowed Regions 13 | allowed_regions = [ 14 | "us-east-1", 15 | "us-east-2", 16 | "us-west-1", 17 | "us-west-2", 18 | ] 19 | 20 | 21 | # Iterate through all AWS providers in root module 22 | if ((length(tfconfig.providers) else 0) > 0) { 23 | providers = tfconfig.providers 24 | if "aws" in keys(providers) { 25 | aws = tfconfig.providers.aws 26 | aliases = aws["alias"] 27 | for aliases as alias, data { 28 | print ( "alias is: ", alias ) 29 | region = data["config"]["region"] 30 | if region matches "\\$\\{var\\.(.*)\\}" { 31 | # AWS provider was configured with variable 32 | print ( "region is a variable" ) 33 | region_variable = strings.trim_suffix(strings.trim_prefix(region, "${var."), "}") 34 | print ( "region variable is: ", region_variable ) 35 | print ( "Value of region is: ", tfplan.variables[region_variable] ) 36 | region_value = tfplan.variables[region_variable] 37 | region_values += [region_value] 38 | } else { 39 | print ( "region is a hard-coded value" ) 40 | print ( "Value of region is: ", region ) 41 | region_value = region 42 | region_values += [region_value] 43 | } 44 | } 45 | } 46 | } 47 | 48 | # Print all regions found in AWS providers 49 | print ( "region_values is: ", region_values ) 50 | 51 | aws_region_valid = rule { 52 | all region_values as rv { 53 | rv in allowed_regions 54 | } 55 | } 56 | 57 | main = rule { 58 | (aws_region_valid) else true 59 | } 60 | -------------------------------------------------------------------------------- /governance/aws/restrict-iam-policy-statement.sentinel: -------------------------------------------------------------------------------- 1 | import "json" 2 | import "tfplan" 3 | 4 | # get all IAM policy resources from the tfplan 5 | all_policy_resources = func() { 6 | policies = [] 7 | for tfplan.module_paths as path { 8 | resources = 
values(tfplan.module(path).resources.aws_iam_policy) else [] 9 | for resources as _, r { 10 | policies += values(r) 11 | } 12 | } 13 | 14 | return policies 15 | } 16 | 17 | # get all IAM Policy statements 18 | policy_statements = func() { 19 | statements = [] 20 | for all_policy_resources() as r { 21 | statements += json.unmarshal(r.applied.policy).Statement 22 | } 23 | return statements 24 | } 25 | 26 | valid_statement = func(s) { 27 | if s.Action contains "iam:PassRole" { 28 | return s.Resource is not "*" 29 | } 30 | 31 | return true 32 | } 33 | 34 | # Main rule that requires other rules to be true 35 | main = rule { 36 | all policy_statements() as s { 37 | valid_statement(s) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /governance/aws/restrict-iam-policy-statement2.sentinel: -------------------------------------------------------------------------------- 1 | # This policy allows you to define a list of forbidden IAM policy statement actions 2 | # to prevent Terraform from creating. Put all statements in the forbidden_actions. 
3 | # This does not do by-resource level restrictions, but restricts all resources with 4 | # these actions 5 | 6 | import "json" 7 | import "tfplan" 8 | 9 | forbidden_actions = [ 10 | "iam:*", 11 | "iam:Create*", 12 | "iam:Delete*", 13 | ] 14 | 15 | # get all IAM policy resources from the tfplan 16 | all_policy_resources = func() { 17 | policies = [] 18 | for tfplan.module_paths as path { 19 | resources = values(tfplan.module(path).resources.aws_iam_policy) else [] 20 | for resources as _, r { 21 | policies += values(r) 22 | } 23 | } 24 | 25 | return policies 26 | } 27 | 28 | # get all IAM Policy statements 29 | policy_statements = func() { 30 | statements = [] 31 | for all_policy_resources() as r { 32 | statements += json.unmarshal(r.applied.policy).Statement 33 | } 34 | return statements 35 | } 36 | 37 | valid_statement = func(s,a) { 38 | if s.Action contains a { 39 | return false 40 | } 41 | return true 42 | } 43 | 44 | # Main rule that requires other rules to be true 45 | main = rule { 46 | all policy_statements() as s { 47 | all forbidden_actions as a { 48 | valid_statement(s, a) 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /governance/azure/README.md: -------------------------------------------------------------------------------- 1 | # Sentinel Policies for Azure 2 | The sample Sentinel policy files in this directory can be used with Terraform Enterprise to ensure that provisioned Azure security groups, VMs, and ACS clusters comply with your organization's provisioning rules. 3 | 4 | The restrict-current-azure-vms.sentinel policy is interesting because it actually checks VMs that have already been provisioned using the tfstate import and because it only prints the VMs that are not from an allowed publisher. It achieves the latter by using double negation (two nots) and "any" instead of "all". 
(For those familiar with logic, we are using one of De Morgan's laws: `not(P or Q) <-> (not P) and (not Q)`.) 5 | -------------------------------------------------------------------------------- /governance/azure/acs-cluster-policy.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | clusters = tfplan.resources.azurerm_container_service 4 | 5 | agent_node_count_limit = rule { 6 | all clusters as name, instances { 7 | all instances as index, r { 8 | int(r.applied.agent_pool_profile[0].count) < 10 9 | } 10 | } 11 | } 12 | 13 | master_node_count_limit = rule { 14 | all clusters as name, instances { 15 | all instances as index, r { 16 | int(r.applied.master_profile[0].count) <= 3 17 | } 18 | } 19 | } 20 | 21 | vm_size_allowed = rule { 22 | all clusters as name, instances { 23 | all instances as index, r { 24 | r.applied.agent_pool_profile[0].vm_size matches "Standard_A1" 25 | } 26 | } 27 | } 28 | main = rule { 29 | (master_node_count_limit and agent_node_count_limit and vm_size_allowed) else true 30 | } 31 | -------------------------------------------------------------------------------- /governance/azure/aks-cluster-policy.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | clusters = tfplan.resources.azurerm_kubernetes_cluster 4 | 5 | agent_node_count_limit = rule { 6 | all clusters as name, instances { 7 | all instances as index, r { 8 | int(r.applied.agent_pool_profile[0].count) < 10 9 | } 10 | } 11 | } 12 | 13 | # Allowed VM Sizes 14 | allowed_vm_sizes = [ 15 | "Standard_A1", 16 | "Standard_A2", 17 | "Standard_A3", 18 | ] 19 | 20 | vm_size_allowed = rule { 21 | all clusters as name, instances { 22 | all instances as index, r { 23 | r.applied.agent_pool_profile[0].vm_size in allowed_vm_sizes 24 | } 25 | } 26 | } 27 | main = rule { 28 | (agent_node_count_limit and vm_size_allowed) else true 29 | } 30 | 
-------------------------------------------------------------------------------- /governance/azure/block-allow-all-cidr.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_sgs = func() { 4 | sgs = [] 5 | for tfplan.module_paths as path { 6 | sgs += values(tfplan.module(path).resources.azurerm_network_security_group) else [] 7 | } 8 | return sgs 9 | } 10 | 11 | network_sgs = get_sgs() 12 | 13 | disallowed_cidr_blocks = [ 14 | "0.0.0.0/0", 15 | "0.0.0.0", 16 | "*", 17 | ] 18 | 19 | block_allow_all = rule { 20 | all network_sgs as _, instances { 21 | all instances as _, sg { 22 | all sg.applied.security_rule as _, sr { 23 | (sr.source_address_prefix not in disallowed_cidr_blocks) or sr.access == "Deny" 24 | } 25 | } 26 | } 27 | } 28 | 29 | main = rule { 30 | (block_allow_all) else true 31 | } 32 | -------------------------------------------------------------------------------- /governance/azure/restrict-current-azure-vms.sentinel: -------------------------------------------------------------------------------- 1 | import "tfstate" 2 | 3 | # Get VMs that already exist in the state of this workspace 4 | get_vms = func() { 5 | vms = [] 6 | for tfstate.module_paths as path { 7 | vms += values(tfstate.module(path).resources.azurerm_virtual_machine) else [] 8 | } 9 | return vms 10 | } 11 | 12 | # List of allowed publishers 13 | allowed_publishers = [ 14 | "RedHat", 15 | "Canonical", 16 | ] 17 | 18 | vms = get_vms() 19 | 20 | # This rule uses a double negative expression (two nots) so that 21 | # only VMs that are NOT in the list of approved publishers 22 | # will be printed. 23 | # If we had used all instead of any and left out the nots, 24 | # only the valid VMs would have been printed. 25 | # Or, if we had done that and put the print() statement before 26 | # testing the VM's publisher, all VMs would have been printed. 
27 | vm_publisher_allowed = rule { 28 | not ( 29 | any vms as _, instances { 30 | any instances as index, r { 31 | (r.attr.storage_image_reference[0].publisher not in allowed_publishers) and print("Existing VM publisher ", r.attr.storage_image_reference[0].publisher, "for VM ", r.attr.name, "is invalid") 32 | } 33 | }) 34 | } 35 | 36 | main = rule { 37 | (vm_publisher_allowed) else true 38 | } 39 | -------------------------------------------------------------------------------- /governance/azure/restrict-vm-image-id.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_vms = func() { 4 | vms = [] 5 | for tfplan.module_paths as path { 6 | vms += values(tfplan.module(path).resources.azurerm_virtual_machine) else [] 7 | } 8 | return vms 9 | } 10 | 11 | 12 | allowed_image_ids = [ 13 | "/subscriptions//resourceGroups/ABC-US-Z-RGP-NJS3-E005/providers/Microsoft.Compute/images/WIN-base-image", 14 | "/subscriptions//resourceGroups/ABC-US-Z-RGP-NJS3-E005/providers/Microsoft.Compute/images/RHEL-base-image", 15 | ] 16 | 17 | vms = get_vms() 18 | vm_image_id_allowed = rule { 19 | all vms as _, instances { 20 | all instances as index, r { 21 | # Two possible ways to restrict image ID 22 | r.applied.storage_image_reference[0].id in allowed_image_ids 23 | #r.applied.storage_image_reference[0].id matches "/subscriptions//(.*)[Win|RHEL](.*)" 24 | } 25 | } 26 | } 27 | 28 | main = rule { 29 | (vm_image_id_allowed) else true 30 | } 31 | -------------------------------------------------------------------------------- /governance/azure/restrict-vm-publisher.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_vms = func() { 4 | vms = [] 5 | for tfplan.module_paths as path { 6 | vms += values(tfplan.module(path).resources.azurerm_virtual_machine) else [] 7 | } 8 | return vms 9 | } 10 | 11 | 12 | allowed_publishers = [ 13 | "MicrosoftWindowsServer", 14 
| "RedHat", 15 | ] 16 | 17 | vms = get_vms() 18 | vm_publisher_allowed = rule { 19 | all vms as _, instances { 20 | all instances as index, r { 21 | r.applied.storage_image_reference[0].publisher in allowed_publishers 22 | } 23 | } 24 | } 25 | 26 | main = rule { 27 | (vm_publisher_allowed) else true 28 | } 29 | -------------------------------------------------------------------------------- /governance/azure/restrict-vm-size.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_vms = func() { 4 | vms = [] 5 | for tfplan.module_paths as path { 6 | vms += values(tfplan.module(path).resources.azurerm_virtual_machine) else [] 7 | } 8 | return vms 9 | } 10 | 11 | # comparison is case-sensitive 12 | # so including both cases for "v" 13 | # since we have seen both used 14 | allowed_vm_sizes = [ 15 | "Standard_D1_v2", 16 | "Standard_D1_V2", 17 | "Standard_D2_v2", 18 | "Standard_D2_V2", 19 | "Standard_DS1_v2", 20 | "Standard_DS1_V2", 21 | "Standard_DS2_v2", 22 | "Standard_DS2_V2", 23 | "Standard_A1", 24 | "Standard_A2", 25 | "Standard_D1", 26 | "Standard_D2", 27 | ] 28 | 29 | vms = get_vms() 30 | vm_size_allowed = rule { 31 | all vms as _, instances { 32 | all instances as index, r { 33 | r.applied.vm_size in allowed_vm_sizes 34 | } 35 | } 36 | } 37 | 38 | main = rule { 39 | (vm_size_allowed) else true 40 | } 41 | -------------------------------------------------------------------------------- /governance/cloud-agnostic/README.md: -------------------------------------------------------------------------------- 1 | This directory contains example policies that are cloud agnostic. 
2 | -------------------------------------------------------------------------------- /governance/cloud-agnostic/destroy_limit: -------------------------------------------------------------------------------- 1 | # This policy allows you to define a maximum number of resources that can be destroyed on a given run 2 | 3 | import "tfplan" 4 | 5 | get_all_resources = func() { 6 | resources = [] 7 | for tfplan.module_paths as path { 8 | all_resources = tfplan.module(path).resources else {} 9 | for all_resources as _, named_and_counted_resources { 10 | for named_and_counted_resources as _, instances { 11 | for instances as _, body { 12 | append(resources, body) 13 | } 14 | } 15 | } 16 | } 17 | return resources 18 | } 19 | 20 | allowed_to_destroy = func() { 21 | num_destroyed = 0 22 | for resources as i { 23 | if length(i.diff) is 0 { // True for resources that are being completely destroyed. 24 | num_destroyed = num_destroyed + 1 25 | } else if i.diff.id.new is "" { // True if the same resource is being destroyed and recreated and has an `id` attribute 26 | num_destroyed = num_destroyed + 1 27 | } 28 | } 29 | if num_destroyed > destroy_limit { 30 | print("ERROR: Trying to destroy", num_destroyed, "resources. Allowed to destroy", destroy_limit) 31 | return false 32 | } else { 33 | print("INFO: Trying to destroy", num_destroyed, "resources. Allowed to destroy", destroy_limit) 34 | return true 35 | } 36 | } 37 | 38 | destroy_limit = 2 39 | resources = get_all_resources() 40 | 41 | main = rule { allowed_to_destroy() } 42 | -------------------------------------------------------------------------------- /governance/external/README.md: -------------------------------------------------------------------------------- 1 | This repository contains an example of using a data source that calls a function together with a Sentinel policy that checks the result returned by the function call. 
While the example here just trivially runs a shell script that returns different values based on the account number passed to it by the Terraform code, a customer could actually call an external API to capture real data and then have Sentinel check the result. 2 | 3 | There is a commented out line, `#(length(check_account_balance) > 0) and`, which if uncommented would require the check_balance data source to be present in every single workspace in the current organization. Enable this with caution since any workspace that did not have this data source would cause hard-mandatory failure of this policy. 4 | -------------------------------------------------------------------------------- /governance/external/check_account.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -e 4 | 5 | CODE=$1 6 | 7 | case $CODE in 8 | 1) 9 | BALANCE=100 10 | ;; 11 | 2) 12 | BALANCE=0 13 | ;; 14 | *) 15 | BALANCE=0 16 | ;; 17 | esac 18 | 19 | echo "{ \"balance\": \"$BALANCE\" }" 20 | -------------------------------------------------------------------------------- /governance/external/check_account.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.11.7" 3 | } 4 | 5 | variable "account_code" { 6 | description = "code of cloud account: can be 1 or 2" 7 | } 8 | 9 | 10 | # Add fake resource to make sure that TFE runs this each time 11 | resource "null_resource" "fake" { 12 | triggers { 13 | uuid = "${uuid()}" 14 | } 15 | } 16 | 17 | data "external" "check_balance" { 18 | program = ["./check_account.sh", "${var.account_code}"] 19 | } 20 | 21 | output "balance" { 22 | value = "${data.external.check_balance.result["balance"]}" 23 | } 24 | 25 | 26 | -------------------------------------------------------------------------------- /governance/external/check_account_balance.sentinel: 
-------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | # Get instances of from root module 4 | check_account_balance = tfplan.state.data.external.check_balance 5 | 6 | # Rule to validate that account has balance > 50 7 | balance_test = rule { 8 | # If you wanted every single workspace to include the check_balance 9 | # data source, you could uncomment the following line. Use caution! 10 | #(length(check_account_balance) > 0) and 11 | all check_account_balance as _, r { 12 | print( r.attr.result.balance ) and (int(r.attr.result.balance) else 0) > 50 13 | } 14 | } 15 | 16 | # Main rule that requires other rules to be true 17 | main = rule { 18 | (balance_test) else true 19 | } 20 | -------------------------------------------------------------------------------- /governance/gcp/README.md: -------------------------------------------------------------------------------- 1 | # Sentinel Policies for Google Cloud Platform 2 | The sample Sentinel policy files in this directory can be used with Terraform Enterprise to ensure that provisioned GCP VMs, firewalls, and GKE clusters comply with your organization's provisioning rules. 
3 | -------------------------------------------------------------------------------- /governance/gcp/block-allow-all-cidr.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_firewalls = func() { 4 | firewalls = [] 5 | for tfplan.module_paths as path { 6 | firewalls += values(tfplan.module(path).resources.google_compute_firewall) else [] 7 | } 8 | return firewalls 9 | } 10 | 11 | firewalls = get_firewalls() 12 | 13 | disallowed_cidr_block = "0.0.0.0/0" 14 | 15 | block_allow_all = rule { 16 | all firewalls as _, instances { 17 | all instances as _, fw { 18 | all fw.applied.source_ranges as _, sr { sr is not disallowed_cidr_block } # check every source range; inspecting only source_ranges[0] let rules like ["10.0.0.0/8", "0.0.0.0/0"] slip through 19 | } 20 | } 21 | } 22 | 23 | main = rule { 24 | (block_allow_all) else true 25 | } 26 | -------------------------------------------------------------------------------- /governance/gcp/gke-cluster-policy.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | clusters = tfplan.resources.google_container_cluster 4 | 5 | node_count_limit = rule { 6 | all clusters as name, instances { 7 | all instances as index, r { 8 | int(r.applied.initial_node_count) < 10 9 | } 10 | } 11 | } 12 | 13 | machine_type_allowed = rule { 14 | all clusters as name, instances { 15 | all instances as index, r { 16 | r.applied.node_config[0].machine_type in ["n1-standard-1", "n1-standard-2", "n1-standard-4"] 17 | } 18 | } 19 | } 20 | 21 | main = rule { 22 | (machine_type_allowed and node_count_limit) else true 23 | } 24 | -------------------------------------------------------------------------------- /governance/gcp/pod_limit.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | pods_limit = tfplan.resources.kubernetes_resource_quota 4 | 5 | pod_count_limit = rule { 6 | all pods_limit as name, instances { 7 | all instances as index, r { 8 | int(r.applied.spec[0].hard.pods) < 10 9 | } 10 
| } 11 | } 12 | 13 | main = rule { 14 | (pod_count_limit) else true 15 | } 16 | 17 | -------------------------------------------------------------------------------- /governance/gcp/restrict-machine-type.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_vms = func() { 4 | vms = [] 5 | for tfplan.module_paths as path { 6 | vms += values(tfplan.module(path).resources.google_compute_instance) else [] 7 | } 8 | return vms 9 | } 10 | 11 | 12 | allowed_machine_types = [ 13 | "n1-standard-1", 14 | "n1-standard-2", 15 | "n1-standard-4", 16 | ] 17 | 18 | vms = get_vms() 19 | machine_type_allowed = rule { 20 | all vms as _, instances { 21 | all instances as index, r { 22 | r.applied.machine_type in allowed_machine_types 23 | } 24 | } 25 | } 26 | 27 | main = rule { 28 | (machine_type_allowed) else true 29 | } 30 | -------------------------------------------------------------------------------- /governance/vmware/require-storage-drs.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_datastore_clusters = func() { 4 | datastore_clusters = [] 5 | for tfplan.module_paths as path { 6 | datastore_clusters += values(tfplan.module(path).resources.vsphere_datastore_cluster) else [] 7 | } 8 | return datastore_clusters 9 | } 10 | 11 | datastore_clusters = get_datastore_clusters() 12 | 13 | # Require Storage DRS be enabled (true) 14 | # other option is false 15 | require_storage_drs = rule { 16 | all datastore_clusters as _, instances { 17 | all instances as index, r { 18 | r.applied.sdrs_enabled is true 19 | } 20 | } 21 | } 22 | 23 | main = rule { 24 | (require_storage_drs) else true 25 | } 26 | -------------------------------------------------------------------------------- /governance/vmware/require_nfs41_and_kerberos.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | 
get_nas_datastores = func() { 4 | nas_datastores = [] 5 | for tfplan.module_paths as path { 6 | nas_datastores += values(tfplan.module(path).resources.vsphere_nas_datastore) else [] 7 | } 8 | return nas_datastores 9 | } 10 | 11 | nas_datastores = get_nas_datastores() 12 | 13 | # Require NAS datastore type be nfs41 14 | # Default is NFS (which means NFS v3) 15 | require_nfs41 = rule { 16 | all nas_datastores as _, instances { 17 | all instances as index, r { 18 | r.applied.type is "NFS41" 19 | } 20 | } 21 | } 22 | 23 | # Require security type be one of two Kerberos options 24 | # Only applicable when type is NFS41. 25 | # Only other option is AUTH_SYS 26 | require_kerberos = rule { 27 | all nas_datastores as _, instances { 28 | all instances as index, r { 29 | r.applied.security_type in ["SEC_KRB5", "SEC_KRB5I"] 30 | } 31 | } 32 | } 33 | 34 | main = rule { 35 | (require_nfs41 and require_kerberos) else true 36 | } 37 | -------------------------------------------------------------------------------- /governance/vmware/restrict-virtual-disk-size-and-type.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_virtual_disks = func() { 4 | virtual_disks = [] 5 | for tfplan.module_paths as path { 6 | virtual_disks += values(tfplan.module(path).resources.vsphere_virtual_disk) else [] 7 | } 8 | return virtual_disks 9 | } 10 | 11 | virtual_disks = get_virtual_disks() 12 | 13 | # Restrict size of disk in GB 14 | disk_size_limit = rule { 15 | all virtual_disks as _, instances { 16 | all instances as index, r { 17 | r.applied.size < 100 18 | } 19 | } 20 | } 21 | 22 | # Specify disk type as thin. 
23 | # Could also be eagerZeroedThick (the default) or lazy 24 | disk_type = rule { 25 | all virtual_disks as _, instances { 26 | all instances as index, r { 27 | r.applied.type is "thin" 28 | } 29 | } 30 | } 31 | 32 | main = rule { 33 | (disk_size_limit and disk_type) else true 34 | } 35 | -------------------------------------------------------------------------------- /governance/vmware/restrict-vm-cpu-and-memory.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_vms = func() { 4 | vms = [] 5 | for tfplan.module_paths as path { 6 | vms += values(tfplan.module(path).resources.vsphere_virtual_machine) else [] 7 | } 8 | return vms 9 | } 10 | 11 | vms = get_vms() 12 | 13 | # Restrict number of CPUs 14 | cpus_limit = rule { 15 | all vms as _, instances { 16 | all instances as index, r { 17 | r.applied.num_cpus <= 4 18 | } 19 | } 20 | } 21 | 22 | # Restrict Memory (in MB) 23 | memory_limit = rule { 24 | all vms as _, instances { 25 | all instances as index, r { 26 | r.applied.memory <= 8192 27 | } 28 | } 29 | } 30 | 31 | main = rule { 32 | (cpus_limit and memory_limit) else true 33 | } 34 | -------------------------------------------------------------------------------- /governance/vmware/restrict-vm-creation-hours: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | import "time" 3 | 4 | get_vms = func() { 5 | vms = [] 6 | for tfplan.module_paths as path { 7 | vms += values(tfplan.module(path).resources.vsphere_virtual_machine) else [] 8 | } 9 | return vms 10 | } 11 | 12 | vms = get_vms() 13 | 14 | # UTC Hours when VMs can be created 15 | # to be after 10pm or before 4am 16 | restrict_hours = rule { 17 | all vms as _, instances { 18 | all instances as index, r { 19 | time.now.hour >= 22 or time.now.hour <= 4 20 | } 21 | } 22 | } 23 | 24 | main = rule { 25 | (restrict_hours) else true 26 | } 27 | 
-------------------------------------------------------------------------------- /governance/vmware/restrict-vm-disk-size.sentinel: -------------------------------------------------------------------------------- 1 | import "tfplan" 2 | 3 | get_vms = func() { 4 | vms = [] 5 | for tfplan.module_paths as path { 6 | vms += values(tfplan.module(path).resources.vsphere_virtual_machine) else [] 7 | } 8 | return vms 9 | } 10 | 11 | vms = get_vms() 12 | 13 | # Restrict size of VM disk in GiB 14 | disk_size_limit = rule { 15 | all vms as _, instances { 16 | all instances as index, r { 17 | all r.applied.disk as disk { 18 | int(disk.size) < 100 19 | } 20 | } 21 | } 22 | } 23 | 24 | main = rule { 25 | (disk_size_limit) else true 26 | } 27 | -------------------------------------------------------------------------------- /infrastructure-as-code/README.md: -------------------------------------------------------------------------------- 1 | # Infrastructure as Code Examples 2 | The Terraform code in the directories under this one provide examples for provisioning infrastructure into AWS, Azure, and Google Cloud Platform (GCP). 3 | 4 | ## aws-ec2-instance 5 | This example provides a simple example to provision an EC2 instance running Ubuntu in AWS. 6 | 7 | ## azure-vm 8 | This example provides a simple example to provision an Azure Windows VM and required resources in Azure. Note that it uses a module from the public [Terraform Module Registry](https://registry.terraform.io/). 9 | 10 | ## gcp-compute-instance 11 | This example provides a simple example to provision a Google compute instance in GCP. 12 | 13 | ## k8s-cluster-aks 14 | This example illustrates how you can provision an Azure Kubernetes Service (AKS) cluster using the AKS service. If you use this, also check out the [k8s-vault-config](./k8s-vault-config) and [k8s-services](../self-serve-infrastructure/k8s-services) directories. 
The first provisions the Vault Kubernetes authentication method against your cluster while the second provisions a web app and redis database as Kubernetes pods to the AKS cluster. 15 | 16 | ## k8s-cluster-gke 17 | This example illustrates how you can provision a Google Kubernetes Engine (GKE) cluster. If you use this, also check out the [k8s-vault-config](./k8s-vault-config) and [k8s-services](../self-serve-infrastructure/k8s-services) directories. The first provisions the Vault Kubernetes authentication method against your cluster while the second provisions a web app and redis database as Kubernetes pods to the GKE cluster. 18 | 19 | ## k8s-cluster-openshift-aws 20 | This example illustrates how you can provision an OpenShift 3.11 cluster into AWS using Terraform and ansible-playbook. If you use this, also check out the [k8s-services-openshift](../self-serve-infrastructure/k8s-services-openshift) directory which provisions a web app and redis database as Kubernetes pods to the OpenShift cluster. 21 | 22 | ## aws-lambda-ec2-lifecycles 23 | This example illustrates how you can provision some AWS Lambda functions to help you terminate EC2 instances that are running longer than desired. 24 | 25 | ## dynamic-aws-creds 26 | This example illustrates how you can use short lived AWS keys dynamically generated by Vault in your Terraform projects. It breaks up the usage of those keys into producer and consumer roles. 27 | 28 | ## dynamic-aws-creds-from-vault 29 | This example also illustrates the use of short lived AWS keys dynamically generated by Vault in the context of provisioning some AWS networking infrastructure. 30 | 31 | ## hashistack 32 | This example illustrates how to provision a HashiStack cluster running Nomad, Consul, and Vault in AWS, Azure, and Google. 
33 | -------------------------------------------------------------------------------- /infrastructure-as-code/aws-ec2-instance/README.md: -------------------------------------------------------------------------------- 1 | # Provision an EC2 instance in AWS 2 | This Terraform configuration provisions an EC2 instance in AWS. 3 | 4 | ## Details 5 | By default, this configuration provisions a Ubuntu 14.04 Base Image AMI (with ID ami-2e1ef954) with type t2.micro in the us-east-1 region. The AMI ID, region, and type can all be set as variables. You can also set the name variable to determine the value set for the Name tag. 6 | 7 | Note that you need to set environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. 8 | -------------------------------------------------------------------------------- /infrastructure-as-code/aws-ec2-instance/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.11.0" 3 | } 4 | 5 | provider "aws" { 6 | region = "${var.aws_region}" 7 | } 8 | 9 | resource "aws_instance" "ubuntu" { 10 | ami = "${var.ami_id}" 11 | instance_type = "${var.instance_type}" 12 | availability_zone = "${var.aws_region}a" 13 | 14 | tags { 15 | Name = "${var.name}" 16 | Test = "PRTest" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /infrastructure-as-code/aws-ec2-instance/outputs.tf: -------------------------------------------------------------------------------- 1 | output "public_dns" { 2 | value = "${aws_instance.ubuntu.public_dns}" 3 | } 4 | -------------------------------------------------------------------------------- /infrastructure-as-code/aws-ec2-instance/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | description = "AWS region" 3 | default = "eu-west-2" 4 | } 5 | 6 | variable "ami_id" { 7 | description = "ID of the AMI to provision. 
Default is Ubuntu 14.04 Base Image" 8 | default = "ami-afa31dd8" 9 | } 10 | 11 | variable "instance_type" { 12 | description = "type of EC2 instance to provision." 13 | default = "t2.medium" 14 | } 15 | 16 | variable "name" { 17 | description = "name to pass to Name tag" 18 | default = "Provisioned by Terraform" 19 | } 20 | -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/assets/aws_bot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/assets/aws_bot.png -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/assets/dummy_event.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/assets/dummy_event.png -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/assets/good_morning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/assets/good_morning.png -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/data_collectors.tf: -------------------------------------------------------------------------------- 1 | # These lambda functions return dictionaries of instances. 2 | # Use them with other functions to take action on tagged, untagged 3 | # or running instances. 
4 | 5 | resource "aws_lambda_function" "getUntaggedInstances" { 6 | filename = "./files/getUntaggedInstances.zip" 7 | function_name = "getUntaggedInstances" 8 | role = "${aws_iam_role.lambda_read_instances.arn}" 9 | handler = "getUntaggedInstances.lambda_handler" 10 | source_code_hash = "${base64sha256(file("./files/getUntaggedInstances.zip"))}" 11 | runtime = "python3.6" 12 | timeout = "120" 13 | description = "Gathers a list of untagged or improperly tagged instances." 14 | 15 | environment { 16 | variables = { 17 | "REQTAGS" = "${var.mandatory_tags}" 18 | } 19 | } 20 | } 21 | 22 | resource "aws_lambda_function" "getTaggedInstances" { 23 | filename = "./files/getTaggedInstances.zip" 24 | function_name = "getTaggedInstances" 25 | role = "${aws_iam_role.lambda_read_instances.arn}" 26 | handler = "getTaggedInstances.lambda_handler" 27 | source_code_hash = "${base64sha256(file("./files/getTaggedInstances.zip"))}" 28 | runtime = "python3.6" 29 | timeout = "120" 30 | description = "Gathers a list of correctly tagged instances." 31 | 32 | environment { 33 | variables = { 34 | "REQTAGS" = "${var.mandatory_tags}" 35 | } 36 | } 37 | } 38 | 39 | resource "aws_lambda_function" "getRunningInstances" { 40 | filename = "./files/getRunningInstances.zip" 41 | function_name = "getRunningInstances" 42 | role = "${aws_iam_role.lambda_read_instances.arn}" 43 | handler = "getRunningInstances.lambda_handler" 44 | source_code_hash = "${base64sha256(file("./files/getRunningInstances.zip"))}" 45 | runtime = "python3.6" 46 | timeout = "120" 47 | description = "Gathers a list of running instances." 
48 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/encryption.tf.disabled: -------------------------------------------------------------------------------- 1 | # Optional extra resources to help encrypt your Slack Webhook URL 2 | 3 | # This key is used to encrypt the slack webhook URL 4 | resource "aws_kms_key" "notify_slack" { 5 | description = "Key for encrypting the Slack webhook URL" 6 | enable_key_rotation = "false" 7 | is_enabled = "true" 8 | } 9 | 10 | # A human friendly alias so we can find it in the UI 11 | resource "aws_kms_alias" "notify_slack" { 12 | name = "alias/notify_slack" 13 | target_key_id = "${aws_kms_key.notify_slack.key_id}" 14 | } 15 | 16 | # Template for our 'decrypt_kms' lambda IAM policy 17 | data "template_file" "iam_decrypt_kms" { 18 | template = "${file("./files/iam_decrypt_kms.tpl")}" 19 | 20 | vars { 21 | kmskey = "${aws_kms_key.notify_slack.arn}" 22 | account_id = "${data.aws_caller_identity.current.account_id}" 23 | region = "${var.region}" 24 | } 25 | } 26 | 27 | # Here we ingest the template and attach it to our notify_slack role 28 | resource "aws_iam_role_policy" "decrypt_kms" { 29 | name = "decrypt_kms" 30 | policy = "${data.template_file.iam_decrypt_kms.rendered}" 31 | role = "${aws_iam_role.lambda_notify_slack.id}" 32 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/checkInstanceTTLs.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/files/checkInstanceTTLs.zip -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/cleanUntaggedInstances.zip: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/files/cleanUntaggedInstances.zip -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getInstanceReport.py: -------------------------------------------------------------------------------- 1 | # Example functions for AWS reporting. Use as a base to build your own. 2 | 3 | import boto3 4 | import json 5 | import logging 6 | import os 7 | import csv 8 | import io 9 | 10 | logger = logging.getLogger() 11 | logger.setLevel(logging.INFO) 12 | 13 | lam = boto3.client('lambda') 14 | 15 | def lambda_handler(event, context): 16 | """Generates a tab-separated list of running instances.""" 17 | # You could also use get_tagged_instances or get_untagged_instances here 18 | running = get_running_instances() 19 | report = generate_tsv(running) 20 | #logger.info(report) 21 | return(report) 22 | 23 | def get_running_instances(): 24 | """Calls the Lambda function that returns a dictionary of instances.""" 25 | try: 26 | response = lam.invoke(FunctionName='getRunningInstances', InvocationType='RequestResponse') 27 | except Exception as e: 28 | print(e) 29 | raise e 30 | return response 31 | 32 | # This could be useful for generating email reports or dumping a list of running 33 | # instances into an S3 bucket. 
34 | def generate_tsv(response): 35 | """Ingests data from a lambda response, converts it to tab-separated format.""" 36 | data=json.loads(response['Payload'].read().decode('utf-8')) 37 | data=json.loads(data) 38 | output = io.StringIO() 39 | writer = csv.writer(output, delimiter='\t') 40 | for key, value in data.items(): 41 | value['InstanceId'] = key 42 | writer.writerow(value.values()) 43 | contents = output.getvalue() 44 | return(contents) 45 | 46 | if __name__ == '__main__': 47 | lambda_handler({}, {}) -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getInstanceReport.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getInstanceReport.zip -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getRunningInstances.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getRunningInstances.zip -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getTaggedInstances.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getTaggedInstances.zip -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getUntaggedInstances.zip: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/files/getUntaggedInstances.zip -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/iam_decrypt_kms.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "kms:Decrypt" 8 | ], 9 | "Resource": [ 10 | "${kmskey}" 11 | ] 12 | } 13 | ] 14 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/iam_lambda_notify.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "logs:CreateLogGroup", 8 | "logs:CreateLogStream", 9 | "logs:PutLogEvents" 10 | ], 11 | "Resource": "arn:aws:logs:${region}:${account_id}:*" 12 | }, 13 | { 14 | "Effect": "Allow", 15 | "Action": [ 16 | "lambda:InvokeFunction", 17 | "ses:SendEmail", 18 | "ses:SendRawEmail" 19 | ], 20 | "Resource": "*" 21 | } 22 | ] 23 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/iam_lambda_read_instances.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "logs:CreateLogGroup", 7 | "Resource": "arn:aws:logs:${region}:${account_id}:*" 8 | }, 9 | { 10 | "Effect": "Allow", 11 | "Action": [ 12 | "logs:CreateLogStream", 13 | "logs:PutLogEvents" 14 | ], 15 | "Resource": [ 16 | "arn:aws:logs:${region}:${account_id}:*" 17 | ] 18 | }, 19 | { 20 | "Effect": 
"Allow", 21 | "Action": "ec2:Describe*", 22 | "Resource": "*" 23 | }, 24 | { 25 | "Effect": "Allow", 26 | "Action": "elasticloadbalancing:Describe*", 27 | "Resource": "*" 28 | }, 29 | { 30 | "Effect": "Allow", 31 | "Action": [ 32 | "cloudwatch:ListMetrics", 33 | "cloudwatch:GetMetricStatistics", 34 | "cloudwatch:Describe*" 35 | ], 36 | "Resource": "*" 37 | }, 38 | { 39 | "Effect": "Allow", 40 | "Action": "autoscaling:Describe*", 41 | "Resource": "*" 42 | } 43 | ] 44 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/iam_lambda_stop_and_terminate_instances.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "logs:CreateLogGroup", 7 | "Resource": "arn:aws:logs:${region}:${account_id}:*" 8 | }, 9 | { 10 | "Effect": "Allow", 11 | "Action": [ 12 | "logs:CreateLogStream", 13 | "logs:PutLogEvents" 14 | ], 15 | "Resource": [ 16 | "arn:aws:logs:${region}:${account_id}:*" 17 | ] 18 | }, 19 | { 20 | "Effect": "Allow", 21 | "Action": [ 22 | "lambda:InvokeFunction", 23 | "ses:SendEmail", 24 | "ses:SendRawEmail", 25 | "ec2:*" 26 | ], 27 | "Resource": "*" 28 | } 29 | ] 30 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/notifyInstanceUsage.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/files/notifyInstanceUsage.zip -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/files/notifyUntaggedInstances.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/files/notifyUntaggedInstances.zip -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/instance_reaper.tf: -------------------------------------------------------------------------------- 1 | # Checks the TTL of your instances, if expired can stop or terminate them. 2 | resource "aws_lambda_function" "checkInstanceTTLs" { 3 | filename = "./files/checkInstanceTTLs.zip" 4 | function_name = "checkInstanceTTLs" 5 | role = "${aws_iam_role.lambda_stop_and_terminate_instances.arn}" 6 | handler = "checkInstanceTTLs.lambda_handler" 7 | source_code_hash = "${base64sha256(file("./files/checkInstanceTTLs.zip"))}" 8 | runtime = "python3.6" 9 | timeout = "120" 10 | description = "Checks instance TTLs for expiration and deals with them accordingly." 11 | environment { 12 | variables = { 13 | slackChannel = "${var.slack_channel}" 14 | slackHookUrl = "${var.slack_hook_url}" 15 | isActive = "${var.is_active}" 16 | } 17 | } 18 | } 19 | 20 | # Here we create a cloudwatch event rule, essentially a cron job that 21 | # will call our lambda function every hour. Adjust to your schedule. 22 | resource "aws_cloudwatch_event_rule" "check_instance_ttls" { 23 | name = "check_instance_ttls" 24 | description = "Check instance TTLs to see if they are expired" 25 | schedule_expression = "cron(0 * * * ? 
*)" 26 | } 27 | 28 | resource "aws_cloudwatch_event_target" "reaper_report" { 29 | rule = "${aws_cloudwatch_event_rule.check_instance_ttls.name}" 30 | target_id = "${aws_lambda_function.checkInstanceTTLs.function_name}" 31 | arn = "${aws_lambda_function.checkInstanceTTLs.arn}" 32 | } 33 | 34 | resource "aws_lambda_permission" "allow_cloudwatch_check_ttls" { 35 | statement_id = "AllowExecutionFromCloudWatch" 36 | action = "lambda:InvokeFunction" 37 | function_name = "${aws_lambda_function.checkInstanceTTLs.function_name}" 38 | principal = "events.amazonaws.com" 39 | source_arn = "${aws_cloudwatch_event_rule.check_instance_ttls.arn}" 40 | depends_on = [ 41 | "aws_lambda_function.checkInstanceTTLs" 42 | ] 43 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/main.tf: -------------------------------------------------------------------------------- 1 | # Terraform configurations for creating lambda functions to help manage 2 | # your ec2 instance lifecycles. The data_collectors.tf and iam_roles.tf 3 | # files are required. You may also use one or more of the following: 4 | # 5 | # notify_instance_usage.tf - Notify slack with instance usage #s 6 | # notify_untagged.tf - Checks for mandatory tags, notifies slack. 7 | # instance_reaper.tf - Terminates instances that have passed their TTL. 8 | # untagged_janitor.tf - Cleans up untagged instances. 9 | 10 | provider "aws" { 11 | region = "${var.region}" 12 | } 13 | -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/notify_instance_usage.tf: -------------------------------------------------------------------------------- 1 | # Notify slack about the # of each instance type you have running. 
2 | resource "aws_lambda_function" "notifyInstanceUsage" { 3 | filename = "./files/notifyInstanceUsage.zip" 4 | function_name = "notifyInstanceUsage" 5 | role = "${aws_iam_role.lambda_notify.arn}" 6 | handler = "notifyInstanceUsage.lambda_handler" 7 | source_code_hash = "${base64sha256(file("./files/notifyInstanceUsage.zip"))}" 8 | runtime = "python3.6" 9 | timeout = "120" 10 | description = "Sends a notification message with info about number of running instances by type." 11 | 12 | environment { 13 | variables = { 14 | slackChannel = "${var.slack_channel}" 15 | slackHookUrl = "${var.slack_hook_url}" 16 | } 17 | } 18 | } 19 | 20 | # Here we create a cloudwatch event rule, essentially a cron job that 21 | # will call our lambda function every day. Adjust to your schedule. 22 | resource "aws_cloudwatch_event_rule" "notify_running_instances" { 23 | name = "notify_running_instances" 24 | description = "Notify users about their running AWS instances" 25 | schedule_expression = "cron(0 8 * * ? 
*)" 26 | } 27 | 28 | resource "aws_cloudwatch_event_target" "daily_running_report" { 29 | rule = "${aws_cloudwatch_event_rule.notify_running_instances.name}" 30 | target_id = "${aws_lambda_function.notifyInstanceUsage.function_name}" 31 | arn = "${aws_lambda_function.notifyInstanceUsage.arn}" 32 | } 33 | 34 | resource "aws_lambda_permission" "allow_cloudwatch_instance_usage" { 35 | statement_id = "AllowExecutionFromCloudWatch" 36 | action = "lambda:InvokeFunction" 37 | function_name = "${aws_lambda_function.notifyInstanceUsage.function_name}" 38 | principal = "events.amazonaws.com" 39 | source_arn = "${aws_cloudwatch_event_rule.notify_running_instances.arn}" 40 | depends_on = [ 41 | "aws_lambda_function.notifyInstanceUsage" 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/notify_untagged.tf: -------------------------------------------------------------------------------- 1 | # Notify about untagged instances and their key names. 2 | resource "aws_lambda_function" "notifyUntaggedInstances" { 3 | filename = "./files/notifyUntaggedInstances.zip" 4 | function_name = "notifyUntaggedInstances" 5 | role = "${aws_iam_role.lambda_notify.arn}" 6 | handler = "notifyUntaggedInstances.lambda_handler" 7 | source_code_hash = "${base64sha256(file("./files/notifyUntaggedInstances.zip"))}" 8 | runtime = "python3.6" 9 | timeout = "120" 10 | description = "Sends a notification message with info about untagged instances." 11 | 12 | environment { 13 | variables = { 14 | slackChannel = "${var.slack_channel}" 15 | slackHookUrl = "${var.slack_hook_url}" 16 | } 17 | } 18 | } 19 | 20 | # Here we create a cloudwatch event rule, essentially a cron job that 21 | # will call our lambda function every day. Adjust to your schedule. 
22 | resource "aws_cloudwatch_event_rule" "notify_untagged_instances" { 23 | name = "notify_untagged_instances" 24 | description = "Notify users about their untagged AWS instances" 25 | schedule_expression = "cron(0 6 * * ? *)" 26 | } 27 | 28 | resource "aws_cloudwatch_event_target" "daily_untagged_report" { 29 | rule = "${aws_cloudwatch_event_rule.notify_untagged_instances.name}" 30 | target_id = "${aws_lambda_function.notifyUntaggedInstances.function_name}" 31 | arn = "${aws_lambda_function.notifyUntaggedInstances.arn}" 32 | } 33 | 34 | resource "aws_lambda_permission" "allow_cloudwatch_untagged_instances" { 35 | statement_id = "AllowExecutionFromCloudWatch" 36 | action = "lambda:InvokeFunction" 37 | function_name = "${aws_lambda_function.notifyUntaggedInstances.function_name}" 38 | principal = "events.amazonaws.com" 39 | source_arn = "${aws_cloudwatch_event_rule.notify_untagged_instances.arn}" 40 | depends_on = [ 41 | "aws_lambda_function.notifyUntaggedInstances" 42 | ] 43 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/aws-lambda-ec2-lifecycles/outputs.tf -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/untagged_janitor.tf: -------------------------------------------------------------------------------- 1 | # This lambda is intended to deal with untagged instances by either stopping 2 | # and then terminating them according to your lifecycle policy. 
3 | resource "aws_lambda_function" "cleanUntaggedInstances" { 4 | filename = "./files/cleanUntaggedInstances.zip" 5 | function_name = "cleanUntaggedInstances" 6 | role = "${aws_iam_role.lambda_stop_and_terminate_instances.arn}" 7 | handler = "cleanUntaggedInstances.lambda_handler" 8 | source_code_hash = "${base64sha256(file("./files/cleanUntaggedInstances.zip"))}" 9 | runtime = "python3.6" 10 | timeout = "120" 11 | description = "Stops or terminates untagged instances after a pre-set number of days." 12 | environment { 13 | variables = { 14 | slackChannel = "${var.slack_channel}" 15 | slackHookUrl = "${var.slack_hook_url}" 16 | sleepDays = "${var.sleep_days}" 17 | reapDays = "${var.reap_days}" 18 | isActive = "${var.is_active}" 19 | } 20 | } 21 | } 22 | 23 | # Here we create a cloudwatch event rule, essentially a cron job that 24 | # will call our lambda function every day. Adjust to your schedule. 25 | resource "aws_cloudwatch_event_rule" "clean_untagged_instances" { 26 | name = "clean_untagged_instances" 27 | description = "Check untagged instances and stop/terminate old ones" 28 | schedule_expression = "cron(0 8 * * ? 
*)" 29 | } 30 | 31 | resource "aws_cloudwatch_event_target" "untagged_instance_cleanup" { 32 | rule = "${aws_cloudwatch_event_rule.clean_untagged_instances.name}" 33 | target_id = "${aws_lambda_function.cleanUntaggedInstances.function_name}" 34 | arn = "${aws_lambda_function.cleanUntaggedInstances.arn}" 35 | } 36 | 37 | resource "aws_lambda_permission" "allow_cloudwatch_clean_untagged_instances" { 38 | statement_id = "AllowExecutionFromCloudWatch" 39 | action = "lambda:InvokeFunction" 40 | function_name = "${aws_lambda_function.cleanUntaggedInstances.function_name}" 41 | principal = "events.amazonaws.com" 42 | source_arn = "${aws_cloudwatch_event_rule.clean_untagged_instances.arn}" 43 | depends_on = [ 44 | "aws_lambda_function.cleanUntaggedInstances" 45 | ] 46 | } -------------------------------------------------------------------------------- /infrastructure-as-code/aws-lambda-ec2-lifecycles/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | default = "us-west-2" 3 | description = "AWS Region" 4 | } 5 | 6 | # Set your Slack Webhook URL here. For extra security you can use AWS KMS to 7 | # encrypt this data in the AWS console. 8 | variable "slack_hook_url" { 9 | default = "https://hooks.slack.com/services/REPLACE/WITH/YOUR_WEBHOOK_URL" 10 | description = "Slack incoming webhook URL, get this from the slack management page." 11 | } 12 | 13 | variable "slack_channel" { 14 | default = "#aws-hc-se-demos" 15 | description = "Slack channel your bot will post messages to." 16 | } 17 | 18 | variable "mandatory_tags" { 19 | default = "TTL,owner" 20 | description = "Comma separated string mandatory tag values." 21 | } 22 | 23 | variable "sleep_days" { 24 | default = "14" 25 | description = "Days after launch after which untagged instances are stopped." 26 | } 27 | 28 | variable "reap_days" { 29 | default = "90" 30 | description = "Days after launch after which untagged instances are terminated." 
31 | } 32 | 33 | variable "is_active" { 34 | default = "False" 35 | description = "Determines whether scripts will actually stop and terminate instances or do a dry run instead." 36 | } 37 | -------------------------------------------------------------------------------- /infrastructure-as-code/azure-vm/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.11.1" 3 | } 4 | 5 | variable "location" { 6 | description = "Azure location in which to create resources" 7 | default = "East US" 8 | } 9 | 10 | variable "windows_dns_prefix" { 11 | description = "DNS prefix to add to to public IP address for Windows VM" 12 | } 13 | 14 | variable "admin_password" { 15 | description = "admin password for Windows VM" 16 | default = "pTFE1234!" 17 | } 18 | 19 | module "windowsserver" { 20 | source = "Azure/compute/azurerm" 21 | version = "1.1.5" 22 | location = "${var.location}" 23 | resource_group_name = "${var.windows_dns_prefix}-rc" 24 | vm_hostname = "pwc-ptfe" 25 | admin_password = "${var.admin_password}" 26 | vm_os_simple = "WindowsServer" 27 | public_ip_dns = ["${var.windows_dns_prefix}"] 28 | vnet_subnet_id = "${module.network.vnet_subnets[0]}" 29 | } 30 | 31 | module "network" { 32 | source = "Azure/network/azurerm" 33 | version = "1.1.1" 34 | location = "${var.location}" 35 | resource_group_name = "${var.windows_dns_prefix}-rc" 36 | allow_ssh_traffic = true 37 | } 38 | 39 | output "windows_vm_public_name"{ 40 | value = "${module.windowsserver.public_ip_dns_name}" 41 | } 42 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/README.md: -------------------------------------------------------------------------------- 1 | # Dynamic AWS Credential for Terraform 2 | This repository illustrates how you can retrieve dynamically generated, short-lived AWS keys from Vault and then pass them to the Terraform AWS Provider 
and provision a VPC in AWS. 3 | 4 | The configuration creates a standard VPC with associated AWS resources. 5 | 6 | ## Requirements 7 | 8 | This module requires a running Vault server with an existing AWS secret backend that has been configured to dynamically generate AWS keys. See [Vault Getting Started: Dynamic Secrets](https://www.vaultproject.io/intro/getting-started/dynamic-secrets.html) for a tutorial on how to configure the AWS backend. 9 | 10 | ## Required Environment Variables 11 | 12 | - VAULT_ADDR: the address of your Vault server 13 | - VAULT_TOKEN: a Vault token that has permission to request AWS credentials from the AWS backend. 14 | 15 | ## Usage 16 | If using Terraform Open Source, execute the following commands: 17 | ``` 18 | export VAULT_ADDR= 19 | export VAULT_TOKEN= 20 | terraform init 21 | terraform plan 22 | terraform apply 23 | ``` 24 | If using Terraform Enterprise, do the following: 25 | 26 | 1. Create a workspace in an organization connected to Github.com with an OAuth app and connect your workspace to this repository or a one containing the same code. 27 | 1. Set the VAULT_ADDR and VAULT_TOKEN environment variables on the workspace. 28 | 1. Click the "Queue Plan" button in the workspace. 29 | 1. Verify that the Plan does not give any errors. 30 | 1. Click the "Confirm and Apply" button to dynamically generate your AWS keys and provision your VPC with them. 31 | 32 | ## Cleanup 33 | If using Terraform Open Source, execute `terraform destroy`. 34 | 35 | If using Terraform Enterprise, add the environment variable "CONFIRM_DESTROY" with value 1 to your workspace and then click the "Queue destroy plan" button on the Settings tab of the workspace to queue the destruction of your VPC. After the plan finishes, click the "Confirm and Apply" button to destroy your VPC and associated resources. 
36 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.11.0" 3 | } 4 | 5 | // Vault provider 6 | // Set VAULT_ADDR and VAULT_TOKEN environment variables 7 | provider "vault" {} 8 | 9 | // AWS credentials from Vault 10 | data "vault_aws_access_credentials" "aws_creds" { 11 | backend = "aws" 12 | role = "deploy" 13 | } 14 | 15 | // Setup the core provider information. 16 | provider "aws" { 17 | access_key = "${data.vault_aws_access_credentials.aws_creds.access_key}" 18 | secret_key = "${data.vault_aws_access_credentials.aws_creds.secret_key}" 19 | region = "${var.region}" 20 | } 21 | 22 | data "aws_availability_zones" "main" {} 23 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/networks-firewalls-ingress.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group_rule" "ssh" { 2 | security_group_id = "${aws_security_group.egress_public.id}" 3 | type = "ingress" 4 | protocol = "tcp" 5 | from_port = 22 6 | to_port = 22 7 | cidr_blocks = ["0.0.0.0/0"] 8 | } 9 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/networks-firewalls.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "egress_public" { 2 | name = "${var.environment_name}-egress_public" 3 | description = "${var.environment_name}-egress_public" 4 | vpc_id = "${aws_vpc.main.id}" 5 | } 6 | 7 | resource "aws_security_group_rule" "egress_public" { 8 | security_group_id = "${aws_security_group.egress_public.id}" 9 | type = "egress" 10 | protocol = "-1" 11 | from_port = 0 12 | to_port = 0 13 | cidr_blocks = 
["0.0.0.0/0"] 14 | } 15 | 16 | resource "aws_security_group_rule" "ingress_internal" { 17 | security_group_id = "${aws_security_group.egress_public.id}" 18 | type = "ingress" 19 | protocol = "-1" 20 | from_port = 0 21 | to_port = 0 22 | self = "true" 23 | } 24 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/networks-gateways.tf: -------------------------------------------------------------------------------- 1 | resource "aws_internet_gateway" "main" { 2 | vpc_id = "${aws_vpc.main.id}" 3 | 4 | tags { 5 | Name = "${var.environment_name}" 6 | } 7 | } 8 | 9 | resource "aws_nat_gateway" "nat" { 10 | count = "${length(var.vpc_cidrs_public)}" 11 | 12 | allocation_id = "${element(aws_eip.nat.*.id,count.index)}" 13 | subnet_id = "${element(aws_subnet.public.*.id,count.index)}" 14 | } 15 | 16 | resource "aws_eip" "nat" { 17 | count = "${length(var.vpc_cidrs_public)}" 18 | 19 | vpc = true 20 | } 21 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/networks-routes.tf: -------------------------------------------------------------------------------- 1 | # 2 | # Public 3 | # 4 | resource "aws_route_table" "public" { 5 | vpc_id = "${aws_vpc.main.id}" 6 | 7 | route { 8 | cidr_block = "0.0.0.0/0" 9 | gateway_id = "${aws_internet_gateway.main.id}" 10 | } 11 | 12 | tags { 13 | Name = "${var.environment_name}-public" 14 | } 15 | } 16 | 17 | resource "aws_route_table_association" "public" { 18 | count = "${length(var.vpc_cidrs_public)}" 19 | 20 | subnet_id = "${element(aws_subnet.public.*.id,count.index)}" 21 | route_table_id = "${aws_route_table.public.id}" 22 | } 23 | 24 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/networks-subnets.tf: -------------------------------------------------------------------------------- 1 | 
resource "aws_subnet" "public" { 2 | count = "${length(var.vpc_cidrs_public)}" 3 | 4 | vpc_id = "${aws_vpc.main.id}" 5 | availability_zone = "${element(data.aws_availability_zones.main.names,count.index)}" 6 | cidr_block = "${element(var.vpc_cidrs_public,count.index)}" 7 | map_public_ip_on_launch = true 8 | 9 | tags { 10 | Name = "${var.environment_name}-public-${count.index}" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/networks.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "main" { 2 | cidr_block = "${var.vpc_cidr}" 3 | enable_dns_hostnames = true 4 | 5 | tags { 6 | Name = "${var.environment_name}" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/outputs.tf: -------------------------------------------------------------------------------- 1 | # Outputs 2 | output "vpc_id" { 3 | value = "${aws_vpc.main.id}" 4 | } 5 | 6 | output "subnet_public_ids" { 7 | value = ["${aws_subnet.public.*.id}"] 8 | } 9 | 10 | output "security_group_apps" { 11 | value = "${aws_security_group.egress_public.id}" 12 | } 13 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds-from-vault/variables.tf: -------------------------------------------------------------------------------- 1 | # Required variables 2 | variable "environment_name" { 3 | description = "Environment Name" 4 | default = "Acme" 5 | } 6 | 7 | variable "region" { 8 | description = "AWS region" 9 | default = "us-west-2" 10 | } 11 | 12 | # Optional variables 13 | variable "vpc_cidr" { 14 | default = "172.19.0.0/16" 15 | } 16 | 17 | variable "vpc_cidrs_public" { 18 | default = [ 19 | "172.19.0.0/20", 20 | ] 21 | } 22 | 
-------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds/assets/dynamic-iam-creds-iam-ec2-policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/dynamic-aws-creds/assets/dynamic-iam-creds-iam-ec2-policy.png -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds/assets/dynamic-iam-creds-iam-policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/dynamic-aws-creds/assets/dynamic-iam-creds-iam-policy.png -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds/assets/dynamic-iam-creds.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/dynamic-aws-creds/assets/dynamic-iam-creds.png -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds/assets/ec2-instance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/infrastructure-as-code/dynamic-aws-creds/assets/ec2-instance.png -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds/consumer-workspace/main.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "dynamic-aws-creds-consumer" } 2 | variable "path" { default = 
"../producer-workspace/terraform.tfstate" } 3 | variable "ttl" { default = "1" } 4 | 5 | terraform { 6 | backend "local" { 7 | path = "terraform.tfstate" 8 | } 9 | } 10 | 11 | data "terraform_remote_state" "producer" { 12 | backend = "local" 13 | 14 | config { 15 | path = "${var.path}" 16 | } 17 | } 18 | 19 | data "vault_aws_access_credentials" "creds" { 20 | backend = "${data.terraform_remote_state.producer.backend}" 21 | role = "${data.terraform_remote_state.producer.role}" 22 | } 23 | 24 | provider "aws" { 25 | access_key = "${data.vault_aws_access_credentials.creds.access_key}" 26 | secret_key = "${data.vault_aws_access_credentials.creds.secret_key}" 27 | } 28 | 29 | data "aws_ami" "ubuntu" { 30 | most_recent = true 31 | 32 | filter { 33 | name = "name" 34 | values = ["ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-*"] 35 | } 36 | 37 | filter { 38 | name = "virtualization-type" 39 | values = ["hvm"] 40 | } 41 | 42 | owners = ["099720109477"] # Canonical 43 | } 44 | 45 | # Create AWS EC2 Instance 46 | resource "aws_instance" "main" { 47 | ami = "${data.aws_ami.ubuntu.id}" 48 | instance_type = "t2.nano" 49 | 50 | tags { 51 | Name = "${var.name}" 52 | TTL = "${var.ttl}" 53 | owner = "${var.name}-guide" 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /infrastructure-as-code/dynamic-aws-creds/producer-workspace/main.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { } 2 | variable "aws_secret_key" { } 3 | variable "name" { default = "dynamic-aws-creds-producer" } 4 | 5 | terraform { 6 | backend "local" { 7 | path = "terraform.tfstate" 8 | } 9 | } 10 | 11 | provider "vault" {} 12 | 13 | resource "vault_aws_secret_backend" "aws" { 14 | access_key = "${var.aws_access_key}" 15 | secret_key = "${var.aws_secret_key}" 16 | path = "${var.name}-path" 17 | 18 | default_lease_ttl_seconds = "120" 19 | max_lease_ttl_seconds = "240" 20 | } 21 | 22 | 
resource "vault_aws_secret_backend_role" "producer" { 23 | backend = "${vault_aws_secret_backend.aws.path}" 24 | name = "${var.name}-role" 25 | 26 | policy = < >(tee /var/log/user-data.log|logger -t user-data ) 2>&1 4 | 5 | local_ipv4="$(echo -e `hostname -I` |awk '{print $1}' | tr -d '[:space:]')" 6 | 7 | # stop consul so it can be configured correctly 8 | systemctl stop consul 9 | 10 | # clear the consul data directory ready for a fresh start 11 | rm -rf /opt/consul/data/* 12 | 13 | # seeing failed nodes listed in consul members with their solo config 14 | # try a 2 min sleep to see if it helps with all instances wiping data 15 | # in a similar time window 16 | #sleep 120 17 | 18 | jq ".retry_join += [\"provider=azure tag_name=consul_datacenter tag_value=${consul_datacenter} subscription_id=${auto_join_subscription_id} tenant_id=${auto_join_tenant_id} client_id=${auto_join_client_id} secret_access_key=${auto_join_secret_access_key}\"]" < /etc/consul.d/consul-default.json > /tmp/consul-default.json.tmp 19 | 20 | sed -i -e "s/127.0.0.1/$${local_ipv4}/" /tmp/consul-default.json.tmp 21 | mv /tmp/consul-default.json.tmp /etc/consul.d/consul-default.json 22 | chown consul:consul /etc/consul.d/consul-default.json 23 | 24 | # add the cluster instance count to the config with jq 25 | jq ".bootstrap_expect = ${cluster_size}" < /etc/consul.d/consul-server.json > /tmp/consul-server.json.tmp 26 | 27 | # change 'leave_on_terminate' to false for server nodes (this is the default but we had it set to true to quickly remove nodes before configuring) 28 | jq ".leave_on_terminate = false" < /etc/consul.d/consul-server.json > /tmp/consul-server.json.tmp 29 | 30 | mv /tmp/consul-server.json.tmp /etc/consul.d/consul-server.json 31 | chown consul:consul /etc/consul.d/consul-server.json 32 | 33 | # start consul once it is configured correctly 34 | systemctl start consul 35 | -------------------------------------------------------------------------------- 
/infrastructure-as-code/hashistack/dev/terraform-azure/modules/consul-azure/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.10.1" 3 | } 4 | 5 | module "images" { 6 | source = "../images-azure" 7 | 8 | os = "${var.os}" 9 | } 10 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/modules/images-azure/_interface.tf: -------------------------------------------------------------------------------- 1 | // 2 | // Variables 3 | // 4 | variable "os" { 5 | type = "string" 6 | } 7 | 8 | // 9 | // Variables w/ Defaults 10 | // 11 | variable "user" { 12 | default = "azure-user" 13 | } 14 | 15 | ################################################################ 16 | # NOTE!! 17 | # 18 | # As of 2017/03/17, the RHEL images on Azure do not support cloud-init, so 19 | # we specifically disabled support for RHEL on Azure until cloud-init is 20 | # available. 
21 | ################################################################ 22 | variable "publisher_map" { 23 | default = { 24 | #rhel = "RedHat" 25 | ubuntu = "Canonical" 26 | } 27 | } 28 | 29 | variable "offer_map" { 30 | default = { 31 | #rhel = "RHEL" 32 | ubuntu = "UbuntuServer" 33 | } 34 | } 35 | 36 | variable "sku_map" { 37 | default = { 38 | #rhel = "7.3" 39 | ubuntu = "16.04-LTS" 40 | } 41 | } 42 | 43 | variable "version_map" { 44 | default = { 45 | #rhel = "latest" 46 | ubuntu = "latest" 47 | } 48 | } 49 | 50 | // 51 | // Outputs 52 | // 53 | output "os_user" { 54 | value = "${var.user}" 55 | } 56 | 57 | output "base_publisher" { 58 | value = "${lookup(var.publisher_map,var.os)}" 59 | } 60 | 61 | output "base_offer" { 62 | value = "${lookup(var.offer_map,var.os)}" 63 | } 64 | 65 | output "base_sku" { 66 | value = "${lookup(var.sku_map,var.os)}" 67 | } 68 | 69 | output "base_version" { 70 | value = "${lookup(var.version_map,var.os)}" 71 | } 72 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/modules/network-azure/README.md: -------------------------------------------------------------------------------- 1 | # network-azure 2 | 3 | Creates a standard network with: 4 | * Three public subnets 5 | * Three private subnets 6 | * One jumphost in each public subnet CIDR 7 | * The default is 3 but this can be controlled by the number of CIDRs passed into `var.network_cidrs_public` 8 | 9 | ## Requirements 10 | 11 | The following environment variables must be set: 12 | 13 | ``` 14 | AZURE_CLIENT_ID 15 | AZURE_CLIENT_SECRET 16 | AZURE_SUBSCRIPTION_ID 17 | AZURE_TENANT_ID 18 | ``` 19 | 20 | ## Usage 21 | 22 | ``` 23 | resource "azurerm_resource_group" "main" { 24 | name = "${var.environment_name}" 25 | location = "${var.location}" 26 | } 27 | 28 | module "ssh_key" { 29 | source = "github.com/hashicorp-modules/ssh-keypair-data.git" 30 | } 31 | 32 | module "network" { 33 | source = 
"github.com/hashicorp-modules/network-azure.git" 34 | environment_name = "${var.environment_name}" 35 | resource_group_name = "${azurerm_resource_group.main.name}" 36 | location = "${var.location}" 37 | network_cidrs_private = "${var.network_cidrs_private}" 38 | network_cidrs_public = "${var.network_cidrs_public}" 39 | os = "${var.os}" 40 | public_key_data = "${module.ssh_key.public_key_data}" 41 | } 42 | ``` 43 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/modules/network-azure/_interface.tf: -------------------------------------------------------------------------------- 1 | # Required Variables 2 | variable "environment_name" { 3 | type = "string" 4 | } 5 | 6 | variable "resource_group_name" { 7 | type = "string" 8 | } 9 | 10 | variable "location" { 11 | type = "string" 12 | } 13 | 14 | variable "os" { 15 | type = "string" 16 | } 17 | 18 | variable "public_key_data" { 19 | type = "string" 20 | } 21 | 22 | # Optional Variables 23 | variable "network_cidr" { 24 | default = "172.31.0.0/16" 25 | } 26 | 27 | variable "network_cidrs_public" { 28 | default = [ 29 | "172.31.0.0/20", 30 | "172.31.16.0/20", 31 | "172.31.32.0/20", 32 | ] 33 | } 34 | 35 | variable "network_cidrs_private" { 36 | default = [ 37 | "172.31.48.0/20", 38 | "172.31.64.0/20", 39 | "172.31.80.0/20", 40 | ] 41 | } 42 | 43 | variable "jumphost_vm_size" { 44 | default = "Standard_A0" 45 | description = "Azure virtual machine size for jumphost" 46 | } 47 | 48 | # Outputs 49 | output "jumphost_ips_public" { 50 | value = ["${azurerm_public_ip.jumphost.*.ip_address}"] 51 | } 52 | 53 | output "jumphost_username" { 54 | value = "${module.images.os_user}" 55 | } 56 | 57 | output "subnet_public_ids" { 58 | value = ["${azurerm_subnet.public.*.id}"] 59 | } 60 | 61 | output "subnet_private_ids" { 62 | value = ["${azurerm_subnet.private.*.id}"] 63 | } 64 | 
-------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/modules/network-azure/firewalls-jumphost.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_network_security_group" "jumphost" { 2 | name = "${var.environment_name}-jumphost" 3 | location = "${var.location}" 4 | resource_group_name = "${var.resource_group_name}" 5 | } 6 | 7 | resource "azurerm_network_security_rule" "jumphost_ssh" { 8 | name = "${var.environment_name}-jumphost-ssh" 9 | resource_group_name = "${var.resource_group_name}" 10 | network_security_group_name = "${azurerm_network_security_group.jumphost.name}" 11 | 12 | priority = 100 13 | direction = "Inbound" 14 | access = "Allow" 15 | protocol = "Tcp" 16 | 17 | source_address_prefix = "*" 18 | source_port_range = "*" 19 | destination_port_range = "22" 20 | destination_address_prefix = "*" 21 | } 22 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/modules/network-azure/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.10.1" 3 | } 4 | 5 | module "images" { 6 | source = "../images-azure" 7 | 8 | os = "${var.os}" 9 | } 10 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/modules/network-azure/networks.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_virtual_network" "main" { 2 | name = "${var.environment_name}" 3 | address_space = ["${var.network_cidr}"] 4 | location = "${var.location}" 5 | resource_group_name = "${var.resource_group_name}" 6 | } 7 | -------------------------------------------------------------------------------- 
/infrastructure-as-code/hashistack/dev/terraform-azure/modules/network-azure/subnets.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_subnet" "public" { 2 | count = "${length(var.network_cidrs_public)}" 3 | 4 | name = "${var.environment_name}-public-${count.index}" 5 | resource_group_name = "${var.resource_group_name}" 6 | virtual_network_name = "${azurerm_virtual_network.main.name}" 7 | address_prefix = "${element(var.network_cidrs_public,count.index)}" 8 | } 9 | 10 | resource "azurerm_subnet" "private" { 11 | count = "${length(var.network_cidrs_private)}" 12 | 13 | name = "${var.environment_name}-private-${count.index}" 14 | resource_group_name = "${var.resource_group_name}" 15 | virtual_network_name = "${azurerm_virtual_network.main.name}" 16 | address_prefix = "${element(var.network_cidrs_private,count.index)}" 17 | } 18 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/modules/ssh-keypair-data/_interface.tf: -------------------------------------------------------------------------------- 1 | # Optional Variables 2 | variable "private_key_filename" { 3 | default = "private_key.pem" 4 | description = "Filename to write the private key data to eg key.pem" 5 | } 6 | 7 | # Outputs 8 | output "private_key_pem" { 9 | value = "${tls_private_key.main.private_key_pem}" 10 | } 11 | 12 | output "public_key_data" { 13 | value = "${tls_private_key.main.public_key_openssh}" 14 | } 15 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/modules/ssh-keypair-data/main.tf: -------------------------------------------------------------------------------- 1 | resource "tls_private_key" "main" { 2 | algorithm = "RSA" 3 | } 4 | 5 | resource "null_resource" "main" { 6 | provisioner "local-exec" { 7 | command = "echo 
\"${tls_private_key.main.private_key_pem}\" > ${var.private_key_filename}" 8 | } 9 | 10 | provisioner "local-exec" { 11 | command = "chmod 600 ${var.private_key_filename}" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-azure/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | custom_image_id = "" 2 | 3 | auto_join_subscription_id = "" 4 | 5 | auto_join_client_id = "" 6 | 7 | auto_join_client_secret = "" 8 | 9 | auto_join_tenant_id = "" 10 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-gcp/README.md: -------------------------------------------------------------------------------- 1 | # Build the HashiCorp Stack on AWS 2 | 3 | ## Usage for `terraform-gcp` 4 | - Set up your gcp credentials locally or on TFE. You may have a file on your local machine like this when you're done: 5 | ``` 6 | export GOOGLE_CREDENTIALS="/home/username/.gcloud/my-project.json" 7 | export GOOGLE_PROJECT="my-project" 8 | export GOOGLE_REGION="us-east1" 9 | ``` 10 | 11 | - Clone this repository. 12 | ``` 13 | $ git clone git@github.com:hashicorp-guides/hashistack.git 14 | ``` 15 | 16 | - Change into the correct directory. 17 | ``` 18 | $ cd /path/to/hashistack/terraform-gcp 19 | ``` 20 | 21 | - Make a `terraform.tfvars` file and put in the appropriate variables. 22 | ``` 23 | $ cp terraform.tfvars.example terraform.tfvars 24 | $ vi terraform.tfvars 25 | ``` 26 | 27 | - Run a terraform plan and an apply if the plan succeeds. 
28 | ``` 29 | $ terraform plan 30 | $ terraform apply 31 | ``` 32 | 33 | - There will be a `.pem` file named like this that you can use to SSH to your instances: `hashistack-r4nd0m456.pem` 34 | 35 | - To access the UIs for Consul and Vault respectively from your local machine (on `http://localhost:<port>`), you can create the following SSH tunnels: 36 | 37 | ``` 38 | $ ssh -i hashistack-r4nd0m456.pem -L 8200:localhost:8200 ec2-user@<INSTANCE_PUBLIC_IP> 39 | $ ssh -i hashistack-r4nd0m456.pem -L 8500:localhost:8500 ec2-user@<INSTANCE_PUBLIC_IP> 40 | ``` 41 | 42 | **Note:** Terraform currently does not allow specifying a network name and subnet which the Google API requires. As such you can only deploy the hashistack instances into a default network and subnet. This means you cannot use the network created by the network-gcp module. This restriction is no longer compatible with the Google API, and Terraform needs to be updated to correct this. Thus this does not work in the same way as the AWS and Azure versions, and is essentially broken at the current time. But the general structure is here. 43 | 44 | ### Limitations noted in the [hashistack-gcp](https://github.com/hashicorp-modules/hashistack-gcp) repository 45 | - **This repository is currently being tested.** 46 | - Vault is not configured to use TLS. 47 | - Vault is not initialized. Please refer to the [Vault documentation](https://www.vaultproject.io/docs/internals/architecture.html) for instructions. 48 | - Nomad is not configured to use Vault as it requires a Vault Token. Please refer to the [Nomad documentation](https://www.nomadproject.io/docs/vault-integration/) for information on how to configure the integration.
49 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-gcp/main.tf: -------------------------------------------------------------------------------- 1 | # Set environment name 2 | resource "random_id" "environment_name" { 3 | byte_length = 4 4 | prefix = "${var.environment_name_prefix}-" 5 | } 6 | 7 | provider "google" { 8 | region = "${var.gcp_region}" 9 | project = "${var.project_name}" 10 | credentials = "${file(var.account_file_json)}" 11 | } 12 | 13 | module "network-gcp" { 14 | source = "git::ssh://git@github.com/hashicorp-modules/network-gcp" 15 | environment_name = "${random_id.environment_name.hex}" 16 | os = "${var.os}" 17 | os_version = "${var.os_version}" 18 | ssh_key_data = "${module.ssh-keypair-data.public_key_data}" 19 | ssh_user = "${var.ssh_user}" 20 | } 21 | 22 | module "hashistack-gcp" { 23 | source = "git::ssh://git@github.com/hashicorp-modules/hashistack-gcp" 24 | region = "${var.gcp_region}" 25 | project_name = "${var.project_name}" 26 | image_bucket_name = "${var.image_bucket_name}" 27 | account_file_json = "${var.account_file_json}" 28 | nomad_version = "${var.nomad_version}" 29 | vault_version = "${var.vault_version}" 30 | consul_version = "${var.consul_version}" 31 | environment_name = "${random_id.environment_name.hex}" 32 | cluster_name = "${random_id.environment_name.hex}" 33 | cluster_size = "${var.cluster_size}" 34 | os = "${var.os}" 35 | os_version = "${var.os_version}" 36 | ssh_user = "${var.ssh_user}" 37 | ssh_key_data = "${module.ssh-keypair-data.public_key_data}" 38 | # Terraform currently does not let you specify a network and subnet which the 39 | # Google API requires. As such this only works in the default network. 
40 | #subnet = "${module.network-gcp.subnet_private_names[0]}" 41 | #network = "${module.network-gcp.network_name}" 42 | machine_type = "${var.machine_type}" 43 | environment = "${var.environment}" 44 | } 45 | 46 | module "ssh-keypair-data" { 47 | source = "git::git@github.com:hashicorp-modules/ssh-keypair-data.git" 48 | private_key_filename = "${random_id.environment_name.hex}" 49 | } 50 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/terraform-gcp/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | # Operating System to use ie RHEL or Ubuntu 2 | os = "Ubuntu" 3 | #os = "RHEL" 4 | 5 | # Operating System version to use ie 7.3 (for RHEL) or 16.04 (for Ubuntu) 6 | os_version = "16.04" 7 | #os_version = "7.3" 8 | 9 | # GCP Region 10 | gcp_region = "us-east1" 11 | 12 | project_name = "my-hashistack-test-1" 13 | 14 | account_file_json = "~/.gcloud/my-image-test.json" 15 | 16 | cluster_name = "my-hashistack" 17 | environment_name = "my-hashistack" 18 | 19 | consul_version = "0.9.2+ent" 20 | nomad_version = "0.6.2" 21 | vault_version = "0.8.1+ent" 22 | 23 | machine_type = "g1-small" 24 | 25 | image_bucket_name = "my-image-store" 26 | environment = "test" 27 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/dev/vagrant-local/README.md: -------------------------------------------------------------------------------- 1 | # Provision a Development HashiStack Cluster in Vagrant 2 | 3 | The goal of this guide is to allows users to easily provision a development HashiStack cluster in just a few commands. 
4 | 5 | ## Reference Material 6 | 7 | - [Vagrant Getting Started](https://www.vagrantup.com/intro/getting-started/index.html) 8 | - [Vagrant Docs](https://www.vagrantup.com/docs/index.html) 9 | - [Consul Getting Started](https://www.consul.io/intro/getting-started/install.html) 10 | - [Consul Docs](https://www.consul.io/docs/index.html) 11 | - [Vault Getting Started](https://www.vaultproject.io/intro/getting-started/install.html) 12 | - [Vault Docs](https://www.vaultproject.io/docs/index.html) 13 | - [Nomad Getting Started](https://www.nomadproject.io/intro/getting-started/install.html) 14 | - [Nomad Docs](https://www.nomadproject.io/docs/index.html) 15 | 16 | ## Estimated Time to Complete 17 | 18 | 5 minutes. 19 | 20 | ## Challenge 21 | 22 | There are many different ways to provision and configure an easily accessible development HashiStack cluster, making it difficult to get started. 23 | 24 | ## Solution 25 | 26 | Provision a development HashiStack cluster in Vagrant. 27 | 28 | The Vagrant Development HashiStack guide is for **educational purposes only**. It's designed to allow you to quickly stand up a single instance with Consul, Vault, & Nomad running in `-dev` mode. The single node is provisioned into a local VM, allowing for easy access to the instance. Because Consul, Vault, & Nomad are running in `-dev` mode, all data is in-memory and not persisted to disk. If any agent fails or the node restarts, all data will be lost. This is only meant for local use. 29 | 30 | ## Prerequisites 31 | 32 | - [Download Vagrant](https://www.vagrantup.com/downloads.html) 33 | - [Download Virtualbox](https://www.virtualbox.org/wiki/Downloads) 34 | 35 | ## Steps 36 | 37 | We will now provision the development HashiStack cluster in Vagrant. 38 | 39 | ### Step 1: Start Vagrant 40 | 41 | Run `vagrant up` to start the VM and configure the HashiStack. That's it! Once provisioned, view the Vagrant output for next steps.
42 | 43 | #### CLI 44 | 45 | [`vagrant up` Command](https://www.vagrantup.com/docs/cli/up.html) 46 | 47 | ##### Request 48 | 49 | ```sh 50 | $ vagrant up 51 | ``` 52 | 53 | ##### Response 54 | ``` 55 | ``` 56 | 57 | ## Next Steps 58 | 59 | Now that you've provisioned and configured the HashiStack, start walking through the below product guides. 60 | 61 | - [Consul Guides](https://www.consul.io/docs/guides/index.html) 62 | - [Vault Guides](https://www.vaultproject.io/guides/index.html) 63 | - [Nomad Guides](https://www.nomadproject.io/guides/index.html) 64 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/quick-start/terraform-aws/gitignore.tf: -------------------------------------------------------------------------------- 1 | # `.tf` files that contain the word "gitignore" are ignored 2 | # by git in the `.gitignore` file at the root of this repo. 3 | 4 | # If you have local Terraform configuration that you want 5 | # ignored like Terraform backend configuration, create 6 | # a new file (separate from this one) that contains the 7 | # word "gitignore" (e.g. `backend.gitignore.tf`). 
8 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/templates/install-base.sh.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "[---Begin install-base.sh---]" 4 | 5 | echo "Wait for system to be ready" 6 | sleep 10 7 | 8 | echo "Run base script" 9 | curl https://raw.githubusercontent.com/hashicorp/guides-configuration/master/shared/scripts/base.sh | bash 10 | 11 | echo "[---install-base.sh Complete---]" 12 | -------------------------------------------------------------------------------- /infrastructure-as-code/hashistack/templates/install-consul-systemd.sh.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "[---Begin install-consul-systemd.sh---]" 4 | 5 | echo "Setup Consul user" 6 | export GROUP=consul 7 | export USER=consul 8 | export COMMENT=Consul 9 | export HOME=/srv/consul 10 | curl https://raw.githubusercontent.com/hashicorp/guides-configuration/master/shared/scripts/setup-user.sh | bash 11 | 12 | echo "Install Consul" 13 | export VERSION=${consul_version} 14 | export URL=${consul_url} 15 | curl https://raw.githubusercontent.com/hashicorp/guides-configuration/master/consul/scripts/install-consul.sh | bash 16 | 17 | echo "Install Consul Systemd" 18 | curl https://raw.githubusercontent.com/hashicorp/guides-configuration/master/consul/scripts/install-consul-systemd.sh | bash 19 | 20 | echo "Cleanup install files" 21 | curl https://raw.githubusercontent.com/hashicorp/guides-configuration/master/shared/scripts/cleanup.sh | bash 22 | 23 | echo "Set variables" 24 | CONSUL_CONFIG_FILE=/etc/consul.d/default.json 25 | CONSUL_CONFIG_OVERRIDE_FILE=/etc/consul.d/z-override.json 26 | NODE_NAME=$(hostname) 27 | 28 | echo "Minimal configuration for Consul UI" 29 | cat < upper(l)} 19 | } 20 | -------------------------------------------------------------------------------- 
/infrastructure-as-code/terraform-0.12-examples/for-expressions/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.0" 3 | } 4 | 5 | provider "aws" { 6 | region = "us-east-1" 7 | } 8 | 9 | resource "aws_vpc" "my_vpc" { 10 | cidr_block = "172.16.0.0/16" 11 | 12 | tags = { 13 | Name = "tf-0.12-for-example" 14 | } 15 | } 16 | 17 | resource "aws_subnet" "my_subnet" { 18 | vpc_id = aws_vpc.my_vpc.id 19 | cidr_block = "172.16.10.0/24" 20 | 21 | tags = { 22 | Name = "tf-0.12-for-example" 23 | } 24 | } 25 | 26 | data "aws_ami" "ubuntu_14_04" { 27 | most_recent = true 28 | 29 | filter { 30 | name = "name" 31 | values = ["ubuntu/images/hvm/ubuntu-trusty-14.04-amd64-server-*"] 32 | } 33 | 34 | owners = ["099720109477"] 35 | } 36 | 37 | resource "aws_instance" "ubuntu" { 38 | count = 3 39 | ami = data.aws_ami.ubuntu_14_04.image_id 40 | instance_type = "t2.micro" 41 | associate_public_ip_address = ( count.index == 1 ? true : false) 42 | subnet_id = aws_subnet.my_subnet.id 43 | 44 | tags = { 45 | Name = format("terraform-0.12-for-demo-%d", count.index) 46 | } 47 | } 48 | 49 | # This uses the old splat expression 50 | output "private_addresses_old" { 51 | value = aws_instance.ubuntu.*.private_dns 52 | } 53 | 54 | # This uses the new full splat operator (*) 55 | output "private_addresses_full_splat" { 56 | value = [ aws_instance.ubuntu[*].private_dns ] 57 | } 58 | 59 | # This uses the new for expression 60 | output "private_addresses_new" { 61 | value = [ 62 | for instance in aws_instance.ubuntu: 63 | instance.private_dns 64 | ] 65 | } 66 | 67 | # This uses the new conditional expression 68 | # that can work with lists 69 | # This uses the list interpolation function 70 | output "ips_with_list_interpolation" { 71 | value = [ 72 | for instance in aws_instance.ubuntu: 73 | (instance.public_ip != "" ? 
list(instance.private_ip, instance.public_ip) : list(instance.private_ip)) 74 | ] 75 | } 76 | 77 | # It also works with lists in [x, y, z] form 78 | output "ips_with_list_in_brackets" { 79 | value = [ 80 | for instance in aws_instance.ubuntu: 81 | (instance.public_ip != "" ? [instance.private_ip, instance.public_ip] : [instance.private_ip]) 82 | ] 83 | } 84 | -------------------------------------------------------------------------------- /infrastructure-as-code/terraform-0.12-examples/new-template-syntax/README.md: -------------------------------------------------------------------------------- 1 | # New Template Syntax 2 | The [New Template Syntax](./new-template-syntax) example illustrates how the new [Template Syntax](https://www.hashicorp.com/blog/terraform-0-12-template-syntax) can be used to support **if** conditionals and **for** expressions inside `%{}` template strings which are also referred to as directives. 3 | 4 | The new template syntax can be used inside Terraform code just like the older `${}` interpolations. It can also be used inside template files loaded with the template_file data source provided that you use version 2.0 or higher of the Template Provider. 
5 | 6 | The code in main.tf creates a variable called names with a list of 3 names and uses the code below to show all of them on their own rows in an output called all_names: 7 | ``` 8 | output all_names { 9 | value = < ${policy_names_list[i]}.temp 44 | # Remove ^M from end of the file 45 | sed "s/ //g" < ${policy_names_list[i]}.temp > ${policy_names_list[i]}.sentinel 46 | # delete temporary file 47 | rm ${policy_names_list[i]}.temp 48 | done 49 | 50 | echo "Exported ${#policy_names_list[@]} policies" 51 | -------------------------------------------------------------------------------- /operations/sentinel-policies-scripts/import_policies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script imports all policies in the current directory into a 3 | # specific policy set within a specific organization on a TFE server. 4 | 5 | # Make sure ATLAS_TOKEN environment variable is set 6 | # to owners team token for organization 7 | # or to user token for member of the owners team 8 | 9 | # Set address if using private Terraform Enterprise server. 10 | # You should edit these before running. 
11 | address="app.terraform.io" 12 | # Set organization to use 13 | organization="" 14 | # Set ID of policy set that all policies should be added to 15 | policy_set_id="" 16 | 17 | echo "Using address: $address" 18 | echo "Using organization: $organization" 19 | echo "Using policy set ID: $policy_set_id" 20 | 21 | # Count the policies 22 | declare -i count=0 23 | 24 | # for loop to process all files with *.sentinel extension 25 | for f in *.sentinel; do 26 | echo "file is: $f" 27 | policy_name=$(echo "${f%%.*}") 28 | count=$count+1 29 | 30 | # Replace placeholders in template 31 | sed "s/file-name/$f/;s/policy-name/$policy_name/;s/policy-set-id/$policy_set_id/" < create-policy.template.json > create-policy.json 32 | 33 | # Create the policy 34 | policy_create_result=$(curl --header "Authorization: Bearer $ATLAS_TOKEN" --header "Content-Type: application/vnd.api+json" --request POST --data @create-policy.json "https://${address}/api/v2/organizations/${organization}/policies") 35 | 36 | # Extract policy ID 37 | policy_id=$(echo $policy_create_result | python -c "import sys, json; print(json.load(sys.stdin)['data']['id'])") 38 | echo "Policy ID: " $policy_id 39 | 40 | # Upload policy 41 | policy_upload_result=$(curl --header "Authorization: Bearer $ATLAS_TOKEN" --header "Content-Type: application/octet-stream" --request PUT --data-binary @$f "https://${address}/api/v2/policies/$policy_id/upload" ) 42 | echo "Policy Upload Response: " $policy_upload_result 43 | 44 | done 45 | 46 | echo "Found $count Sentinel policies" 47 | 48 | -------------------------------------------------------------------------------- /self-serve-infrastructure/README.md: -------------------------------------------------------------------------------- 1 | # Self Service Infrastructure 2 | The examples in this directory illustrate how Terraform can be used to support self service Infrastructure. 
3 | 4 | ## getting-started 5 | This directory includes some examples of provisioning networking infrastructure in AWS, Azure, and Google. 6 | 7 | ## k8s-services 8 | This Terraform example can be used to provision a web application and redis database as Kubernetes pods to a Kubernetes cluster. It is intended to be used with the [k8s-cluster-aks](../infrastructure-as-code/k8s-cluster-aks) and [k8s-cluster-gke](../infrastructure-as-code/k8s-cluster-gke) configurations of this repository which provision Kubernetes clusters into Azure Kubernetes Service and Google Kubernetes Engine. But it could be used with other Kubernetes clusters too. 9 | 10 | ## k8s-services-openshift 11 | This Terraform example can be used to provision a web application and redis database as Kubernetes pods to an OpenShift cluster. It differs slightly from the k8s-services example because OpenShift service accounts each have two secrets, preventing the service_account resource of the Kubernetes provider from being used. It is intended to be used with the [k8s-cluster-openshift-aws](../infrastructure-as-code/k8s-cluster-openshift-aws) configuration which provisions an OpenShift 3.11 cluster into AWS. 12 | 13 | ## cats-and-dogs 14 | This directory contains the source code and docker files for the cats-and-dogs frontend and backend pods provisioned by the k8s-services and k8s-services-openshift Terraform code. 15 | -------------------------------------------------------------------------------- /self-serve-infrastructure/cats-and-dogs/README.md: -------------------------------------------------------------------------------- 1 | # Source Code for Cats-and-Dogs Applications 2 | The source code for the Cats-and-Dogs applications deployed to Kubernetes pods and services using the Terraform configuration [k8s-services](../k8s-services) for AKS or GKE or the Terraform configuration [k8s-services-openshift](../k8s-services-openshift) for OpenShift. 
3 | 4 | ## Backend 5 | The cats-and-dogs-backend application runs a redis database that stores votes recorded by users of the cats-and-dogs-frontend web application. It authenticates itself to a Vault server with the Kubernetes JWT service account token, reads the redis_pwd from Vault, and then starts the redis database with that password. 6 | 7 | ## Frontend 8 | The cats-and-dogs-frontend application is a simple Python web application that lets users vote for their favorite pets. It authenticates itself to a Vault server with the Kubernetes JWT service account token, reads the redis_pwd from Vault, and then connects to the redis database running in the cats-and-dogs-backend pod with that password. 9 | -------------------------------------------------------------------------------- /self-serve-infrastructure/cats-and-dogs/backend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y curl 5 | RUN apt-get install -y python3-pip 6 | ADD /vote-db /app 7 | 8 | ENTRYPOINT ["docker-entrypoint.sh"] 9 | 10 | EXPOSE 6379 11 | 12 | CMD ["/app/start_redis.sh"] 13 | -------------------------------------------------------------------------------- /self-serve-infrastructure/cats-and-dogs/backend/vote-db/start_redis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Authenticate against Vault 3 | login_result=$(curl --request POST --data '{"role": "demo", "jwt": "'"${K8S_TOKEN}"'"}' ${VAULT_ADDR}/v1/auth/${VAULT_K8S_BACKEND}login) 4 | 5 | # Read cats-and-dogs secret from Vault 6 | vault_token=$(echo $login_result | python3 -c "import sys, json; print(json.load(sys.stdin)['auth']['client_token'])") 7 | 8 | cats_and_dogs=$(curl -H "X-Vault-Token:$vault_token" ${VAULT_ADDR}/v1/secret/${VAULT_USER}/kubernetes/cats-and-dogs) 9 | 10 | redis_pwd=$(echo $cats_and_dogs | python3 -c "import sys, json; 
print(json.load(sys.stdin)['data']['redis_pwd'])") 11 | echo "redis_pwd is: $redis_pwd" 12 | redis-server --requirepass $redis_pwd 13 | -------------------------------------------------------------------------------- /self-serve-infrastructure/cats-and-dogs/frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uwsgi-nginx-flask:python3.6 2 | 3 | RUN pip install redis hvac 4 | 5 | ADD /azure-vote /app 6 | -------------------------------------------------------------------------------- /self-serve-infrastructure/cats-and-dogs/frontend/azure-vote/config_file.cfg: -------------------------------------------------------------------------------- 1 | # UI Configurations 2 | TITLE = 'Pets Voting App' 3 | VOTE1VALUE = 'Cats' 4 | VOTE2VALUE = 'Dogs' 5 | SHOWHOST = 'false' 6 | -------------------------------------------------------------------------------- /self-serve-infrastructure/cats-and-dogs/frontend/azure-vote/static/default.css: -------------------------------------------------------------------------------- 1 | body { 2 | background-color:#F8F8F8; 3 | } 4 | 5 | div#container { 6 | margin-top:5%; 7 | } 8 | 9 | div#space { 10 | display:block; 11 | margin: 0 auto; 12 | width: 500px; 13 | height: 10px; 14 | 15 | } 16 | 17 | div#logo { 18 | display:block; 19 | margin: 0 auto; 20 | width: 500px; 21 | text-align: center; 22 | font-size:30px; 23 | font-family:Helvetica; 24 | /*border-bottom: 1px solid black;*/ 25 | } 26 | 27 | div#form { 28 | padding: 20px; 29 | padding-right: 20px; 30 | padding-top: 20px; 31 | display:block; 32 | margin: 0 auto; 33 | width: 500px; 34 | text-align: center; 35 | font-size:30px; 36 | font-family:Helvetica; 37 | border-bottom: 1px solid black; 38 | border-top: 1px solid black; 39 | } 40 | 41 | div#results { 42 | display:block; 43 | margin: 0 auto; 44 | width: 500px; 45 | text-align: center; 46 | font-size:30px; 47 | font-family:Helvetica; 48 | } 49 | 50 | .button { 51 | 
background-color: #4CAF50; /* Green */ 52 | border: none; 53 | color: white; 54 | padding: 16px 32px; 55 | text-align: center; 56 | text-decoration: none; 57 | display: inline-block; 58 | font-size: 16px; 59 | margin: 4px 2px; 60 | -webkit-transition-duration: 0.4s; /* Safari */ 61 | transition-duration: 0.4s; 62 | cursor: pointer; 63 | width: 250px; 64 | } 65 | 66 | .button1 { 67 | background-color: white; 68 | color: black; 69 | border: 2px solid #008CBA; 70 | } 71 | 72 | .button1:hover { 73 | background-color: #008CBA; 74 | color: white; 75 | } 76 | .button2 { 77 | background-color: white; 78 | color: black; 79 | border: 2px solid #555555; 80 | } 81 | 82 | .button2:hover { 83 | background-color: #555555; 84 | color: white; 85 | } 86 | 87 | .button3 { 88 | background-color: white; 89 | color: black; 90 | border: 2px solid #f44336; 91 | } 92 | 93 | .button3:hover { 94 | background-color: #f44336; 95 | color: white; 96 | } -------------------------------------------------------------------------------- /self-serve-infrastructure/cats-and-dogs/frontend/azure-vote/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | {{title}} 6 | 7 | 11 | 12 | 13 | 14 |
15 |
16 | 17 |
18 |
19 | 20 | 21 | 22 |
23 |
24 |
{{button1}} - {{ value1 }} | {{button2}} - {{ value2 }}
25 | 26 |
27 |
28 | 29 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/README.md: -------------------------------------------------------------------------------- 1 | This directory includes some examples of provisioning networking infrastructure in AWS, Azure, and Google. 2 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-aws/_interface.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | default = "" 3 | description = "The default AZ to provision to for the provider" 4 | } 5 | 6 | variable "vpc_cidr_block" { 7 | default = "" 8 | description = "The default CIDR block for the VPC demo" 9 | } 10 | 11 | variable "subnet_cidr_block" { 12 | default = "" 13 | description = "The default CIDR block for the subnet demo" 14 | } 15 | 16 | variable "subnet_availability_zone" { 17 | default = "" 18 | description = "The default AZ for the subnet" 19 | } 20 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-aws/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "${var.region}" 3 | } 4 | 5 | resource "aws_vpc" "demo_vpc" { 6 | cidr_block = "${var.vpc_cidr_block}" 7 | 8 | tags { 9 | Name = "fp_demo_vpc" 10 | } 11 | } 12 | 13 | resource "aws_subnet" "demo_subnet" { 14 | vpc_id = "${aws_vpc.demo_vpc.id}" 15 | cidr_block = "${var.subnet_cidr_block}" 16 | availability_zone = "${var.subnet_availability_zone}" 17 | 18 | tags { 19 | Name = "fp_demo_subnet" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output 
"vpc_id_consumable" { 2 | value = "${aws_vpc.demo_vpc.id}" 3 | description = "This is the VPC ID for later use" 4 | } 5 | 6 | output "demo_subnet_id" { 7 | value = "${aws_subnet.demo_subnet.id}" 8 | description = "This is the Subnet ID for later use" 9 | } 10 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-aws/terraform.auto.tfvars: -------------------------------------------------------------------------------- 1 | # name = "self-serve-getting-started-override" # Override "name" variable default 2 | # instance_type = "t2.small" # Override "instance_type" variable default 3 | # tags = { foo = "bar" fizz = "buzz" } # Override "tags" variable default 4 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-azure/_interface.tf: -------------------------------------------------------------------------------- 1 | variable "rg_name" { 2 | default = "" 3 | description = "The default name for the Resource Group" 4 | } 5 | 6 | variable "rg_location" { 7 | default = "" 8 | description = "The default name for the Resource Group" 9 | } 10 | 11 | variable "vn_name" { 12 | default = "" 13 | description = "The default name for the Virtual Network" 14 | } 15 | 16 | variable "vn_address_space" { 17 | default = "" 18 | description = "The default address space for the Virtual Network" 19 | } 20 | 21 | variable "sb_name" { 22 | default = "" 23 | description = "The default name for the subnet" 24 | } 25 | 26 | variable "sb_address_prefix" { 27 | default = "" 28 | description = "The default address prefix for the Subnet" 29 | } 30 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-azure/main.tf: -------------------------------------------------------------------------------- 1 | provider "azurerm" { 2 | subscription_id = "" 3 | client_id 
= "" 4 | client_secret = "" 5 | tenant_id = "" 6 | } 7 | 8 | resource "azurerm_resource_group" "demo_resource_group" { 9 | name = "${var.rg_name}" 10 | location = "${var.rg_location}" 11 | } 12 | 13 | resource "azurerm_virtual_network" "demo_virtual_network" { 14 | name = "${var.vn_name}" 15 | address_space = ["${var.vn_address_space}"] 16 | location = "${azurerm_resource_group.demo_resource_group.location}" 17 | resource_group_name = "${azurerm_resource_group.demo_resource_group.name}" 18 | } 19 | 20 | resource "azurerm_subnet" "demo_subnet" { 21 | name = "${var.sb_name}" 22 | # Fixed: referenced nonexistent "azurerm_resource_group.demo_virtual_network"; the resource group is declared as "demo_resource_group" 23 | resource_group_name = "${azurerm_resource_group.demo_resource_group.name}" 24 | virtual_network_name = "${azurerm_virtual_network.demo_virtual_network.name}" 25 | address_prefix = "${var.sb_address_prefix}" 26 | } 27 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-azure/outputs.tf: -------------------------------------------------------------------------------- 1 | output "resource_group_consumable" { 2 | value = "${azurerm_resource_group.demo_resource_group.name}" 3 | description = "The Demo Resource Group Name for later use" 4 | } 5 | 6 | output "virtual_network_consumable_name" { 7 | value = "${azurerm_virtual_network.demo_virtual_network.name}" 8 | description = "The Demo Virtual Network name for later use" 9 | } 10 | 11 | output "virtual_network_consumable_address_space" { 12 | value = "${azurerm_virtual_network.demo_virtual_network.address_space}" 13 | description = "The Demo Virtual Network address space for later use" 14 | } 15 | 16 | output "subnet_consumable" { 17 | value = "${azurerm_subnet.demo_subnet.address_prefix}" 18 | description = "The Demo Subnet for later use" 19 | } 20 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-azure/terraform.auto.tfvars:
-------------------------------------------------------------------------------- 1 | # name = "self-serve-getting-started-override" # Override "name" variable default 2 | # network_location = "westus" # Override "network_location" variable default 3 | # compute_location = "West US 2" # Override "compute_location" variable default 4 | # tags = { foo = "bar" fizz = "buzz" } # Override "tags" variable default 5 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-gcp/_interface.tf: -------------------------------------------------------------------------------- 1 | variable "gn_name" { 2 | default = "" 3 | description = "The default name for the Compute Network" 4 | } 5 | 6 | variable "sn_name" { 7 | default = "" 8 | description = "The default name for the subnet" 9 | } 10 | 11 | variable "sn_region" { 12 | default = "" 13 | description = "The default region for the subnet" 14 | } 15 | 16 | variable "sn_cidr_range" { 17 | default = "" 18 | description = "The default Subnet CIDR Range" 19 | } 20 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-gcp/main.tf: -------------------------------------------------------------------------------- 1 | # Fixed: "project" was declared twice in this provider block (duplicate argument) 2 | provider "google" { 3 | project = "terraform-gcp-module-test" 4 | region = "us-central1" 5 | } 6 | 7 | resource "google_compute_network" "demo_network" { 8 | name = "${var.gn_name}" 9 | auto_create_subnetworks = "false" 10 | } 11 | 12 | resource "google_compute_subnetwork" "demo_subnetwork" { 13 | network = "${google_compute_network.demo_network.name}" 14 | name = "${var.sn_name}" 15 | region = "${var.sn_region}" 16 | ip_cidr_range = "${var.sn_cidr_range}" 17 | } 18 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-gcp/outputs.tf:
-------------------------------------------------------------------------------- 1 | output "compute_network_consumable" { 2 | value = "${google_compute_network.demo_network.name}" 3 | description = "The Network Name" 4 | } 5 | 6 | output "subnetwork_consumable_name" { 7 | value = "${google_compute_subnetwork.demo_subnetwork.name}" 8 | description = "The Subnet Name" 9 | } 10 | 11 | output "subnetwork_consumable_ip_cidr_range" { 12 | value = "${google_compute_subnetwork.demo_subnetwork.ip_cidr_range}" 13 | description = "The Subnet CIDR Range" 14 | } 15 | -------------------------------------------------------------------------------- /self-serve-infrastructure/getting-started/terraform-gcp/terraform.auto.tfvars: -------------------------------------------------------------------------------- 1 | # name = "self-serve-getting-started-override" # Override "name" variable default 2 | # region = "us-west1" # Override "region" variable default 3 | # zone = "us-west1-a" # Override "zone" variable default 4 | # service_port = "80" # Override "service_port" variable default 5 | # tags = { foo = "bar" fizz = "buzz" } # Override "tags" variable default 6 | -------------------------------------------------------------------------------- /self-serve-infrastructure/k8s-services-openshift/cats-and-dogs-secret-name: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jboero/terraform-guides/79c183f76f786ab00bc2d1a6a995dc20295de6e8/self-serve-infrastructure/k8s-services-openshift/cats-and-dogs-secret-name -------------------------------------------------------------------------------- /self-serve-infrastructure/k8s-services-openshift/cats-and-dogs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: cats-and-dogs 5 | namespace: cats-and-dogs 6 |
-------------------------------------------------------------------------------- /self-serve-infrastructure/k8s-services-openshift/openshift.tfvars.example: -------------------------------------------------------------------------------- 1 | tfe_organization = "" 2 | k8s_cluster_workspace = "k8s-cluster-openshift" 3 | private_key_data = "" 4 | -------------------------------------------------------------------------------- /self-serve-infrastructure/k8s-services-openshift/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cats_and_dogs_dns" { 2 | value = "http://cats-and-dogs-frontend.${data.terraform_remote_state.k8s_cluster.master_public_ip}.xip.io" 3 | } 4 | -------------------------------------------------------------------------------- /self-serve-infrastructure/k8s-services-openshift/variables.tf: -------------------------------------------------------------------------------- 1 | variable "tfe_organization" { 2 | description = "TFE organization" 3 | } 4 | 5 | variable "k8s_cluster_workspace" { 6 | description = "workspace to use for the k8s cluster" 7 | } 8 | 9 | variable "private_key_data" { 10 | description = "contents of the private key" 11 | } 12 | -------------------------------------------------------------------------------- /self-serve-infrastructure/k8s-services/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cats_and_dogs_ip" { 2 | value = "${kubernetes_service.cats-and-dogs-frontend.load_balancer_ingress.0.ip}" 3 | } 4 | -------------------------------------------------------------------------------- /self-serve-infrastructure/k8s-services/variables.tf: -------------------------------------------------------------------------------- 1 | variable "tfe_organization" { 2 | description = "TFE organization" 3 | default = "RogerBerlind" 4 | } 5 | 6 | variable "k8s_cluster_workspace" { 7 | description = "workspace to use for the k8s cluster" 8 |
} 9 | 10 | variable "k8s_vault_config_workspace" { 11 | description = "workspace to use for the vault configuration" 12 | } 13 | 14 | variable "frontend_image" { 15 | default = "rberlind/cats-and-dogs-frontend:k8s-auth" 16 | description = "Docker image location of the frontend app" 17 | } 18 | 19 | variable "backend_image" { 20 | default = "rberlind/cats-and-dogs-backend:k8s-auth" 21 | description = "Docker image location of the backend app" 22 | } 23 | --------------------------------------------------------------------------------