├── .circleci └── config.yml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── pull_request_template.md ├── .gitignore ├── CODEOWNERS ├── LICENSE ├── NOTICE ├── README.md ├── _docs ├── architecture-load-balancer.png └── architecture.png ├── examples ├── root-example │ ├── README.md │ ├── startup-script-consul.sh │ └── startup-script-vault.sh ├── vault-cluster-authentication-gce │ ├── README.md │ ├── images │ │ └── gce_auth.svg │ ├── main.tf │ ├── outputs.tf │ ├── startup-script-client.sh │ ├── startup-script-consul.sh │ ├── startup-script-vault.sh │ └── variables.tf ├── vault-cluster-authentication-iam │ ├── README.md │ ├── images │ │ └── iam_auth.svg │ ├── main.tf │ ├── outputs.tf │ ├── startup-script-client.sh │ ├── startup-script-consul.sh │ ├── startup-script-vault.sh │ └── variables.tf ├── vault-cluster-enterprise │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── startup-script-consul.sh │ ├── startup-script-vault-enterprise.sh │ └── variables.tf ├── vault-cluster-private-with-public-lb │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── startup-script-consul.sh │ ├── startup-script-vault.sh │ └── variables.tf ├── vault-cluster-private │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── startup-script-consul.sh │ ├── startup-script-vault.sh │ └── variables.tf ├── vault-consul-image │ ├── README.md │ ├── tls │ │ ├── README.md │ │ ├── ca.crt.pem │ │ ├── vault.crt.pem │ │ └── vault.key.pem │ └── vault-consul.json └── vault-examples-helper │ ├── README.md │ └── vault-examples-helper.sh ├── main.tf ├── modules ├── install-nginx │ ├── README.md │ ├── install-nginx │ └── nginx_signing.key ├── install-vault │ ├── README.md │ ├── install-vault │ ├── supervisor-initd-script.sh │ └── supervisord.conf ├── private-tls-cert │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── run-nginx │ ├── README.md │ └── run-nginx ├── run-vault │ ├── README.md │ └── run-vault ├── update-certificate-store │ ├── README.md │ └── update-certificate-store ├── vault-cluster │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf └── vault-lb-fr │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── outputs.tf ├── test ├── Gopkg.lock ├── Gopkg.toml ├── README.md ├── terratest_helpers.go ├── tls_helpers.go ├── vault_cluster_auth_test.go ├── vault_cluster_enterprise_test.go ├── vault_cluster_private_test.go ├── vault_cluster_public_test.go ├── vault_helpers.go └── vault_main_test.go └── variables.tf /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | workspace_root: &workspace_root 2 | /go/src/github.com/hashicorp/terraform-google-vault 3 | 4 | defaults: &defaults 5 | working_directory: *workspace_root 6 | docker: 7 | - image: docker.mirror.hashicorp.services/gruntwork/circle-ci-test-image-base:go1.11 8 | 9 | version: 2 10 | jobs: 11 | build: 12 | <<: *defaults 13 | steps: 14 | - checkout 15 | - attach_workspace: 16 | at: *workspace_root 17 | - restore_cache: 18 | keys: 19 | - dep-{{ checksum "test/Gopkg.lock" }} 20 | - run: configure-environment-for-gruntwork-module --go-src-path test --use-go-dep --circle-ci-2 21 | - save_cache: 22 | key: dep-{{ checksum "test/Gopkg.lock" }} 23 | paths: 24 | - /go/src/github.com/hashicorp/terraform-google-vault/test/vendor 25 | - persist_to_workspace: 26 | root: *workspace_root 27 | paths: test/vendor 28 | test: 29 | <<: *defaults 30 | steps: 31 | - checkout 32 | - run: echo 'export PATH=$HOME/terraform:$HOME/packer:$PATH' >> $BASH_ENV 33 | - 
attach_workspace: 34 | at: *workspace_root 35 | # Write service account creds to disk where the API expects them 36 | - run: mkdir -p /tmp/logs 37 | - run: | 38 | export GOOGLE_APPLICATION_CREDENTIALS="/tmp/gcloud.json" && \ 39 | echo $GCLOUD_SERVICE_KEY > /tmp/gcloud.json && \ 40 | run-go-tests --circle-ci-2 --path test | tee /tmp/logs/all.log 41 | - run: 42 | command: terratest_log_parser --testlog /tmp/logs/all.log --outputdir /tmp/logs 43 | when: always 44 | - store_artifacts: 45 | path: /tmp/logs 46 | - store_test_results: 47 | path: /tmp/logs 48 | 49 | workflows: 50 | version: 2 51 | build-and-test: 52 | jobs: 53 | - build 54 | - test: 55 | requires: 56 | - build 57 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a bug report to help us improve. 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 14 | 15 | **Describe the bug** 16 | A clear and concise description of what the bug is. 17 | 18 | **To Reproduce** 19 | Steps to reproduce the behavior, including the relevant Terraform/Terragrunt/Packer version number and any code snippets and module inputs you used. 20 | 21 | ```hcl 22 | // paste code snippets here 23 | ``` 24 | 25 | **Expected behavior** 26 | A clear and concise description of what you expected to happen. 27 | 28 | **Nice to have** 29 | - [ ] Terminal output 30 | - [ ] Screenshots 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Submit a feature request for this repo. 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | 14 | 15 | **Describe the solution you'd like** 16 | A clear and concise description of what you want to happen. 17 | 18 | **Describe alternatives you've considered** 19 | A clear and concise description of any alternative solutions or features you've considered. 20 | 21 | **Additional context** 22 | Add any other context or screenshots about the feature request here. 23 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | ## Description 8 | 9 | 10 | 11 | ### Documentation 12 | 13 | 21 | 22 | 23 | 24 | ## TODOs 25 | 26 | Please ensure all of these TODOs are completed before asking for a review. 27 | 28 | - [ ] Ensure the branch is named correctly with the issue number. e.g: `feature/new-vpc-endpoints-955` or `bug/missing-count-param-434`. 29 | - [ ] Update the docs. 30 | - [ ] Keep the changes backward compatible where possible. 31 | - [ ] Run the pre-commit checks successfully. 32 | - [ ] Run the relevant tests successfully. 33 | - [ ] Ensure any 3rd party code adheres to our [license policy](https://www.notion.so/gruntwork/Gruntwork-licenses-and-open-source-usage-policy-f7dece1f780341c7b69c1763f22b1378) or delete this line if it's not applicable.
34 | 35 | 36 | ## Related Issues 37 | 38 | 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform files 2 | .terraform 3 | terraform.tfstate 4 | terraform.tfvars 5 | *.tfstate* 6 | .test-data 7 | 8 | # OS X files 9 | .history 10 | .DS_Store 11 | 12 | # IntelliJ files 13 | .idea_modules 14 | *.iml 15 | *.iws 16 | *.ipr 17 | .idea/ 18 | build/ 19 | */build/ 20 | out/ 21 | 22 | # Go best practices dictate that libraries should not include the vendor directory 23 | vendor 24 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @bwhaley @josh-padnick @robmorgan 2 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | terraform-google-vault 2 | Copyright 2017 Gruntwork, Inc. 3 | 4 | This product includes software developed at Gruntwork (http://www.gruntwork.io/). -------------------------------------------------------------------------------- /_docs/architecture-load-balancer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/terraform-google-vault/01cac9c9c1dbfb12a745a910a1e61da091bb4dea/_docs/architecture-load-balancer.png -------------------------------------------------------------------------------- /_docs/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/terraform-google-vault/01cac9c9c1dbfb12a745a910a1e61da091bb4dea/_docs/architecture.png -------------------------------------------------------------------------------- /examples/root-example/README.md: -------------------------------------------------------------------------------- 1 | # Root Example 2 | 3 | This folder contains files for the example Terraform configuration contained in the "root" of this repo. 4 | 5 | That example deploys a publicly accessible [Vault](https://www.vaultproject.io/) cluster in [GCP](https://cloud.google.com/) 6 | using the [vault-cluster](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster) module. For an example of a private Vault cluster that is accessible 7 | only from inside the Google Cloud VPC, see [vault-cluster-private](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-private). **Do NOT use this 8 | example in a production setting. Deploying Vault as a publicly accessible cluster is not recommended in production; we 9 | do it here only to provide a convenient quick start experience.** 10 | 11 | The Vault cluster uses [Consul](https://www.consul.io/) as a storage backend, so this example also deploys a separate 12 | Consul server cluster using the [consul-cluster module]( 13 | https://github.com/hashicorp/terraform-google-consul/tree/master/modules/consul-cluster) from the Consul GCP Module. 14 | 15 | You will need to create a [Google Image](https://cloud.google.com/compute/docs/images) that has Vault and Consul 16 | installed, which you can do using the [vault-consul-image example](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-consul-image).
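
If you have [Packer](https://www.packer.io/) installed, building that image typically comes down to a single command run from the `examples/vault-consul-image` folder (a sketch only; see that example's README for the authoritative steps and any required template variables):

```
packer build vault-consul.json
```

Packer prints the name of the resulting Google Image when the build finishes; that is the value you will later plug into the `vault_source_image` and `consul_server_source_image` variables.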
17 | 18 | For more info on how the Vault cluster works, check out the [vault-cluster](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster) documentation. 19 | 20 | 21 | ## Quick start 22 | 23 | To deploy a Vault Cluster: 24 | 25 | 1. `git clone` this repo to your computer. 26 | 1. Build a Vault and Consul Google Image. See the [vault-consul-image example](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-consul-image) documentation 27 | for instructions. Make sure to note down the ID of the Google Image. 28 | 1. Install [Terraform](https://www.terraform.io/). 29 | 1. Make sure your local environment is authenticated to Google Cloud. 30 | 1. Open `variables.tf` and fill in any variables that don't have a default, including putting your Google Image ID into 31 | the `vault_source_image` and `consul_server_source_image` variables. 32 | 1. Run `terraform init`. 33 | 1. Run `terraform plan`. 34 | 1. If the plan looks good, run `terraform apply`. 35 | 1. Run the [vault-examples-helper.sh script](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-examples-helper/vault-examples-helper.sh) to 36 | print out the names and IP addresses of the Vault servers and some example commands you can run to interact with the 37 | cluster: `../vault-examples-helper/vault-examples-helper.sh`. 38 | 39 | To see how to connect to the Vault cluster, initialize it, and start reading and writing secrets, head over to the 40 | [How do you use the Vault cluster?](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster#how-do-you-use-the-vault-cluster) docs. 41 | -------------------------------------------------------------------------------- /examples/root-example/startup-script-consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in server mode. This script assumes it's running in a Compute Instance 4 | # based on a Google Image built from the Packer template in https://github.com/hashicorp/terraform-google-consul at 5 | # /examples/consul-image. 6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # Note that any variables below are expected to be interpolated by Terraform. 14 | /opt/consul/bin/run-consul --server --cluster-tag-name "${cluster_tag_name}" 15 | -------------------------------------------------------------------------------- /examples/root-example/startup-script-vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul and run-vault scripts to configure and start both Vault and Consul in client mode. This script assumes it's 4 | # running in a Compute Instance based on a Google Image built from the Packer template in 5 | # examples/vault-consul-image/vault-consul.json.
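#
# As a rough sketch with hypothetical values, after Terraform renders this template
# the two commands at the bottom of this script could look like:
#   /opt/consul/bin/run-consul --client --cluster-tag-name "consul-example"
#   /opt/vault/bin/run-vault --gcs-bucket vault-example --tls-cert-file "/opt/vault/tls/vault.crt.pem" --tls-key-file "/opt/vault/tls/vault.key.pem" --enable-ui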
6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # The Packer template puts the TLS certs in these file paths 14 | readonly VAULT_TLS_CERT_FILE="/opt/vault/tls/vault.crt.pem" 15 | readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem" 16 | 17 | # Note that any variables below are expected to be interpolated by Terraform. 18 | /opt/consul/bin/run-consul --client --cluster-tag-name "${consul_cluster_tag_name}" 19 | /opt/vault/bin/run-vault --gcs-bucket ${vault_cluster_tag_name} --tls-cert-file "$VAULT_TLS_CERT_FILE" --tls-key-file "$VAULT_TLS_KEY_FILE" ${enable_vault_ui} -------------------------------------------------------------------------------- /examples/vault-cluster-authentication-gce/outputs.tf: -------------------------------------------------------------------------------- 1 | output "gcp_project_id" { 2 | value = var.gcp_project_id 3 | } 4 | 5 | output "gcp_region" { 6 | value = var.gcp_region 7 | } 8 | 9 | output "vault_cluster_size" { 10 | value = var.vault_cluster_size 11 | } 12 | 13 | output "cluster_tag_name" { 14 | value = module.vault_cluster.cluster_tag_name 15 | } 16 | 17 | output "web_client_name" { 18 | value = var.web_client_name 19 | } 20 | 21 | output "web_client_public_ip" { 22 | value = google_compute_instance.web_client.network_interface[0].access_config[0].nat_ip 23 | } 24 | 25 | output "instance_group_id" { 26 | value = module.vault_cluster.instance_group_id 27 | } 28 | 29 | output "instance_group_url" { 30 | value = module.vault_cluster.instance_group_url 31 | } 32 | 33 | output "instance_template_url" { 34 | value = module.vault_cluster.instance_template_url 35 | } 36 | 37 | output "firewall_rule_allow_intracluster_vault_id" { 38 | value = module.vault_cluster.firewall_rule_allow_intracluster_vault_id 39 | } 40 | 41 | output "firewall_rule_allow_intracluster_vault_url" { 42 | value = module.vault_cluster.firewall_rule_allow_intracluster_vault_url 43 | } 44 | 45 | output "firewall_rule_allow_inbound_api_id" { 46 | value = module.vault_cluster.firewall_rule_allow_inbound_api_id 47 | } 48 | 49 | output "firewall_rule_allow_inbound_api_url" { 50 | value = module.vault_cluster.firewall_rule_allow_inbound_api_url 51 | } 52 | 53 | output "firewall_rule_allow_inbound_health_check_id" { 54 | value = module.vault_cluster.firewall_rule_allow_inbound_health_check_id 55 | } 56 | 57 | output "firewall_rule_allow_inbound_health_check_url" { 58 | value = module.vault_cluster.firewall_rule_allow_inbound_health_check_url 59 | } 60 | 61 | output "bucket_name_id" { 62 | value = module.vault_cluster.bucket_name_id 63 | } 64 | 65 | output "bucket_name_url" { 66 | value = module.vault_cluster.bucket_name_url 67 | } 68 | 69 | -------------------------------------------------------------------------------- /examples/vault-cluster-authentication-gce/startup-script-client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of a Compute Instance 3 | # while it's booting. Afterwards it performs the necessary API requests to log in 4 | # to a Vault cluster.
At the end it also serves a simple webserver with a message 5 | # read from Vault, for test purposes, so we can curl the response and test that 6 | # the authentication example is working as expected. 7 | # 8 | # This script assumes it's running in a Compute Instance based on a Google Image 9 | # built from the Packer template in examples/vault-consul-image/vault-consul.json. 10 | # 11 | # For more information about GCP auth, please refer to https://www.vaultproject.io/docs/auth/gcp.html 12 | # ========================================================================== 13 | 14 | set -e 15 | 16 | # Send the log output from this script to startup-script.log, syslog, and the console 17 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 18 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 19 | 20 | # Note that any variables below are expected to be interpolated by Terraform. 21 | /opt/consul/bin/run-consul --client --cluster-tag-name "${consul_cluster_tag_name}" 22 | 23 | 24 | # Log the given message. All logs are written to stderr with a timestamp. 25 | function log { 26 | local -r message="$1" 27 | local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S") 28 | >&2 echo -e "$timestamp $message" 29 | } 30 | 31 | # A retry function that attempts to run a command a number of times and returns the output 32 | function retry { 33 | local -r cmd="$1" 34 | local -r description="$2" 35 | 36 | for i in $(seq 1 30); do 37 | log "$description" 38 | 39 | # The boolean operations with the exit status are there to temporarily circumvent the "set -e" at the 40 | # beginning of this script, which exits the script immediately on error status, while not losing the exit status code 41 | output=$(eval "$cmd") && exit_status=0 || exit_status=$? 42 | log "$output" 43 | if [[ $exit_status -eq 0 ]]; then 44 | echo "$output" 45 | return 46 | fi 47 | log "$description failed. Will sleep for 10 seconds and try again." 48 | sleep 10 49 | done; 50 | 51 | log "$description failed after 30 attempts." 52 | exit $exit_status 53 | } 54 | 55 | # ========================================================================== 56 | # BEGIN GCP GCE AUTH EXAMPLE 57 | # ========================================================================== 58 | # Getting the signed JWT token from instance metadata 59 | # ========================================================================== 60 | # `example_role_name` is filled in by Terraform; it should be the same name 61 | # used to create the Vault Role when configuring the authentication on the Vault 62 | # server. In this example we are using the default project service account to 63 | # fetch the necessary credentials. If you wish to use a different service account, 64 | # then the service account email should be used instead of "default". 65 | SERVICE_ACCOUNT="default" 66 | JWT_TOKEN=$(curl \ 67 | --fail \ 68 | --header "Metadata-Flavor: Google" \ 69 | --get \ 70 | --data-urlencode "audience=vault/${example_role_name}" \ 71 | --data-urlencode "format=full" \ 72 | "http://metadata/computeMetadata/v1/instance/service-accounts/$SERVICE_ACCOUNT/identity") 73 | 74 | log "$JWT_TOKEN" 75 | 76 | # ========================================================================== 77 | # Login 78 | # ========================================================================== 79 | # In this example, we are using the HTTP API to log in and read secrets from Vault, 80 | # although the vault cli tool could also have been used.
The vault cli tool makes 81 | # this process easier by fetching the signed JWT token, needed for login, automatically. 82 | # We have used the vault cli tool in the example with the IAM auth method, which 83 | # you can find at /examples/vault-cluster-authentication-iam 84 | # For more information on GCP auth, check https://www.vaultproject.io/docs/auth/gcp.html#authentication 85 | LOGIN_PAYLOAD=$(cat < index.html 118 | python -m SimpleHTTPServer 8080 & 119 | -------------------------------------------------------------------------------- /examples/vault-cluster-authentication-gce/startup-script-consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in server mode. This script assumes it's running in a Compute Instance 4 | # based on a Google Image built from the Packer template in https://github.com/hashicorp/terraform-google-consul at 5 | # /examples/consul-image. 6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # Note that any variables below are expected to be interpolated by Terraform. 14 | /opt/consul/bin/run-consul --server --cluster-tag-name "${cluster_tag_name}" 15 | -------------------------------------------------------------------------------- /examples/vault-cluster-authentication-gce/startup-script-vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance 3 | # while it's booting. The script uses the run-consul and run-vault scripts to 4 | # configure and start Consul in client mode and Vault in server mode, and then, 5 | # after initializing and unsealing vault, it configures vault authentication and 6 | # writes an example that can be read by a client. This script assumes it's running 7 | # in a Compute Instance based on a Google Image built from the Packer template in 8 | # examples/vault-consul-image/vault-consul.json. 9 | # 10 | # For more information about GCP auth, please refer to https://www.vaultproject.io/docs/auth/gcp.html 11 | # ========================================================================== 12 | 13 | set -e 14 | 15 | # Send the log output from this script to startup-script.log, syslog, and the console 16 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 17 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 18 | 19 | # The Packer template puts the TLS certs in these file paths 20 | readonly VAULT_TLS_CERT_FILE="/opt/vault/tls/vault.crt.pem" 21 | readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem" 22 | 23 | # Note that any variables below are expected to be interpolated by Terraform. 24 | /opt/consul/bin/run-consul --client --cluster-tag-name "${consul_cluster_tag_name}" 25 | /opt/vault/bin/run-vault --gcs-bucket ${vault_cluster_tag_name} --tls-cert-file "$VAULT_TLS_CERT_FILE" --tls-key-file "$VAULT_TLS_KEY_FILE" ${enable_vault_ui} 26 | 27 | # Log the given message. All logs are written to stderr with a timestamp.
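# Example usage (illustrative): `log "Vault is initialized"` would write something
# like "2019-05-01 10:15:00 Vault is initialized" to stderr.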
28 | function log { 29 | local -r message="$1" 30 | local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S") 31 | >&2 echo -e "$timestamp $message" 32 | } 33 | 34 | # A retry function that attempts to run a command a number of times and returns the output 35 | function retry { 36 | local -r cmd="$1" 37 | local -r description="$2" 38 | 39 | for i in $(seq 1 30); do 40 | log "$description" 41 | 42 | # The boolean operations with the exit status are there to temporarily circumvent the "set -e" at the 43 | # beginning of this script, which exits the script immediately on error status, while not losing the exit status code 44 | output=$(eval "$cmd") && exit_status=0 || exit_status=$? 45 | log "$output" 46 | if [[ $exit_status -eq 0 ]]; then 47 | echo "$output" 48 | return 49 | fi 50 | log "$description failed. Will sleep for 10 seconds and try again." 51 | sleep 10 52 | done; 53 | 54 | log "$description failed after 30 attempts." 55 | exit $exit_status 56 | } 57 | 58 | # Initializes a vault server 59 | # run-vault is running in the background and we have to wait for it to be done, 60 | # so in case this fails we retry. 61 | SERVER_OUTPUT=$(retry \ 62 | "/opt/vault/bin/vault operator init" \ 63 | "Trying to initialize vault") 64 | 65 | # The expected output should be similar to this: 66 | # ========================================================================== 67 | # Unseal Key 1: ddPRelXzh9BdgqIDqQO9K0ldtHIBmY9AqsTohM6zCRl7 68 | # Unseal Key 2: liSgypzdVrAxz73KbKyCMjVeSnRMuxCZMk1PWIZdjENS 69 | # Unseal Key 3: pmgeVu/fs8+jl8bOzf3Cq56BFufm4o7Sxt2oaUcvt6Dp 70 | # Unseal Key 4: i3W2xJEyUqUqcO1QSjTA+Ua0RUPxnNWM27AqaC8wW7Zh 71 | # Unseal Key 5: vHsQtCRgfblPeFYw1hhCVbji0MoNUP8zyIWhLWs3PebS 72 | # 73 | # Initial Root Token: cb076fc1-cc1f-6766-795f-b3822ba1ac57 74 | # 75 | # Vault initialized with 5 key shares and a key threshold of 3. Please securely 76 | # distribute the key shares printed above. When the Vault is re-sealed, 77 | # restarted, or stopped, you must supply at least 3 of these keys to unseal it 78 | # before it can start servicing requests. 79 | # 80 | # Vault does not store the generated master key. Without at least 3 key to 81 | # reconstruct the master key, Vault will remain permanently sealed! 82 | # 83 | # It is possible to generate new unseal keys, provided you have a quorum of 84 | # existing unseal keys shares. See "vault operator rekey" for more information. 85 | # ========================================================================== 86 | 87 | # Unseals the server with 3 keys from this output 88 | # Please note that this is not how it should be done in production, as it is not 89 | # secure and we are not storing any of the unseal keys, so in case it gets resealed, 90 | # the keys are lost and we wouldn't be able to unseal it again. Normally, an 91 | # operator would SSH into each node and unseal the server manually or, ideally, it 92 | # should be auto unsealed https://www.vaultproject.io/docs/enterprise/auto-unseal/index.html 93 | # For this quick example specifically, we are just running one vault server and 94 | # unsealing it like this for simplicity as this example focuses on authentication 95 | # and not on unsealing.
For a more detailed example on auto unsealing, check the 96 | # vault enterprise example at /examples/vault-cluster-enterprise 97 | FIRST_THREE_LINES=$(echo "$SERVER_OUTPUT" | head -n 3) 98 | UNSEAL_KEYS=$(echo "$FIRST_THREE_LINES" | awk '{ print $4; }') 99 | echo "$UNSEAL_KEYS" | xargs -l /opt/vault/bin/vault operator unseal 100 | 101 | # Exports the client token environment variable necessary for running the following vault commands 102 | SEVENTH_LINE=$(echo "$SERVER_OUTPUT" | head -n 7 | tail -n 1) 103 | export VAULT_TOKEN=$(echo "$SEVENTH_LINE" | awk '{ print $4; }') 104 | 105 | 106 | # ========================================================================== 107 | # BEGIN GCP GCE AUTH EXAMPLE 108 | # ========================================================================== 109 | # Auth methods must be configured in advance before users or machines can authenticate. 110 | 111 | # Enables authentication 112 | # This is an http request, and sometimes fails, hence we retry 113 | retry \ 114 | "/opt/vault/bin/vault auth enable gcp" \ 115 | "Trying to enable gcp authentication" 116 | 117 | # To be able to verify authentication attempts with the help of the Google API, 118 | # Vault needs to have access to a service account with the necessary roles. 119 | # This example runs on a Google Compute Instance, which means that the credentials 120 | # are provided to Vault automatically, but the following command would be necessary 121 | # for using the GCP auth method outside of GCP, such as locally, for example. 122 | # 123 | # vault write auth/gcp/config credentials=@/path/to/credentials.json 124 | 125 | # Creates a policy that allows writing and reading from an "example_" prefix at the "secret" backend 126 | /opt/vault/bin/vault policy write "example-policy" -< >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 21 | 22 | # Note that any variables below are expected to be interpolated by Terraform. 23 | /opt/consul/bin/run-consul --client --cluster-tag-name "${consul_cluster_tag_name}" 24 | 25 | 26 | # Log the given message. All logs are written to stderr with a timestamp. 27 | function log { 28 | local -r message="$1" 29 | local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S") 30 | >&2 echo -e "$timestamp $message" 31 | } 32 | 33 | # A retry function that attempts to run a command a number of times and returns the output 34 | function retry { 35 | local -r cmd="$1" 36 | local -r description="$2" 37 | 38 | for i in $(seq 1 30); do 39 | log "$description" 40 | 41 | # The boolean operations with the exit status are there to temporarily circumvent the "set -e" at the 42 | # beginning of this script, which exits the script immediately on error status, while not losing the exit status code 43 | output=$(eval "$cmd") && exit_status=0 || exit_status=$? 44 | log "$output" 45 | if [[ $exit_status -eq 0 ]]; then 46 | echo "$output" 47 | return 48 | fi 49 | log "$description failed. Will sleep for 10 seconds and try again." 50 | sleep 10 51 | done; 52 | 53 | log "$description failed after 30 attempts." 54 | exit $exit_status 55 | } 56 | 57 | # Consul is being used as a service discovery mechanism, thanks to dnsmasq, so 58 | # this web client can locate the vault cluster through the following 59 | # private hostname: vault.service.consul 60 | # We can use the vault cli to reach the Google API to sign a JSON Web Token and 61 | # perform the authentication with a bound Service Account.
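# (Illustrative sketch) Under the hood, `vault login -method=gcp` does roughly the
# equivalent of POSTing the signed JWT to Vault's GCP login endpoint:
#   curl --request POST --data '{"role": "<role-name>", "jwt": "<signed-jwt>"}' \
#     https://vault.service.consul:8200/v1/auth/gcp/login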
62 | # The Vault Role must be previously created and configured by a vault server 63 | LOGIN_OUTPUT=$(retry \ 64 | "vault login -method=gcp -address='https://vault.service.consul:8200' role='${example_role_name}' jwt_exp=15m project='${project_id}' service_account='${service_account_email}'" \ 65 | "Attempting to log in to Vault") 66 | 67 | # After logging in, we can use the vault cli to perform operations such as reading a secret 68 | RESPONSE_READ=$(retry \ 69 | "vault read -address=https://vault.service.consul:8200 secret/example_gruntwork" \ 70 | "Trying to read secret from vault") 71 | 72 | # Serves the answer in a web server so we can test that this auth client is 73 | # authenticating to vault and fetching data correctly 74 | echo $RESPONSE_READ | awk '{print $NF}' > index.html 75 | python -m SimpleHTTPServer 8080 & 76 | -------------------------------------------------------------------------------- /examples/vault-cluster-authentication-iam/startup-script-consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in server mode. This script assumes it's running in a Compute Instance 4 | # based on a Google Image built from the Packer template in https://github.com/hashicorp/terraform-google-consul at 5 | # /examples/consul-image. 6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # Note that any variables below are expected to be interpolated by Terraform. 14 | /opt/consul/bin/run-consul --server --cluster-tag-name "${cluster_tag_name}" 15 | -------------------------------------------------------------------------------- /examples/vault-cluster-authentication-iam/startup-script-vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance 3 | # while it's booting. The script uses the run-consul and run-vault scripts to 4 | # configure and start Consul in client mode and Vault in server mode, and then, 5 | # after initializing and unsealing vault, it configures vault authentication and 6 | # writes an example that can be read by a client. This script assumes it's running 7 | # in a Compute Instance based on a Google Image built from the Packer template in 8 | # examples/vault-consul-image/vault-consul.json. 9 | # 10 | # For more information about GCP auth, please refer to https://www.vaultproject.io/docs/auth/gcp.html 11 | # ========================================================================== 12 | 13 | set -e 14 | 15 | # Send the log output from this script to startup-script.log, syslog, and the console 16 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 17 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 18 | 19 | # The Packer template puts the TLS certs in these file paths 20 | readonly VAULT_TLS_CERT_FILE="/opt/vault/tls/vault.crt.pem" 21 | readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem" 22 | 23 | # Note that any variables below are expected to be interpolated by Terraform.
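# For example, ${enable_vault_ui} is expected to render to either "--enable-ui" or an
# empty string (elsewhere in this repo it is set via var.enable_vault_ui ? "--enable-ui" : ""),
# so the run-vault command below may or may not include the UI flag.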
24 | /opt/consul/bin/run-consul --client --cluster-tag-name "${consul_cluster_tag_name}" 25 | /opt/vault/bin/run-vault --gcs-bucket ${vault_cluster_tag_name} --tls-cert-file "$VAULT_TLS_CERT_FILE" --tls-key-file "$VAULT_TLS_KEY_FILE" ${enable_vault_ui} 26 | 27 | # Log the given message. All logs are written to stderr with a timestamp. 28 | function log { 29 | local -r message="$1" 30 | local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S") 31 | >&2 echo -e "$timestamp $message" 32 | } 33 | 34 | # A retry function that attempts to run a command a number of times and returns the output 35 | function retry { 36 | local -r cmd="$1" 37 | local -r description="$2" 38 | 39 | for i in $(seq 1 30); do 40 | log "$description" 41 | 42 | # The boolean operations with the exit status are there to temporarily circumvent the "set -e" at the 43 | # beginning of this script, which exits the script immediately on error status, while not losing the exit status code 44 | output=$(eval "$cmd") && exit_status=0 || exit_status=$? 45 | log "$output" 46 | if [[ $exit_status -eq 0 ]]; then 47 | echo "$output" 48 | return 49 | fi 50 | log "$description failed. Will sleep for 10 seconds and try again." 51 | sleep 10 52 | done; 53 | 54 | log "$description failed after 30 attempts." 55 | exit $exit_status 56 | } 57 | 58 | # Initializes a vault server 59 | # run-vault is running in the background and we have to wait for it to be done, 60 | # so in case this fails we retry. 61 | SERVER_OUTPUT=$(retry \ 62 | "/opt/vault/bin/vault operator init" \ 63 | "Trying to initialize vault") 64 | 65 | # The expected output should be similar to this: 66 | # ========================================================================== 67 | # Unseal Key 1: ddPRelXzh9BdgqIDqQO9K0ldtHIBmY9AqsTohM6zCRl7 68 | # Unseal Key 2: liSgypzdVrAxz73KbKyCMjVeSnRMuxCZMk1PWIZdjENS 69 | # Unseal Key 3: pmgeVu/fs8+jl8bOzf3Cq56BFufm4o7Sxt2oaUcvt6Dp 70 | # Unseal Key 4: i3W2xJEyUqUqcO1QSjTA+Ua0RUPxnNWM27AqaC8wW7Zh 71 | # Unseal Key 5: vHsQtCRgfblPeFYw1hhCVbji0MoNUP8zyIWhLWs3PebS 72 | # 73 | # Initial Root Token: cb076fc1-cc1f-6766-795f-b3822ba1ac57 74 | # 75 | # Vault initialized with 5 key shares and a key threshold of 3. Please securely 76 | # distribute the key shares printed above. When the Vault is re-sealed, 77 | # restarted, or stopped, you must supply at least 3 of these keys to unseal it 78 | # before it can start servicing requests. 79 | # 80 | # Vault does not store the generated master key. Without at least 3 key to 81 | # reconstruct the master key, Vault will remain permanently sealed! 82 | # 83 | # It is possible to generate new unseal keys, provided you have a quorum of 84 | # existing unseal keys shares. See "vault operator rekey" for more information. 85 | # ========================================================================== 86 | 87 | # Unseals the server with 3 keys from this output 88 | # Please note that this is not how it should be done in production, as it is not 89 | # secure and we are not storing any of the unseal keys, so in case it gets resealed, 90 | # the keys are lost and we wouldn't be able to unseal it again. Normally, an 91 | # operator would SSH into each node and unseal the server manually or, ideally, it 92 | # should be auto unsealed https://www.vaultproject.io/docs/enterprise/auto-unseal/index.html 93 | # For this quick example specifically, we are just running one vault server and 94 | # unsealing it like this for simplicity as this example focuses on authentication 95 | # and not on unsealing.
For a more detailed example on auto unsealing, check the 96 | # vault enterprise example at /examples/vault-cluster-enterprise 97 | FIRST_THREE_LINES=$(echo "$SERVER_OUTPUT" | head -n 3) 98 | UNSEAL_KEYS=$(echo "$FIRST_THREE_LINES" | awk '{ print $4; }') 99 | echo "$UNSEAL_KEYS" | xargs -l /opt/vault/bin/vault operator unseal 100 | 101 | # Exports the client token environment variable necessary for running the following vault commands 102 | SEVENTH_LINE=$(echo "$SERVER_OUTPUT" | head -n 7 | tail -n 1) 103 | export VAULT_TOKEN=$(echo "$SEVENTH_LINE" | awk '{ print $4; }') 104 | 105 | 106 | # ========================================================================== 107 | # BEGIN GCP IAM AUTH EXAMPLE 108 | # ========================================================================== 109 | # Auth methods must be configured in advance before users or machines can authenticate. 110 | 111 | # Enables authentication 112 | # This is an http request, and sometimes fails, hence we retry 113 | retry \ 114 | "/opt/vault/bin/vault auth enable gcp" \ 115 | "Trying to enable gcp authentication" 116 | 117 | # To be able to verify authentication attempts with the help of the Google API, 118 | # Vault needs to have access to a service account with the necessary roles. 119 | # This example runs on a Google Compute Instance, which means that the credentials 120 | # are provided to Vault automatically, but the following command would be necessary 121 | # for using the GCP auth method outside of GCP, such as locally, for example. 122 | # 123 | # vault write auth/gcp/config credentials=@/path/to/credentials.json 124 | 125 | # Creates a policy that allows writing and reading from an "example_" prefix at the "secret" backend 126 | /opt/vault/bin/vault policy write "example-policy" -< >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # Note that any variables below are expected to be interpolated by Terraform. 14 | /opt/consul/bin/run-consul --server --cluster-tag-name "${cluster_tag_name}" 15 | -------------------------------------------------------------------------------- /examples/vault-cluster-enterprise/startup-script-vault-enterprise.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul and run-vault scripts to configure and start both Vault and Consul in client mode. This script assumes it's 4 | # running in a Compute Instance based on a Google Image built from the Packer template in 5 | # examples/vault-consul-image/vault-consul.json. 6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # The Packer template puts the TLS certs in these file paths 14 | readonly VAULT_TLS_CERT_FILE="/opt/vault/tls/vault.crt.pem" 15 | readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem" 16 | 17 | # Note that any variables below are expected to be interpolated by Terraform.
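# As a sketch with hypothetical values, the auto-unseal flags passed to run-vault
# below could render as:
#   --auto-unseal-key-project-id "my-gcp-project" --auto-unseal-key-region "global" \
#   --auto-unseal-key-ring "vault-cluster" --auto-unseal-crypto-key-name "vault-unseal-key"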
18 | /opt/consul/bin/run-consul --client --cluster-tag-name "${consul_cluster_tag_name}" 19 | /opt/vault/bin/run-vault --gcs-bucket ${vault_cluster_tag_name} \ 20 | --tls-cert-file "$VAULT_TLS_CERT_FILE" \ 21 | --tls-key-file "$VAULT_TLS_KEY_FILE" \ 22 | --enable-auto-unseal \ 23 | --auto-unseal-key-project-id "${vault_auto_unseal_key_project_id}" \ 24 | --auto-unseal-key-region "${vault_auto_unseal_key_region}" \ 25 | --auto-unseal-key-ring "${vault_auto_unseal_key_ring}" \ 26 | --auto-unseal-crypto-key-name "${vault_auto_unseal_crypto_key_name}" 27 | 28 | # We run an nginx server to expose an HTTP endpoint that will be used solely for Vault health checks. This is because 29 | # Google Cloud only permits HTTP health checks to be associated with the Load Balancer. 30 | /opt/nginx/bin/run-nginx --port ${web_proxy_port} --proxy-pass-url "https://127.0.0.1:8200/v1/sys/health?standbyok=true" 31 | -------------------------------------------------------------------------------- /examples/vault-cluster-enterprise/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These parameters must be supplied when consuming this module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "gcp_project_id" { 7 | description = "The name of the GCP Project where all resources will be launched." 8 | type = string 9 | } 10 | 11 | variable "gcp_region" { 12 | description = "The Region in which all GCP resources will be launched." 13 | type = string 14 | } 15 | 16 | variable "subnet_ip_cidr_range" { 17 | description = "The cidr range for the subnetwork. Ex.: 10.1.0.0/16" 18 | type = string 19 | } 20 | 21 | variable "bastion_server_name" { 22 | description = "The name of the bastion server that can reach the private vault cluster" 23 | type = string 24 | } 25 | 26 | variable "vault_cluster_name" { 27 | description = "The name of the Vault Server cluster. All resources will be namespaced by this value. E.g. vault-server-prod" 28 | type = string 29 | } 30 | 31 | variable "vault_source_image" { 32 | description = "The Google Image used to launch each node in the Vault Server cluster." 33 | type = string 34 | } 35 | 36 | variable "vault_cluster_machine_type" { 37 | description = "The machine type of the Compute Instance to run for each node in the Vault cluster (e.g. n1-standard-1)." 38 | type = string 39 | } 40 | 41 | variable "consul_server_cluster_name" { 42 | description = "The name of the Consul Server cluster. All resources will be namespaced by this value. E.g. consul-server-prod" 43 | type = string 44 | } 45 | 46 | variable "consul_server_source_image" { 47 | description = "The Google Image used to launch each node in the Consul Server cluster." 48 | type = string 49 | } 50 | 51 | variable "consul_server_machine_type" { 52 | description = "The machine type of the Compute Instance to run for each node in the Consul Server cluster (e.g. n1-standard-1)." 53 | type = string 54 | } 55 | 56 | # Vault Auto Unseal Variables 57 | 58 | variable "vault_auto_unseal_key_project_id" { 59 | description = "The GCP Project ID to use for the Auto Unseal feature." 60 | type = string 61 | } 62 | 63 | variable "vault_auto_unseal_key_region" { 64 | description = "The GCP Region to use for the Auto Unseal feature." 
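  # Illustrative values: "global" or "us-east1"; this should match the location of the Cloud KMS key ring.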
65 | type = string 66 | } 67 | 68 | variable "vault_auto_unseal_key_ring" { 69 | description = "The GCP Cloud KMS Key Ring to use for the Auto Unseal feature." 70 | type = string 71 | } 72 | 73 | variable "vault_auto_unseal_crypto_key_name" { 74 | description = "The GCP Cloud KMS Crypto Key to use for the Auto Unseal feature. Note: if creating a new key using var.create_kms_crypto_key, then use this key." 75 | type = string 76 | } 77 | 78 | # --------------------------------------------------------------------------------------------------------------------- 79 | # OPTIONAL PARAMETERS 80 | # These parameters have reasonable defaults. 81 | # --------------------------------------------------------------------------------------------------------------------- 82 | 83 | variable "network_name" { 84 | description = "The name of the VPC Network where all resources should be created." 85 | type = string 86 | default = "default" 87 | } 88 | 89 | variable "additional_allowed_inbound_tags_api" { 90 | type = list(string) 91 | description = "A list of additional tags that GCP project resources can have to be permitted access to the Vault API." 92 | default = [] 93 | } 94 | 95 | variable "gcs_bucket_location" { 96 | description = "The location of the Google Cloud Storage Bucket where Vault secrets will be stored. For details, see https://goo.gl/hk63jH." 97 | type = string 98 | default = "US" 99 | } 100 | 101 | variable "gcs_bucket_class" { 102 | description = "The Storage Class of the Google Cloud Storage Bucket where Vault secrets will be stored. Must be one of MULTI_REGIONAL, REGIONAL, NEARLINE, or COLDLINE. For details, see https://goo.gl/hk63jH." 103 | type = string 104 | default = "MULTI_REGIONAL" 105 | } 106 | 107 | variable "gcs_bucket_force_destroy" { 108 | description = "If true, Terraform will delete the Google Cloud Storage Bucket even if it's non-empty. WARNING! Never set this to true in a production setting. We only have this option here to facilitate testing." 109 | type = bool 110 | default = true 111 | } 112 | 113 | variable "vault_cluster_size" { 114 | description = "The number of nodes to have in the Vault Server cluster. We strongly recommend that you use either 3 or 5." 115 | type = number 116 | default = 3 117 | } 118 | 119 | variable "consul_server_cluster_size" { 120 | description = "The number of nodes to have in the Consul Server cluster. We strongly recommend that you use either 3 or 5." 121 | type = number 122 | default = 3 123 | } 124 | 125 | variable "web_proxy_port" { 126 | description = "The port at which the HTTP proxy server will listen for incoming HTTP requests that will be forwarded to the Vault Health Check URL. We must have an HTTP proxy server to work around the limitation that GCP only permits Health Checks via HTTP, not HTTPS." 127 | type = number 128 | default = 8000 129 | } 130 | 131 | variable "root_volume_disk_size_gb" { 132 | description = "The size, in GB, of the root disk volume on each Consul node." 133 | type = number 134 | default = 30 135 | } 136 | 137 | variable "root_volume_disk_type" { 138 | description = "The GCE disk type.
Can be either pd-ssd, local-ssd, or pd-standard" 139 | type = string 140 | default = "pd-standard" 141 | } 142 | 143 | variable "enable_vault_ui" { 144 | description = "If true, enable the Vault UI" 145 | type = bool 146 | default = true 147 | } 148 | -------------------------------------------------------------------------------- /examples/vault-cluster-private-with-public-lb/README.md: -------------------------------------------------------------------------------- 1 | # Private Vault Cluster with Public Load Balancer Example 2 | 3 | This example deploys a publicly accessible [Vault](https://www.vaultproject.io/) cluster in [GCP](https://cloud.google.com/) 4 | fronted by a Regional External Load Balancer using the [vault-cluster](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster) and [vault-lb-fr]( 5 | /modules/vault-lb-fr) modules. For an example of a private Vault cluster that is accessible only from inside the Google 6 | Cloud VPC, see [vault-cluster-private](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-private). **Deploying Vault in a publicly accessible way 7 | should be avoided if possible due to the increased security exposure. However, it may be unavoidable if, for example, 8 | Vault is your system of record for identity.** 9 | 10 | The Vault cluster uses [Consul](https://www.consul.io/) as a storage backend, so this example also deploys a separate 11 | Consul server cluster using the [consul-cluster module]( 12 | https://github.com/hashicorp/terraform-google-consul/tree/master/modules/consul-cluster) from the Consul GCP Module. 13 | 14 | You will need to create a [Google Image](https://cloud.google.com/compute/docs/images) that has Vault and Consul 15 | installed, which you can do using the [vault-consul-image example](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-consul-image). 16 | 17 | Note that a Google Load Balancer requires a Health Check to confirm that the Vault nodes are healthy, but at this time, 18 | Google Cloud only supports [associating HTTP Health Checks with a Target Pool]( 19 | https://github.com/terraform-providers/terraform-provider-google/issues/18), not HTTPS Health Checks. The recommended 20 | workaround is to run a separate proxy server that listens over HTTP and forwards requests to the HTTPS Vault endpoint. 21 | We accomplish this by using the [run-nginx](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/run-nginx) module to run the web server. 22 | 23 | For more info on how the Vault cluster works, check out the [vault-cluster](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster) documentation. 24 | 25 | 26 | ## Quick start 27 | 28 | To deploy a Vault Cluster: 29 | 30 | 1. `git clone` this repo to your computer. 31 | 1. Build a Vault and Consul Google Image. See the [vault-consul-image example](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-consul-image) documentation 32 | for instructions. Make sure to note down the ID of the Google Image. 33 | 1. Install [Terraform](https://www.terraform.io/). 34 | 1. Make sure your local environment is authenticated to Google Cloud. 35 | 1. Open `variables.tf` and fill in any variables that don't have a default, including putting your Google Image ID into 36 | the `vault_source_image` and `consul_server_source_image` variables. Alternatively, initialize the variables by creating 37 | a `terraform.tfvars` file. 38 | 1.
Run `terraform init`. 39 | 1. Run `terraform plan`. 40 | 1. If the plan looks good, run `terraform apply`. 41 | 1. To enable other Compute Instances in the same GCP Project to access the Vault Cluster, edit the `main.tf` file to 42 | modify the `allowed_inbound_tags_api` variable. To allow arbitrary IP addresses to access the Vault cluster from 43 | within the VPC, modify the `allowed_inbound_cidr_blocks_api` variable. 44 | 45 | To see how to connect to the Vault cluster, initialize it, and start reading and writing secrets, head over to the 46 | [How do you use the Vault cluster?](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster#how-do-you-use-the-vault-cluster) docs. 47 | -------------------------------------------------------------------------------- /examples/vault-cluster-private-with-public-lb/main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # DEPLOY A PRIVATE VAULT CLUSTER WITH A PUBLIC LOAD BALANCER IN GOOGLE CLOUD 3 | # This is an example of how to use the vault-cluster to deploy a private Vault cluster in GCP with a Load Balancer in 4 | # front of it. This cluster uses Consul, running in a separate cluster, as its High Availability backend. 5 | # --------------------------------------------------------------------------------------------------------------------- 6 | 7 | provider "google" { 8 | project = var.gcp_project 9 | region = var.gcp_region 10 | } 11 | 12 | terraform { 13 | required_version = ">= 0.12" 14 | } 15 | 16 | 17 | # --------------------------------------------------------------------------------------------------------------------- 18 | # DEPLOY THE VAULT SERVER CLUSTER 19 | # --------------------------------------------------------------------------------------------------------------------- 20 | 21 | module "vault_cluster" { 22 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 23 | # to a specific version of the modules, such as the following example: 24 | # source = "git::git@github.com:hashicorp/terraform-google-vault.git//modules/vault-cluster?ref=v0.0.1" 25 | source = "../../modules/vault-cluster" 26 | 27 | gcp_project_id = var.gcp_project_id 28 | gcp_region = var.gcp_region 29 | 30 | cluster_name = var.vault_cluster_name 31 | cluster_size = var.vault_cluster_size 32 | cluster_tag_name = var.vault_cluster_name 33 | machine_type = var.vault_cluster_machine_type 34 | 35 | source_image = var.vault_source_image 36 | startup_script = data.template_file.startup_script_vault.rendered 37 | 38 | gcs_bucket_name = var.vault_cluster_name 39 | gcs_bucket_location = var.gcs_bucket_location 40 | gcs_bucket_storage_class = var.gcs_bucket_class 41 | gcs_bucket_force_destroy = var.gcs_bucket_force_destroy 42 | 43 | root_volume_disk_size_gb = var.root_volume_disk_size_gb 44 | root_volume_disk_type = var.root_volume_disk_type 45 | 46 | # Regrettably, GCE only supports HTTP health checks, not HTTPS Health Checks (https://github.com/terraform-providers/terraform-provider-google/issues/18) 47 | # But Vault is only configured to listen for HTTPS requests. Therefore, per GCE recommendations, we run a simple HTTP 48 | #
Therefore, per GCE recommendations, we run a simple HTTP 48 | # proxy server that forwards all requests to the Vault Health Check URL specified in the startup-script-vault.sh 49 | enable_web_proxy = true 50 | 51 | web_proxy_port = var.web_proxy_port 52 | 53 | # Even when the Vault cluster is pubicly accessible via a Load Balancer, we still make the Vault nodes themselves 54 | # private to improve the overall security posture. Note that the only way to reach private nodes via SSH is to first 55 | # SSH into another node that is not private. 56 | assign_public_ip_addresses = false 57 | 58 | # To enable external access to the Vault Cluster, enter the approved CIDR Blocks or tags below. 59 | # We enable health checks from the Consul Server cluster to Vault. 60 | allowed_inbound_cidr_blocks_api = [] 61 | 62 | allowed_inbound_tags_api = [var.consul_server_cluster_name] 63 | 64 | # This property is only necessary when using a Load Balancer 65 | instance_group_target_pools = [module.vault_load_balancer.target_pool_url] 66 | } 67 | 68 | # Render the Startup Script that will run on each Vault Instance on boot. This script will configure and start Vault. 69 | data "template_file" "startup_script_vault" { 70 | template = file("${path.module}/startup-script-vault.sh") 71 | 72 | vars = { 73 | consul_cluster_tag_name = var.consul_server_cluster_name 74 | vault_cluster_tag_name = var.vault_cluster_name 75 | web_proxy_port = var.web_proxy_port 76 | enable_vault_ui = var.enable_vault_ui ? "--enable-ui" : "" 77 | } 78 | } 79 | 80 | # --------------------------------------------------------------------------------------------------------------------- 81 | # DEPLOY THE LOAD BALANCER 82 | # --------------------------------------------------------------------------------------------------------------------- 83 | 84 | module "vault_load_balancer" { 85 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 86 | # to a specific version of the modules, such as the following example: 87 | # source = "git::git@github.com:hashicorp/terraform-google-vault.git//modules/vault-lb-regional-ext?ref=v0.0.1" 88 | source = "../../modules/vault-lb-fr" 89 | 90 | cluster_name = var.vault_cluster_name 91 | cluster_tag_name = var.vault_cluster_name 92 | 93 | health_check_path = "/" 94 | health_check_port = var.web_proxy_port 95 | } 96 | 97 | # --------------------------------------------------------------------------------------------------------------------- 98 | # DEPLOY THE CONSUL SERVER CLUSTER 99 | # --------------------------------------------------------------------------------------------------------------------- 100 | 101 | module "consul_cluster" { 102 | source = "git::git@github.com:hashicorp/terraform-google-consul.git//modules/consul-cluster?ref=v0.4.0" 103 | 104 | gcp_region = var.gcp_region 105 | gcp_project_id = var.gcp_project_id 106 | 107 | cluster_name = var.consul_server_cluster_name 108 | cluster_tag_name = var.consul_server_cluster_name 109 | cluster_size = var.consul_server_cluster_size 110 | 111 | source_image = var.consul_server_source_image 112 | machine_type = var.consul_server_machine_type 113 | 114 | startup_script = data.template_file.startup_script_consul.rendered 115 | 116 | # In a production setting, we strongly recommend only launching a Consul Server cluster as private nodes. 117 | # Note that the only way to reach private nodes via SSH is to first SSH into another node that is not private. 
118 | assign_public_ip_addresses = false 119 | 120 | allowed_inbound_tags_dns = [var.vault_cluster_name] 121 | allowed_inbound_tags_http_api = [var.vault_cluster_name] 122 | } 123 | 124 | # This Startup Script will run at boot to configure and start Consul on the Consul Server cluster nodes 125 | data "template_file" "startup_script_consul" { 126 | template = file("${path.module}/startup-script-consul.sh") 127 | 128 | vars = { 129 | cluster_tag_name = var.consul_server_cluster_name 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /examples/vault-cluster-private-with-public-lb/outputs.tf: -------------------------------------------------------------------------------- 1 | output "gcp_project" { 2 | value = var.gcp_project 3 | } 4 | 5 | output "gcp_region" { 6 | value = var.gcp_region 7 | } 8 | 9 | output "vault_cluster_size" { 10 | value = var.vault_cluster_size 11 | } 12 | 13 | output "cluster_tag_name" { 14 | value = module.vault_cluster.cluster_tag_name 15 | } 16 | 17 | output "instance_group_id" { 18 | value = module.vault_cluster.instance_group_id 19 | } 20 | 21 | output "instance_group_name" { 22 | value = module.vault_cluster.instance_group_name 23 | } 24 | 25 | output "instance_group_url" { 26 | value = module.vault_cluster.instance_group_url 27 | } 28 | 29 | output "instance_template_url" { 30 | value = module.vault_cluster.instance_template_url 31 | } 32 | 33 | output "firewall_rule_allow_intracluster_vault_id" { 34 | value = module.vault_cluster.firewall_rule_allow_intracluster_vault_id 35 | } 36 | 37 | output "firewall_rule_allow_intracluster_vault_url" { 38 | value = module.vault_cluster.firewall_rule_allow_intracluster_vault_url 39 | } 40 | 41 | output "firewall_rule_allow_inbound_api_id" { 42 | value = module.vault_cluster.firewall_rule_allow_inbound_api_id 43 | } 44 | 45 | output "firewall_rule_allow_inbound_api_url" { 46 | value = module.vault_cluster.firewall_rule_allow_inbound_api_url 47 | } 48 | 49 | output "firewall_rule_allow_inbound_health_check_id" { 50 | value = module.vault_cluster.firewall_rule_allow_inbound_health_check_id 51 | } 52 | 53 | output "firewall_rule_allow_inbound_health_check_url" { 54 | value = module.vault_cluster.firewall_rule_allow_inbound_health_check_url 55 | } 56 | 57 | output "bucket_name_id" { 58 | value = module.vault_cluster.bucket_name_id 59 | } 60 | 61 | output "bucket_name_url" { 62 | value = module.vault_cluster.bucket_name_url 63 | } 64 | 65 | -------------------------------------------------------------------------------- /examples/vault-cluster-private-with-public-lb/startup-script-consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in server mode. This script assumes it's running in a Compute Instance 4 | # based on a Google Image built from the Packer template in https://github.com/hashicorp/terraform-google-consul at 5 | # /examples/consul-image. 6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # Note that any variables below, like cluster_tag_name, are expected to be interpolated by Terraform.
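# For example, if the Terraform variable consul_server_cluster_name were set to "consul-example"
# (a hypothetical value), the line below would be rendered as:
#   /opt/consul/bin/run-consul --server --cluster-tag-name "consul-example"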
14 | /opt/consul/bin/run-consul --server --cluster-tag-name "${cluster_tag_name}" 15 | -------------------------------------------------------------------------------- /examples/vault-cluster-private-with-public-lb/startup-script-vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul and run-vault scripts to configure and start both Vault and Consul in client mode. This script assumes it's 4 | # running in a Compute Instance based on a Google Image built from the Packer template in 5 | # examples/vault-consul-image/vault-consul.json. 6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # The Packer template puts the TLS certs in these file paths 14 | readonly VAULT_TLS_CERT_FILE="/opt/vault/tls/vault.crt.pem" 15 | readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem" 16 | 17 | # Note that any variables below, like consul_cluster_tag_name, are expected to be interpolated by Terraform. 18 | /opt/consul/bin/run-consul --client --cluster-tag-name "${consul_cluster_tag_name}" 19 | /opt/vault/bin/run-vault --gcs-bucket "${vault_cluster_tag_name}" --tls-cert-file "$VAULT_TLS_CERT_FILE" --tls-key-file "$VAULT_TLS_KEY_FILE" ${enable_vault_ui} 20 | 21 | # We run an nginx server to expose an HTTP endpoint that will be used solely for Vault health checks. This is because 22 | # Google Cloud only permits HTTP health checks to be associated with the Load Balancer. 23 | /opt/nginx/bin/run-nginx --port ${web_proxy_port} --proxy-pass-url "https://127.0.0.1:8200/v1/sys/health?standbyok=true" 24 | -------------------------------------------------------------------------------- /examples/vault-cluster-private-with-public-lb/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These parameters must be supplied when consuming this module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "gcp_project" { 7 | description = "The name of the GCP Project where all resources will be launched." 8 | type = string 9 | } 10 | 11 | variable "gcp_region" { 12 | description = "The region in which all GCP resources will be launched." 13 | type = string 14 | } 15 | 16 | variable "vault_cluster_name" { 17 | description = "The name of the Vault Server cluster. All resources will be namespaced by this value. E.g. vault-server-prod" 18 | type = string 19 | } 20 | 21 | variable "vault_source_image" { 22 | description = "The Google Image used to launch each node in the Vault Server cluster." 23 | type = string 24 | } 25 | 26 | variable "vault_cluster_machine_type" { 27 | description = "The machine type of the Compute Instance to run for each node in the Vault cluster (e.g. n1-standard-1)." 28 | type = string 29 | } 30 | 31 | variable "consul_server_cluster_name" { 32 | description = "The name of the Consul Server cluster. All resources will be namespaced by this value. E.g.
consul-server-prod" 33 | type = string 34 | } 35 | 36 | variable "consul_server_source_image" { 37 | description = "The Google Image used to launch each node in the Consul Server cluster." 38 | type = string 39 | } 40 | 41 | variable "consul_server_machine_type" { 42 | description = "The machine type of the Compute Instance to run for each node in the Consul Server cluster (e.g. n1-standard-1)." 43 | type = string 44 | } 45 | 46 | # --------------------------------------------------------------------------------------------------------------------- 47 | # OPTIONAL PARAMETERS 48 | # These parameters have reasonable defaults. 49 | # --------------------------------------------------------------------------------------------------------------------- 50 | 51 | variable "gcs_bucket_location" { 52 | description = "The location of the Google Cloud Storage Bucket where Vault secrets will be stored. For details, see https://goo.gl/hk63jH." 53 | type = string 54 | default = "US" 55 | } 56 | 57 | variable "gcs_bucket_class" { 58 | description = "The Storage Class of the Google Cloud Storage Bucket where Vault secrets will be stored. Must be one of MULTI_REGIONAL, REGIONAL, NEARLINE, or COLDLINE. For details, see https://goo.gl/hk63jH." 59 | type = string 60 | default = "MULTI_REGIONAL" 61 | } 62 | 63 | variable "gcs_bucket_force_destroy" { 64 | description = "If true, Terraform will delete the Google Cloud Storage Bucket even if it's non-empty. WARNING! Never set this to true in a production setting. We only have this option here to facilitate testing." 65 | type = bool 66 | default = true 67 | } 68 | 69 | variable "vault_cluster_size" { 70 | description = "The number of nodes to have in the Vault Server cluster. We strongly recommended that you use either 3 or 5." 71 | type = number 72 | default = 3 73 | } 74 | 75 | variable "consul_server_cluster_size" { 76 | description = "The number of nodes to have in the Consul Server cluster. We strongly recommended that you use either 3 or 5." 77 | type = number 78 | default = 3 79 | } 80 | 81 | variable "web_proxy_port" { 82 | description = "The port at which the HTTP proxy server will listen for incoming HTTP requests that will be forwarded to the Vault Health Check URL. We must have an HTTP proxy server to work around the limitation that GCP only permits Health Checks via HTTP, not HTTPS." 83 | type = number 84 | default = 8000 85 | } 86 | 87 | variable "root_volume_disk_size_gb" { 88 | description = "The size, in GB, of the root disk volume on each Consul node." 89 | type = number 90 | default = 30 91 | } 92 | 93 | variable "root_volume_disk_type" { 94 | description = "The GCE disk type. Can be either pd-ssd, local-ssd, or pd-standard" 95 | type = string 96 | default = "pd-standard" 97 | } 98 | 99 | variable "enable_vault_ui" { 100 | description = "If true, enable the Vault UI" 101 | type = bool 102 | default = true 103 | } 104 | -------------------------------------------------------------------------------- /examples/vault-cluster-private/README.md: -------------------------------------------------------------------------------- 1 | # Private Vault Cluster Example 2 | 3 | This example deploys a private [Vault](https://www.vaultproject.io/) cluster in [GCP](https://cloud.google.com/) 4 | using the [vault-cluster](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster) module. 
For an example of a public Vault cluster that is accessible 5 | from the public Internet, see [vault-cluster-public](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-public). A private Vault cluster is only 6 | reachable from another Compute Instance, so this example does not expose the cluster nodes directly. Instead, it 7 | launches a bastion host with a public IP address in the same subnetwork, from which you can 8 | access the cluster nodes. 9 | 10 | The Vault cluster uses [Consul](https://www.consul.io/) as a storage backend, so this example also deploys a separate 11 | Consul server cluster using the [consul-cluster module]( 12 | https://github.com/hashicorp/terraform-google-consul/tree/master/modules/consul-cluster) from the Consul GCP Module. 13 | 14 | You will need to create a [Google Image](https://cloud.google.com/compute/docs/images) that has Vault and Consul 15 | installed, which you can do using the [vault-consul-image example](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-consul-image). 16 | 17 | For more info on how the Vault cluster works, check out the [vault-cluster](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster) documentation. 18 | 19 | 20 | ## Quick start 21 | 22 | To deploy a Vault Cluster: 23 | 24 | 1. `git clone` this repo to your computer. 25 | 1. Build a Vault and Consul Google Image. See the [vault-consul-image example](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-consul-image) documentation 26 | for instructions. Make sure to note down the ID of the Google Image. 27 | 1. Install [Terraform](https://www.terraform.io/). 28 | 1. Make sure your local environment is authenticated to Google Cloud. 29 | 1. Open `variables.tf` and fill in any variables that don't have a default, including putting your Google Image ID into 30 | the `vault_source_image` and `consul_server_source_image` variables. 31 | 1. Run `terraform init`. 32 | 1. Run `terraform plan`. 33 | 1. If the plan looks good, run `terraform apply`. 34 | 1. To enable other Compute Instances in the same GCP Project to access the Vault Cluster, edit the `main.tf` file to 35 | modify the `allowed_inbound_tags_api` variable. To allow arbitrary IP addresses to access the Vault cluster from 36 | within the VPC, modify the `allowed_inbound_cidr_blocks_api` variable. 37 | 38 | To see how to connect to the Vault cluster, initialize it, and start reading and writing secrets, head over to the 39 | [How do you use the Vault cluster?](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster#how-do-you-use-the-vault-cluster) docs. 40 | -------------------------------------------------------------------------------- /examples/vault-cluster-private/main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # DEPLOY A VAULT CLUSTER IN GOOGLE CLOUD 3 | # This is an example of how to use the vault-cluster module to deploy a private Vault cluster in GCP. A private Vault 4 | # cluster is the recommended approach for production usage. This cluster uses Consul, running in a separate cluster, as 5 | # its High Availability backend.
6 | # --------------------------------------------------------------------------------------------------------------------- 7 | 8 | provider "google" { 9 | project = var.gcp_project_id 10 | region = var.gcp_region 11 | } 12 | 13 | terraform { 14 | # The modules used in this example have been updated with 0.12 syntax, which means the example is no longer 15 | # compatible with any versions below 0.12. 16 | required_version = ">= 0.12" 17 | } 18 | 19 | # --------------------------------------------------------------------------------------------------------------------- 20 | # CREATES A SUBNETWORK WITH GOOGLE API ACCESS 21 | # Necessary because the private cluster doesn't have internet access, 22 | # but Consul needs to make requests to the Google API 23 | # --------------------------------------------------------------------------------------------------------------------- 24 | 25 | resource "google_compute_subnetwork" "private_subnet_with_google_api_access" { 26 | name = "${var.vault_cluster_name}-private-subnet-with-google-api-access" 27 | private_ip_google_access = true 28 | network = var.network_name 29 | ip_cidr_range = var.subnet_ip_cidr_range 30 | } 31 | 32 | # --------------------------------------------------------------------------------------------------------------------- 33 | # DEPLOY A BASTION HOST THAT CAN REACH THE CLUSTER 34 | # We can't SSH directly to the cluster nodes because they don't have an external IP 35 | # address, but we can SSH to a bastion host inside the same subnet and then 36 | # access the cluster from there 37 | # --------------------------------------------------------------------------------------------------------------------- 38 | 39 | data "google_compute_zones" "available" { 40 | } 41 | 42 | resource "google_compute_instance" "bastion" { 43 | name = var.bastion_server_name 44 | zone = data.google_compute_zones.available.names[0] 45 | machine_type = "g1-small" 46 | 47 | boot_disk { 48 | initialize_params { 49 | image = "ubuntu-1810-cosmic-v20181114" 50 | } 51 | } 52 | 53 | network_interface { 54 | subnetwork = google_compute_subnetwork.private_subnet_with_google_api_access.self_link 55 | 56 | access_config { 57 | // Ephemeral IP - leaving this block empty will generate a new external IP and assign it to the machine 58 | } 59 | } 60 | } 61 | 62 | # --------------------------------------------------------------------------------------------------------------------- 63 | # DEPLOY THE VAULT SERVER CLUSTER 64 | # --------------------------------------------------------------------------------------------------------------------- 65 | 66 | module "vault_cluster" { 67 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 68 | # to a specific version of the modules, such as the following example: 69 | # source = "git::git@github.com:hashicorp/terraform-google-vault.git//modules/vault-cluster?ref=v0.0.1" 70 | source = "../../modules/vault-cluster" 71 | 72 | subnetwork_name = google_compute_subnetwork.private_subnet_with_google_api_access.name 73 | 74 | gcp_project_id = var.gcp_project_id 75 | gcp_region = var.gcp_region 76 | 77 | cluster_name = var.vault_cluster_name 78 | cluster_size = var.vault_cluster_size 79 | cluster_tag_name = var.vault_cluster_name 80 | machine_type = var.vault_cluster_machine_type 81 | 82 | source_image = var.vault_source_image 83 | startup_script = data.template_file.startup_script_vault.rendered 84 | 85 | gcs_bucket_name = var.vault_cluster_name 86 | gcs_bucket_location =
var.gcs_bucket_location 87 | gcs_bucket_storage_class = var.gcs_bucket_class 88 | gcs_bucket_force_destroy = var.gcs_bucket_force_destroy 89 | 90 | root_volume_disk_size_gb = var.root_volume_disk_size_gb 91 | root_volume_disk_type = var.root_volume_disk_type 92 | 93 | # Note that the only way to reach private nodes via SSH is to first SSH into another node that is not private. 94 | assign_public_ip_addresses = false 95 | 96 | # To enable external access to the Vault Cluster, enter the approved CIDR Blocks or tags below. 97 | # We enable health checks from the Consul Server cluster to Vault. 98 | allowed_inbound_cidr_blocks_api = [] 99 | 100 | allowed_inbound_tags_api = [var.consul_server_cluster_name] 101 | } 102 | 103 | # Render the Startup Script that will run on each Vault Instance on boot. This script will configure and start Vault. 104 | data "template_file" "startup_script_vault" { 105 | template = file("${path.module}/startup-script-vault.sh") 106 | 107 | vars = { 108 | consul_cluster_tag_name = var.consul_server_cluster_name 109 | vault_cluster_tag_name = var.vault_cluster_name 110 | enable_vault_ui = var.enable_vault_ui ? "--enable-ui" : "" 111 | } 112 | } 113 | 114 | # --------------------------------------------------------------------------------------------------------------------- 115 | # DEPLOY THE CONSUL SERVER CLUSTER 116 | # Note that we make use of the terraform-google-consul module! 117 | # --------------------------------------------------------------------------------------------------------------------- 118 | 119 | module "consul_cluster" { 120 | source = "git::git@github.com:hashicorp/terraform-google-consul.git//modules/consul-cluster?ref=v0.4.0" 121 | 122 | subnetwork_name = google_compute_subnetwork.private_subnet_with_google_api_access.name 123 | gcp_project_id = var.gcp_project_id 124 | gcp_region = var.gcp_region 125 | cluster_name = var.consul_server_cluster_name 126 | cluster_tag_name = var.consul_server_cluster_name 127 | cluster_size = var.consul_server_cluster_size 128 | 129 | source_image = var.consul_server_source_image 130 | machine_type = var.consul_server_machine_type 131 | 132 | startup_script = data.template_file.startup_script_consul.rendered 133 | 134 | # Note that the only way to reach private nodes via SSH is to first SSH into another node that is not private. 135 | assign_public_ip_addresses = false 136 | 137 | allowed_inbound_tags_dns = [var.vault_cluster_name] 138 | allowed_inbound_tags_http_api = [var.vault_cluster_name] 139 | } 140 | 141 | # This Startup Script will run at boot to configure and start Consul on the Consul Server cluster nodes. 
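# Tip (one way to do this, not the only way): after `terraform apply`, you can inspect the
# rendered startup script with:
#   terraform state show data.template_file.startup_script_consul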
142 | data "template_file" "startup_script_consul" { 143 | template = file("${path.module}/startup-script-consul.sh") 144 | 145 | vars = { 146 | cluster_tag_name = var.consul_server_cluster_name 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /examples/vault-cluster-private/outputs.tf: -------------------------------------------------------------------------------- 1 | output "gcp_project_id" { 2 | value = var.gcp_project_id 3 | } 4 | 5 | output "gcp_region" { 6 | value = var.gcp_region 7 | } 8 | 9 | output "vault_cluster_size" { 10 | value = var.vault_cluster_size 11 | } 12 | 13 | output "cluster_tag_name" { 14 | value = module.vault_cluster.cluster_tag_name 15 | } 16 | 17 | output "bastion_server_name" { 18 | value = var.bastion_server_name 19 | } 20 | 21 | output "instance_group_id" { 22 | value = module.vault_cluster.instance_group_id 23 | } 24 | 25 | output "instance_group_name" { 26 | value = module.vault_cluster.instance_group_name 27 | } 28 | 29 | output "instance_group_url" { 30 | value = module.vault_cluster.instance_group_url 31 | } 32 | 33 | output "instance_template_url" { 34 | value = module.vault_cluster.instance_template_url 35 | } 36 | 37 | output "firewall_rule_allow_intracluster_vault_id" { 38 | value = module.vault_cluster.firewall_rule_allow_intracluster_vault_id 39 | } 40 | 41 | output "firewall_rule_allow_intracluster_vault_url" { 42 | value = module.vault_cluster.firewall_rule_allow_intracluster_vault_url 43 | } 44 | 45 | output "firewall_rule_allow_inbound_api_id" { 46 | value = module.vault_cluster.firewall_rule_allow_inbound_api_id 47 | } 48 | 49 | output "firewall_rule_allow_inbound_api_url" { 50 | value = module.vault_cluster.firewall_rule_allow_inbound_api_url 51 | } 52 | 53 | output "firewall_rule_allow_inbound_health_check_id" { 54 | value = module.vault_cluster.firewall_rule_allow_inbound_health_check_id 55 | } 56 | 57 | output "firewall_rule_allow_inbound_health_check_url" { 58 | value = module.vault_cluster.firewall_rule_allow_inbound_health_check_url 59 | } 60 | 61 | output "bucket_name_id" { 62 | value = module.vault_cluster.bucket_name_id 63 | } 64 | 65 | output "bucket_name_url" { 66 | value = module.vault_cluster.bucket_name_url 67 | } 68 | 69 | -------------------------------------------------------------------------------- /examples/vault-cluster-private/startup-script-consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in server mode. This script assumes it's running in a Compute Instance 4 | # based on a Google Image built from the Packer template in https://github.com/hashicorp/terraform-google-consul at 5 | # /examples/consul-image. 6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # Note that any variables below with are expected to be interpolated by Terraform. 
14 | /opt/consul/bin/run-consul --server --cluster-tag-name "${cluster_tag_name}" 15 | -------------------------------------------------------------------------------- /examples/vault-cluster-private/startup-script-vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run as the Startup Script of each Compute Instance while it's booting. The script uses the 3 | # run-consul and run-vault scripts to configure and start both Vault and Consul in client mode. This script assumes it's 4 | # running in a Compute Instance based on a Google Image built from the Packer template in 5 | # examples/vault-consul-image/vault-consul.json. 6 | 7 | set -e 8 | 9 | # Send the log output from this script to startup-script.log, syslog, and the console 10 | # Inspired by https://alestic.com/2010/12/ec2-user-data-output/ 11 | exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1 12 | 13 | # The Packer template puts the TLS certs in these file paths 14 | readonly VAULT_TLS_CERT_FILE="/opt/vault/tls/vault.crt.pem" 15 | readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem" 16 | 17 | # Note that any variables below, like consul_cluster_tag_name, are expected to be interpolated by Terraform. 18 | /opt/consul/bin/run-consul --client --cluster-tag-name "${consul_cluster_tag_name}" 19 | /opt/vault/bin/run-vault --gcs-bucket "${vault_cluster_tag_name}" --tls-cert-file "$VAULT_TLS_CERT_FILE" --tls-key-file "$VAULT_TLS_KEY_FILE" ${enable_vault_ui} 20 | -------------------------------------------------------------------------------- /examples/vault-cluster-private/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These parameters must be supplied when consuming this module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "gcp_project_id" { 7 | description = "The name of the GCP Project where all resources will be launched." 8 | type = string 9 | } 10 | 11 | variable "gcp_region" { 12 | description = "The Region in which all GCP resources will be launched." 13 | type = string 14 | } 15 | 16 | variable "subnet_ip_cidr_range" { 17 | description = "The CIDR range for the subnetwork. Ex.: 10.1.0.0/16" 18 | type = string 19 | } 20 | 21 | variable "bastion_server_name" { 22 | description = "The name of the bastion server that can reach the private Vault cluster" 23 | type = string 24 | } 25 | 26 | variable "vault_cluster_name" { 27 | description = "The name of the Vault Server cluster. All resources will be namespaced by this value. E.g. vault-server-prod" 28 | type = string 29 | } 30 | 31 | variable "vault_source_image" { 32 | description = "The Google Image used to launch each node in the Vault Server cluster." 33 | type = string 34 | } 35 | 36 | variable "vault_cluster_machine_type" { 37 | description = "The machine type of the Compute Instance to run for each node in the Vault cluster (e.g. n1-standard-1)." 38 | type = string 39 | } 40 | 41 | variable "consul_server_cluster_name" { 42 | description = "The name of the Consul Server cluster. All resources will be namespaced by this value. E.g.
consul-server-prod" 43 | type = string 44 | } 45 | 46 | variable "consul_server_source_image" { 47 | description = "The Google Image used to launch each node in the Consul Server cluster." 48 | type = string 49 | } 50 | 51 | variable "consul_server_machine_type" { 52 | description = "The machine type of the Compute Instance to run for each node in the Consul Server cluster (e.g. n1-standard-1)." 53 | type = string 54 | } 55 | 56 | # --------------------------------------------------------------------------------------------------------------------- 57 | # OPTIONAL PARAMETERS 58 | # These parameters have reasonable defaults. 59 | # --------------------------------------------------------------------------------------------------------------------- 60 | 61 | variable "gcs_bucket_location" { 62 | description = "The location of the Google Cloud Storage Bucket where Vault secrets will be stored. For details, see https://goo.gl/hk63jH." 63 | type = string 64 | default = "US" 65 | } 66 | 67 | variable "gcs_bucket_class" { 68 | description = "The Storage Class of the Google Cloud Storage Bucket where Vault secrets will be stored. Must be one of MULTI_REGIONAL, REGIONAL, NEARLINE, or COLDLINE. For details, see https://goo.gl/hk63jH." 69 | type = string 70 | default = "MULTI_REGIONAL" 71 | } 72 | 73 | variable "gcs_bucket_force_destroy" { 74 | description = "If true, Terraform will delete the Google Cloud Storage Bucket even if it's non-empty. WARNING! Never set this to true in a production setting. We only have this option here to facilitate testing." 75 | type = bool 76 | default = true 77 | } 78 | 79 | variable "vault_cluster_size" { 80 | description = "The number of nodes to have in the Vault Server cluster. We strongly recommended that you use either 3 or 5." 81 | type = number 82 | default = 3 83 | } 84 | 85 | variable "consul_server_cluster_size" { 86 | description = "The number of nodes to have in the Consul Server cluster. We strongly recommended that you use either 3 or 5." 87 | type = number 88 | default = 3 89 | } 90 | 91 | variable "root_volume_disk_size_gb" { 92 | description = "The size, in GB, of the root disk volume on each Consul node." 93 | type = number 94 | default = 30 95 | } 96 | 97 | variable "root_volume_disk_type" { 98 | description = "The GCE disk type. Can be either pd-ssd, local-ssd, or pd-standard" 99 | type = string 100 | default = "pd-standard" 101 | } 102 | 103 | variable "enable_vault_ui" { 104 | description = "If true, enable the Vault UI" 105 | type = bool 106 | default = true 107 | } 108 | 109 | variable "network_name" { 110 | description = "The name of the VPC Network where all resources should be created." 
111 | type = string 112 | default = "default" 113 | } 114 | -------------------------------------------------------------------------------- /examples/vault-consul-image/README.md: -------------------------------------------------------------------------------- 1 | # Vault and Consul Google Image 2 | 3 | This folder shows an example of how to use the [install-vault module](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/install-vault) from this Module and 4 | the [install-consul](https://github.com/hashicorp/terraform-google-consul/tree/master/modules/install-consul) 5 | and [install-dnsmasq](https://github.com/hashicorp/terraform-google-consul/tree/master/modules/install-dnsmasq) modules 6 | from the Consul GCP Module with [Packer](https://www.packer.io/) to create a [Google Image]( 7 | https://cloud.google.com/compute/docs/images) that has Vault and Consul installed on top of: 8 | 9 | 1. Ubuntu 16.04 10 | 1. Ubuntu 18.04 11 | 12 | You can use this Google Image to deploy a [Vault cluster](https://www.vaultproject.io/) by using the [vault-cluster 13 | module](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster). This Vault cluster will use Consul as its storage backend, so you can also use the 14 | same Google Image to deploy a separate [Consul server cluster](https://www.consul.io/) by using the [consul-cluster 15 | module](https://github.com/hashicorp/terraform-google-consul/tree/master/modules/consul-cluster). 16 | 17 | Check out the [vault-cluster-private](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-private) and 18 | [vault-cluster-public](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-public) examples for working sample code. For more info on Vault 19 | installation and configuration, check out the [install-vault](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/install-vault) documentation. 20 | 21 | You can also use this example Packer template to build a Cloud Image that includes Enterprise versions of both Vault & Consul. See below for more 22 | information. 23 | 24 | ## Quick start 25 | 26 | To build the Vault and Consul Google Image: 27 | 28 | 1. `git clone` this repo to your computer. 29 | 30 | 1. Install [Packer](https://www.packer.io/). 31 | 32 | 1. Configure your environment's Google credentials using the [Google Cloud SDK](https://cloud.google.com/sdk/). 33 | 34 | 1. Use the [private-tls-cert module](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/private-tls-cert) to generate a CA cert and public and private keys for a 35 | TLS cert: 36 | 37 | 1. Set the `dns_names` parameter to `vault.service.consul`. If you're using the [vault-cluster-public 38 | example](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-public) and want a public domain name (e.g. `vault.example.com`), add that 39 | domain name here too. 40 | 1. Set the `ip_addresses` to `127.0.0.1`. 41 | 1. For production usage, you should take care to protect the private key by encrypting it (see [Using TLS 42 | certs](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/private-tls-cert#using-tls-certs) for more info). 43 | 44 | 1. Update the `variables` section of the `vault-consul.json` Packer template to configure the Project ID, Google Cloud Zone, 45 | and Consul and Vault versions you wish to use. 
Alternatively, you can pass in these values on the command line, e.g. `packer build -var 'var_name=var_value' vault-consul.json`. 46 | 47 | 1. Run `packer build vault-consul.json`. 48 | 49 | When the build finishes, it will output the ID of the new Google Image. To see how to deploy this Image, check out the 50 | [vault-cluster-private](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-private) and [vault-cluster-public](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-public) 51 | examples. 52 | 53 | 54 | 55 | 56 | ## Creating your own Packer template for production usage 57 | 58 | When creating your own Packer template for production usage, you can copy the example in this folder more or less 59 | exactly, except for one change: we recommend replacing the `file` provisioner with a call to `git clone` in the `shell` 60 | provisioner. Instead of: 61 | 62 | ```json 63 | { 64 | "provisioners": [{ 65 | "type": "file", 66 | "source": "{{template_dir}}/../../../terraform-google-vault", 67 | "destination": "/tmp" 68 | },{ 69 | "type": "shell", 70 | "inline": [ 71 | "/tmp/terraform-google-vault/modules/install-vault/install-vault --version {{user `vault_version`}}" 72 | ], 73 | "pause_before": "30s" 74 | }] 75 | } 76 | ``` 77 | 78 | Your code should look more like this: 79 | 80 | ```json 81 | { 82 | "provisioners": [{ 83 | "type": "shell", 84 | "inline": [ 85 | "git clone --branch <VERSION> https://github.com/hashicorp/terraform-google-vault.git /tmp/terraform-google-vault", 86 | "/tmp/terraform-google-vault/modules/install-vault/install-vault --version {{user `vault_version`}}" 87 | ], 88 | "pause_before": "30s" 89 | }] 90 | } 91 | ``` 92 | 93 | You should replace `<VERSION>` in the code above with the version of this Module that you want to use (see 94 | the [Releases Page](https://github.com/hashicorp/terraform-google-vault/releases) for all available versions). That's because for production usage, you should always 95 | use a fixed, known version of this Module, downloaded from the official Git repo. On the other hand, when you're 96 | just experimenting with the Module, it's OK to use a local checkout of the Module, uploaded from your own 97 | computer. 98 | 99 | ## Building an Image with the Enterprise versions installed 100 | 101 | To build the Enterprise Vault and Consul Google Image, simply repeat the steps described in the Quick Start, but make sure the following environment 102 | variables are set: 103 | 104 | - `VAULT_DOWNLOAD_URL` 105 | - `CONSUL_DOWNLOAD_URL` 106 | -------------------------------------------------------------------------------- /examples/vault-consul-image/tls/README.md: -------------------------------------------------------------------------------- 1 | # Example TLS Certificate Files 2 | 3 | ### Do NOT use these files in production! 4 | 5 | In a production setting, your TLS private key represents a critical secret. If it were stolen, its possessor could 6 | impersonate your Vault server! For that reason, do NOT use these TLS certificate files in a public setting. They are 7 | here only for convenience when building examples. 8 | 9 | ### Files 10 | 11 | The files in this folder are needed by Vault to accept HTTPS requests. They are: 12 | 13 | - **ca.crt.pem:** The public certificate of the Certificate Authority used to create these files. 14 | - **vault.crt.pem:** The TLS public certificate for the Vault server, issued by the Certificate Authority.
15 | - **vault.key.pem:** The TLS private key that corresponds to the TLS public certificate. 16 | 17 | The TLS files are configured as follows: 18 | 19 | - The Vault Server may be reached via TLS at `vault.service.consul`, `vault.example.com`, or `127.0.0.1`. 20 | - The TLS certificate is valid until May 26, 2042. -------------------------------------------------------------------------------- /examples/vault-consul-image/tls/ca.crt.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDnTCCAoWgAwIBAgIRAMGFenE97vbqkhrN7fhHDxUwDQYJKoZIhvcNAQELBQAw 3 | aDEJMAcGA1UEBhMAMQkwBwYDVQQIEwAxCTAHBgNVBAcTADEJMAcGA1UEERMAMRIw 4 | EAYDVQQKEwlIYXNoaUNvcnAxCTAHBgNVBAsTADEbMBkGA1UEAxMSSGFzaGlDb3Jw 5 | IFZhdWx0IENBMB4XDTE3MDUyNzA0MjQzNVoXDTQyMDUyMTA0MjQzNVowaDEJMAcG 6 | A1UEBhMAMQkwBwYDVQQIEwAxCTAHBgNVBAcTADEJMAcGA1UEERMAMRIwEAYDVQQK 7 | EwlIYXNoaUNvcnAxCTAHBgNVBAsTADEbMBkGA1UEAxMSSGFzaGlDb3JwIFZhdWx0 8 | IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4+Ur+WKS8zypyF5l 9 | IbvuzIzrk4V77F6LaLbpcOvLZ16LGIwCnVRIQybCTRjSg75Cb3FDQofL8mbaHDwS 10 | G7JUnCXtHxlVUllIEDbJchva1RgGSBHJF2k4JEXI1O11amajoU1N9nmo8EMVu/tr 11 | cBNKp746i1DOSx23xmVVNSkNEFd91YcAyR51pwWOdv0zchCZrP39V6mFM2JF7/hU 12 | 7s0Y9hn5LCzU7jlsgxg8FI0O2wWvLZt2SjuW+g2oJnlYQjQHic7qyoEGmJGaBPTW 13 | KPcsH8IcUe1N33J2D5ejTQXwv3dRclLD5slzQbAEWVarWdZ69O5C3RIGu9gm0v7Q 14 | tMkJfQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAd 15 | BgNVHQ4EFgQUKr006sVeVWdjTWDzNV096v03sdswDQYJKoZIhvcNAQELBQADggEB 16 | AF9Il2A1Qoc6r1IQFBs2gE5bSHXVHiuU/ykFaBURU9WCPl9a7oIiKtqnHhhCHokm 17 | qAW4v5VoH7JvhgVxJx6DhgKw6GV1KLWmUIL12asBLfubjaCUeedNNFrO1VSkuA7l 18 | RDSnhzdPk6O4MFa71/Uwo3E91c3igHKfKdxQZ1CcR/iyx2duGTnQDTMkyO8iHzJI 19 | Cs9as+PZzibqlsnpgyUyviCkGYjqH2o/LTsTGKtSru+erPAN3smzfG/wZXRrHyyz 20 | wjid2nbi7gASZVXUocclU9zl253+8ALpGKlDkuvQUJSCEraC7jtMQx16W8ad2aXL 21 | LnnabgKz01Lzggpvfdal+a4= 22 | -----END CERTIFICATE----- -------------------------------------------------------------------------------- /examples/vault-consul-image/tls/vault.crt.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIID1zCCAr+gAwIBAgIQdQvxfTEZADMpi5RQmp1FCTANBgkqhkiG9w0BAQsFADBo 3 | MQkwBwYDVQQGEwAxCTAHBgNVBAgTADEJMAcGA1UEBxMAMQkwBwYDVQQREwAxEjAQ 4 | BgNVBAoTCUhhc2hpQ29ycDEJMAcGA1UECxMAMRswGQYDVQQDExJIYXNoaUNvcnAg 5 | VmF1bHQgQ0EwHhcNMTcwNTI3MDQyNDM1WhcNNDIwNTIxMDQyNDM1WjBqMQkwBwYD 6 | VQQGEwAxCTAHBgNVBAgTADEJMAcGA1UEBxMAMQkwBwYDVQQREwAxEjAQBgNVBAoT 7 | CUhhc2hpQ29ycDEJMAcGA1UECxMAMR0wGwYDVQQDExRIYXNoaUNvcnAgVmF1bHQg 8 | Q2VydDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKQT46pH/1Ljy+6V 9 | h6EKHsJoSBiC7DolQY4tPpPlZpNEEIgftYgq2KA9DJTYDzpLy3sHkUNQmc0eIfUy 10 | xNklHE5N6WcK9oozo4LNIPd4//Tn90maUGG3z4SIan8A+wVuAhz/t8BfuAdmOeax 11 | zjs9mqbqSPiKcBx5LFjWvt30Hq5idup4c/69v8ByimqHuS2PJtv/C6/b2NU6Bq/9 12 | Y5fC1Oxb2iONrhgTYHF44qHpxL6VeSwVxHzUhxe+EaCSbGvNuAe2i/yqffhy/LFu 13 | T15jsiaHfDhj4H3I16hLz/BN49NhNzhJeFG+S3S65JL4P35UJxaRLHu925myOntr 14 | 5QPJujsCAwEAAaN7MHkwDgYDVR0PAQH/BAQDAgWgMAwGA1UdEwEB/wQCMAAwHwYD 15 | VR0jBBgwFoAUKr006sVeVWdjTWDzNV096v03sdswOAYDVR0RBDEwL4IUdmF1bHQu 16 | c2VydmljZS5jb25zdWyCEXZhdWx0LmV4YW1wbGUuY29thwR/AAABMA0GCSqGSIb3 17 | DQEBCwUAA4IBAQCY3I2sbebudms8J9gSxbcTE0GoOpJJ7bBdYEKp7TmshrL8ni9L 18 | S9skHbKcpnn4ZHv9ofjtDJ1LfN1TWTHBTZP92GbAZPAl/TdNMOXdGRpsKBo2ElLy 19 | +RoQWm0YBofi9vkjaUSZMHN7LHZqOnaAEtHcBaRj7eTmOvitim+o5Tw6tpjuXEYf 20 | Y9BgwhlXsm5Y4n3ownbU7A5PsKn0G1/0xykR6UJqYlpEypiZWxuaWQNW/B1cJxQv 21 | 
ghDwg05HzCccmxMAJLgZrPpA6FclQD5mfJN6+0AaWv1iZFz7LaaXR2QV41PtZ83d 22 | EKoEz9uM8uqoRhQR874axQaBoC4CJNc09J4U 23 | -----END CERTIFICATE----- -------------------------------------------------------------------------------- /examples/vault-consul-image/tls/vault.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEApBPjqkf/UuPL7pWHoQoewmhIGILsOiVBji0+k+Vmk0QQiB+1 3 | iCrYoD0MlNgPOkvLeweRQ1CZzR4h9TLE2SUcTk3pZwr2ijOjgs0g93j/9Of3SZpQ 4 | YbfPhIhqfwD7BW4CHP+3wF+4B2Y55rHOOz2apupI+IpwHHksWNa+3fQermJ26nhz 5 | /r2/wHKKaoe5LY8m2/8Lr9vY1ToGr/1jl8LU7FvaI42uGBNgcXjioenEvpV5LBXE 6 | fNSHF74RoJJsa824B7aL/Kp9+HL8sW5PXmOyJod8OGPgfcjXqEvP8E3j02E3OEl4 7 | Ub5LdLrkkvg/flQnFpEse73bmbI6e2vlA8m6OwIDAQABAoIBAF0O8sb3QraOgHF2 8 | 2Y/an4t/fbR5POXVj5LF0oIrT7wilIXABkOOmYJ4XZRl3m0f4+6JYjgdlL2jY3sg 9 | KklGJQG8aq6Ipz/G/ewHz7TMKc+LaNOT9BcYG1h9znjt43E27XfpCRzQrR11O02+ 10 | dsteq0IUCwL78Y4Uo7RXR7W26VfZlEDKhIjbuNSxlU4BnAD7e/HsvB5pmPXsbj4O 11 | 7dkiW6/6XJPZdSpSdc5C9v0nToTw31KpFqODmxL2Iy0kpWvOLJZKIW20cKFSCjo8 12 | 4gu5eog4liQn+wpG9/Czhd/XfXxkg8MdOjpzIlkkC4lUM53rxPmlfbBtiA85um/6 13 | KpZ7M3ECgYEAzBRroR2hrKBo9lqx1Ein6PNEmrLbfhVOXHQZvlczcicO1WeLRbZ9 14 | lrHNzLlVRkY4UiS10VfYUhjX4uLU6GTHYotZ+dJ+B6eVr1qf/K8DWJjhX98CQ1hZ 15 | RdnnQEdIdC7SqPhDo9PKcc7WjOTq1LwawTQneNFDDol6PgBMOS8X9MUCgYEAzdIp 16 | DWzraU9EenUyNgkz55MEsxyryHefRSqtQB53d7Eic4kSJSKm2pcKtrKxJbgTSqAe 17 | MeWs+MT7i8QCScWSJJU7xpP6FhowTABEPrUxR8lFbLfxV9EwzCTKXWpBRwI5SkZ2 18 | YuxOClpZgRAdf8qlCjgEvSvCW0C4aKCA8XTr4v8CgYEAif380NKScYF9t6aXu+zs 19 | 7I0hhGEQHW8Wr1kp1xRrivJyC0aaW6cLwIu1lopy6LOufYypDEaT1N9LivTJ9eG/ 20 | GBkV2+DCqzZb9lgW+er1HkExk3vdsd/ZbWvr/AC3Myg99Vb5lZsttkqftGFNkE8o 21 | 4B792anV58x5xda5s6juT+kCgYEAuEIQDHvZGJMlO/gVdniwpf1hNLRTOYmV5Yb6 22 | SS+9RkMnE4W/38zw0TptFfhNTPZJFwLXZVY3jxJSG+LjJYYhemy7ceBiAE17tV57 23 | uiPeNWUDqPvXrQWTCP9ax1xrihV8knkYXSEuEGioPjneHtyc+dQCshQt0CHVtZ0T 24 | Mpa44qMCgYAExVJEJAsz5rL09ku9IcdUc/b72Xsl2S83esDmohKLt8M/HDaiT9nC 25 | j3pXg4UBIXN+emhR5Lbh+R4QBrclnz3kxuiTc3JEHXAkyrL+DOJnED1qggNKz1Dj 26 | dh5t7ysHNq31750ZssKMoaDj1V1mzbW+qZ3NVbpMZrKRkVs+ixe8Hg== 27 | -----END RSA PRIVATE KEY----- -------------------------------------------------------------------------------- /examples/vault-consul-image/vault-consul.json: -------------------------------------------------------------------------------- 1 | { 2 | "min_packer_version": "1.0.4", 3 | "variables": { 4 | "project_id": null, 5 | "zone": null, 6 | "vault_version": "0.11.5", 7 | "consul_module_version": "v0.3.1", 8 | "consul_version": "1.3.1", 9 | "consul_download_url": "{{env `CONSUL_DOWNLOAD_URL`}}", 10 | "vault_download_url": "{{env `VAULT_DOWNLOAD_URL`}}", 11 | "ca_public_key_path": null, 12 | "tls_public_key_path": null, 13 | "tls_private_key_path": null 14 | }, 15 | "builders": [{ 16 | "name": "ubuntu16-image", 17 | "type": "googlecompute", 18 | "project_id": "{{user `project_id`}}", 19 | "source_image_family": "ubuntu-1604-lts", 20 | "zone": "{{user `zone`}}", 21 | "image_name": "vault-consul-ubuntu16-{{uuid | clean_image_name}}", 22 | "image_family": "vault-consul", 23 | "ssh_username": "ubuntu" 24 | },{ 25 | "name": "ubuntu18-image", 26 | "type": "googlecompute", 27 | "project_id": "{{user `project_id`}}", 28 | "source_image_family": "ubuntu-1804-lts", 29 | "zone": "{{user `zone`}}", 30 | "image_name": "vault-consul-ubuntu18-{{uuid | clean_image_name}}", 31 | "image_family": "vault-consul", 32 | "ssh_username": "ubuntu" 33 | }], 34 | "provisioners": [{ 35 | "type": "shell", 36 | "inline": ["mkdir -p
/tmp/terraform-google-vault/modules"] 37 | },{ 38 | "type": "file", 39 | "source": "{{template_dir}}/../../modules/", 40 | "destination": "/tmp/terraform-google-vault/modules", 41 | "pause_before": "30s" 42 | },{ 43 | "type": "shell", 44 | "inline": [ 45 | "if test -n \"{{user `vault_download_url`}}\"; then", 46 | " /tmp/terraform-google-vault/modules/install-vault/install-vault --download-url {{user `vault_download_url`}};", 47 | "else", 48 | " /tmp/terraform-google-vault/modules/install-vault/install-vault --version {{user `vault_version`}};", 49 | "fi", 50 | "sudo /tmp/terraform-google-vault/modules/install-nginx/install-nginx --signing-key /tmp/terraform-google-vault/modules/install-nginx/nginx_signing.key" 51 | ] 52 | },{ 53 | "type": "file", 54 | "source": "{{user `ca_public_key_path`}}", 55 | "destination": "/tmp/ca.crt.pem" 56 | },{ 57 | "type": "file", 58 | "source": "{{user `tls_public_key_path`}}", 59 | "destination": "/tmp/vault.crt.pem" 60 | },{ 61 | "type": "file", 62 | "source": "{{user `tls_private_key_path`}}", 63 | "destination": "/tmp/vault.key.pem" 64 | },{ 65 | "type": "shell", 66 | "inline": [ 67 | "sudo mv /tmp/ca.crt.pem /opt/vault/tls/", 68 | "sudo mv /tmp/vault.crt.pem /opt/vault/tls/", 69 | "sudo mv /tmp/vault.key.pem /opt/vault/tls/", 70 | "sudo chown vault:vault /opt/vault/tls/*", 71 | "sudo chmod 600 /opt/vault/tls/*", 72 | "sudo /tmp/terraform-google-vault/modules/update-certificate-store/update-certificate-store --cert-file-path /opt/vault/tls/ca.crt.pem" 73 | ] 74 | },{ 75 | "type": "shell", 76 | "inline": [ 77 | "sudo mkdir -p /opt/gruntwork", 78 | "git clone --branch v0.0.3 https://github.com/gruntwork-io/bash-commons.git /tmp/bash-commons", 79 | "sudo cp -r /tmp/bash-commons/modules/bash-commons/src /opt/gruntwork/bash-commons" 80 | ] 81 | },{ 82 | "type": "shell", 83 | "inline": [ 84 | "git clone --branch {{user `consul_module_version`}} https://@github.com/hashicorp/terraform-google-consul.git /tmp/terraform-google-consul", 85 | "if test -n \"{{user `consul_download_url`}}\"; then", 86 | " /tmp/terraform-google-consul/modules/install-consul/install-consul --download-url {{user `consul_download_url`}};", 87 | "else", 88 | " /tmp/terraform-google-consul/modules/install-consul/install-consul --version {{user `consul_version`}}", 89 | "fi", 90 | "/tmp/terraform-google-consul/modules/install-dnsmasq/install-dnsmasq" 91 | ] 92 | }] 93 | } 94 | -------------------------------------------------------------------------------- /examples/vault-examples-helper/README.md: -------------------------------------------------------------------------------- 1 | # Vault Examples Helper 2 | 3 | This folder contains a helper script called `vault-examples-helper.sh` for working with the 4 | [vault-cluster-private](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-private) and [vault-cluster-public](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-public) 5 | examples. After running `terraform apply` on one of the examples, if you run `vault-examples-helper.sh`, it will 6 | automatically: 7 | 8 | 1. Wait for the Vault server cluster to come up. 9 | 1. Print out the IP addresses of the Vault servers. 10 | 1. Print out some example commands you can run against your Vault servers. 11 | 12 | Please note that this helper script only works with the root example in this repo because that is the only example where 13 | Vault servers are publicly accessible by default. 
This is OK for testing and learning, but for production usage, we strongly 14 | recommend running Vault servers that are not accessible from the public Internet. 15 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # DEPLOY A VAULT CLUSTER IN GOOGLE CLOUD 3 | # This is an example of how to use the vault-cluster module to deploy a public Vault cluster in GCP. A public Vault 4 | # cluster is NOT recommended for production usage, but it's the easiest way to try things out. For production usage, 5 | # see the vault-cluster-private example, or if necessary, the vault-cluster-public example. Note that this Vault cluster 6 | # uses Consul, running in a separate cluster, as its High Availability backend. 7 | # --------------------------------------------------------------------------------------------------------------------- 8 | 9 | provider "google" { 10 | region = var.gcp_region 11 | project = var.gcp_project_id 12 | } 13 | 14 | terraform { 15 | # The modules used in this example have been updated with 0.12 syntax, which means the example is no longer 16 | # compatible with any versions below 0.12. 17 | required_version = ">= 0.12" 18 | } 19 | 20 | # --------------------------------------------------------------------------------------------------------------------- 21 | # DEPLOY THE VAULT SERVER CLUSTER 22 | # --------------------------------------------------------------------------------------------------------------------- 23 | 24 | module "vault_cluster" { 25 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 26 | # to a specific version of the modules, such as the following example: 27 | # source = "git::git@github.com:hashicorp/terraform-google-vault.git//modules/vault-cluster?ref=v0.0.1" 28 | source = "./modules/vault-cluster" 29 | 30 | gcp_project_id = var.gcp_project_id 31 | gcp_region = var.gcp_region 32 | 33 | cluster_name = var.vault_cluster_name 34 | cluster_size = var.vault_cluster_size 35 | cluster_tag_name = var.vault_cluster_name 36 | machine_type = var.vault_cluster_machine_type 37 | 38 | image_project_id = var.image_project_id 39 | source_image = var.vault_source_image 40 | startup_script = data.template_file.startup_script_vault.rendered 41 | 42 | gcs_bucket_name = var.vault_cluster_name 43 | gcs_bucket_location = var.gcs_bucket_location 44 | gcs_bucket_storage_class = var.gcs_bucket_class 45 | gcs_bucket_force_destroy = var.gcs_bucket_force_destroy 46 | 47 | root_volume_disk_size_gb = var.root_volume_disk_size_gb 48 | root_volume_disk_type = var.root_volume_disk_type 49 | 50 | # Even when the Vault cluster is pubicly accessible via a Load Balancer, we still make the Vault nodes themselves 51 | # private to improve the overall security posture. Note that the only way to reach private nodes via SSH is to first 52 | # SSH into another node that is not private. 53 | assign_public_ip_addresses = true 54 | 55 | # To enable external access to the Vault Cluster, enter the approved CIDR Blocks or tags below. 56 | # We enable health checks from the Consul Server cluster to Vault. 
57 | allowed_inbound_cidr_blocks_api = ["0.0.0.0/0"] 58 | 59 | allowed_inbound_tags_api = [var.consul_server_cluster_name] 60 | } 61 | 62 | # Render the Startup Script that will run on each Vault Instance on boot. This script will configure and start Vault. 63 | data "template_file" "startup_script_vault" { 64 | template = file( 65 | "${path.module}/examples/root-example/startup-script-vault.sh", 66 | ) 67 | 68 | vars = { 69 | consul_cluster_tag_name = var.consul_server_cluster_name 70 | vault_cluster_tag_name = var.vault_cluster_name 71 | enable_vault_ui = var.enable_vault_ui ? "--enable-ui" : "" 72 | } 73 | } 74 | 75 | # --------------------------------------------------------------------------------------------------------------------- 76 | # DEPLOY THE CONSUL SERVER CLUSTER 77 | # --------------------------------------------------------------------------------------------------------------------- 78 | 79 | module "consul_cluster" { 80 | source = "git::git@github.com:hashicorp/terraform-google-consul.git//modules/consul-cluster?ref=v0.4.0" 81 | 82 | gcp_project_id = var.gcp_project_id 83 | gcp_region = var.gcp_region 84 | 85 | cluster_name = var.consul_server_cluster_name 86 | cluster_tag_name = var.consul_server_cluster_name 87 | cluster_size = var.consul_server_cluster_size 88 | 89 | source_image = var.consul_server_source_image 90 | machine_type = var.consul_server_machine_type 91 | 92 | startup_script = data.template_file.startup_script_consul.rendered 93 | 94 | # In a production setting, we strongly recommend only launching a Consul Server cluster as private nodes. 95 | # Here we assign public IPs to keep this root example easy to try out; note that the only way to reach private 96 | # nodes via SSH is to first SSH into another node that is not private. 96 | assign_public_ip_addresses = true 97 | 98 | allowed_inbound_tags_dns = [var.vault_cluster_name] 99 | allowed_inbound_tags_http_api = [var.vault_cluster_name] 100 | } 101 | 102 | # This Startup Script will run at boot to configure and start Consul on the Consul Server cluster nodes 103 | data "template_file" "startup_script_consul" { 104 | template = file( 105 | "${path.module}/examples/root-example/startup-script-consul.sh", 106 | ) 107 | 108 | vars = { 109 | cluster_tag_name = var.consul_server_cluster_name 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /modules/install-nginx/README.md: -------------------------------------------------------------------------------- 1 | # Nginx Install Script 2 | 3 | This folder contains a script for installing the [nginx](https://nginx.org) binary on a server. This script is motivated 4 | by the need to expose an HTTP health check endpoint for Vault while requiring that all other Vault endpoints are accessible 5 | via HTTPS only. This need arises from a [Google Cloud limitation]( 6 | https://github.com/terraform-providers/terraform-provider-google/issues/18) where only HTTP Health Checks can be associated 7 | with a Target Pool, not HTTPS Health Checks. 8 | 9 | You can use this script, along with the [run-nginx script](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/run-nginx) it installs, to create a [Google Image]( 10 | https://cloud.google.com/compute/docs/images) that runs nginx alongside Vault. 11 | 12 | This script has been tested on the following operating systems: 13 | 14 | * Ubuntu 16.04 15 | * Ubuntu 18.04 16 | 17 | There is a good chance it will work on other flavors of Debian as well. 18 | 19 | ## Why nginx?
20 | 21 | Our use case requires that we set up a simple HTTP forwarding proxy, so we had several options available to us. 22 | 23 | We considered using the [Python SimpleHttpServer](https://docs.python.org/2/library/simplehttpserver.html) because we 24 | can expect many OS's to come pre-installed with Python. However, anecdotal experience taught us that this server may fail 25 | when receiving more than one request per second, so this was eliminated as being too brittle. 26 | 27 | We considered using the [HTTP Daemon included in the BusyBox package](https://wiki.openwrt.org/doc/howto/http.httpd), 28 | which has a minimal footprint and is optimized for embedded systems. But BusyBox httpd is not well-documented and not 29 | widely used, making it more likely to fall prey to a vulnerability. 30 | 31 | So we settled on nginx, a massively popular, mature HTTP server, as giving us a nice balance of usability, familiarity, 32 | performance, and minimal security exposure. The major downside of nginx for our use case is that nginx comes built in 33 | with its own [process management](https://www.nginx.com/blog/inside-nginx-how-we-designed-for-performance-scale/); however, 34 | we wish to have nginx managed by our preferred process supervisor, supervisord. Getting nginx to work with supervisord 35 | is somewhat cumbersome, but ultimately gives us a clean management model. 36 | 37 | ## Quick start 38 | 39 | To install the nginx binary, use `git` to clone this repository at a specific tag (see the [releases page]( 40 | ../../../../releases) for all available tags) and run the `install-nginx` script: 41 | 42 | ``` 43 | git clone --branch <VERSION> https://github.com/hashicorp/terraform-google-vault.git 44 | terraform-google-vault/modules/install-nginx/install-nginx --signing-key terraform-google-vault/modules/install-nginx/nginx_signing.key 45 | ``` 46 | 47 | The `install-nginx` script will install the nginx binary and the [run-nginx script](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/run-nginx). 48 | You can then run the `run-nginx` script when the server is booting to configure nginx for use with supervisord and as a 49 | simple HTTP proxy, and start the service. 50 | 51 | We recommend running the `install-nginx` script as part of a [Packer](https://www.packer.io/) template to create a 52 | Vault [Google Image](https://cloud.google.com/compute/docs/images) (see the [vault-consul-image example]( 53 | /examples/vault-consul-image) for sample code). You can then deploy the Image across a Managed Instance Group using the 54 | [vault-cluster module](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster) (see the [vault-cluster-public](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-public) and 55 | [vault-cluster-private](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-private) examples for fully-working sample code). 56 | 57 | 58 | 59 | 60 | ## Command line Arguments 61 | 62 | The `install-nginx` script accepts the following REQUIRED arguments: 63 | 64 | * `--signing-key PATH`: Verify the integrity of the nginx Debian packages using the PGP key located at PATH. 65 | 66 | The `install-nginx` script accepts the following OPTIONAL arguments: 67 | 68 | * `--path DIR`: Install nginx into folder DIR. 69 | * `--user USER`: The install dirs will be owned by user USER. 70 | * `--pid-folder DIR`: The PID file created and managed by nginx will live in DIR.
77 | 
78 | 
79 | 
80 | ## How it works
81 | 
82 | The `install-nginx` script does the following:
83 | 
84 | 1. [Create a user and folders for nginx](#create-a-user-and-folders-for-nginx)
85 | 1. [Create PID folder for nginx](#create-the-pid-folder-for-nginx)
86 | 1. [Download nginx binary](#download-nginx-binary)
87 | 1. [Install nginx](#install-nginx)
88 | 
89 | 
90 | ### Create a user and folders for nginx
91 | 
92 | Create an OS user named `nginx`. Create the following folders, all owned by user `nginx`:
93 | 
94 | * `/opt/nginx`: base directory for nginx data (configurable via the `--path` argument).
95 | * `/opt/nginx/bin`: directory for nginx binaries.
96 | * `/opt/nginx/config`: directory where nginx looks up configuration.
97 | * `/opt/nginx/log`: directory where nginx will store log files. Note that these logs pertain to "nginx startup"
98 | and "nginx shutdown." For nginx usage logs, see `/var/log/nginx`.
99 | 
100 | ### Create the PID folder for nginx
101 | 
102 | Because nginx manages its own processes, it creates a file (usually in `/var/run`) that stores the ID of the nginx process.
103 | However, `/var/run` is only writable by the `root` user, so we create a special folder owned by the `nginx` user where this
104 | file can be written. Since `/var/run` is mounted with a `tmpfs` file system, that folder would be cleared on every
105 | boot, so the proper way to create it isn't to create it once at install time, but to write an instruction that runs on
106 | boot and creates the desired folder.
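
For instance, such a boot-time instruction could be as simple as the following sketch (illustrative only; the install script may use a different mechanism, and the folder and user names shown are the documented defaults):

```
# Runs on every boot (e.g. from an init script): recreate the nginx PID folder,
# since the tmpfs-backed /var/run is cleared each time the system starts.
mkdir -p /var/run/nginx
chown nginx:nginx /var/run/nginx
```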
107 | 
108 | ### Download nginx binary
109 | 
110 | Download the latest stable nginx package from the Debian apt repo maintained by nginx, and extract the `nginx` binary
111 | from it.
112 | 
113 | ### Install nginx
114 | 
115 | Place the `nginx` binary in `/opt/nginx/bin` and make it accessible in the `PATH`.
116 | 
117 | 
118 | ## Why use Git to install this code?
119 | 
120 | We needed an easy way to install these scripts that satisfied a number of requirements, including working on a variety
121 | of operating systems and supported versioning. Our current solution is to use `git`, but this may change in the future.
122 | See [Package Managers](https://github.com/hashicorp/terraform-aws-consul/blob/master/_docs/package-managers.md) for
123 | a full discussion of the requirements, trade-offs, and why we picked `git`.
124 | 
-------------------------------------------------------------------------------- /modules/install-nginx/nginx_signing.key: --------------------------------------------------------------------------------
1 | -----BEGIN PGP PUBLIC KEY BLOCK-----
2 | Version: GnuPG v2.0.22 (GNU/Linux)
3 | 
4 | mQENBE5OMmIBCAD+FPYKGriGGf7NqwKfWC83cBV01gabgVWQmZbMcFzeW+hMsgxH
5 | W6iimD0RsfZ9oEbfJCPG0CRSZ7ppq5pKamYs2+EJ8Q2ysOFHHwpGrA2C8zyNAs4I
6 | QxnZZIbETgcSwFtDun0XiqPwPZgyuXVm9PAbLZRbfBzm8wR/3SWygqZBBLdQk5TE
7 | fDR+Eny/M1RVR4xClECONF9UBB2ejFdI1LD45APbP2hsN/piFByU1t7yK2gpFyRt
8 | 97WzGHn9MV5/TL7AmRPM4pcr3JacmtCnxXeCZ8nLqedoSuHFuhwyDnlAbu8I16O5
9 | XRrfzhrHRJFM1JnIiGmzZi6zBvH0ItfyX6ttABEBAAG0KW5naW54IHNpZ25pbmcg
10 | a2V5IDxzaWduaW5nLWtleUBuZ2lueC5jb20+iQE+BBMBAgAoAhsDBgsJCAcDAgYV
11 | CAIJCgsEFgIDAQIeAQIXgAUCV2K1+AUJGB4fQQAKCRCr9b2Ce9m/YloaB/9XGrol
12 | kocm7l/tsVjaBQCteXKuwsm4XhCuAQ6YAwA1L1UheGOG/aa2xJvrXE8X32tgcTjr
13 | KoYoXWcdxaFjlXGTt6jV85qRguUzvMOxxSEM2Dn115etN9piPl0Zz+4rkx8+2vJG
14 | F+eMlruPXg/zd88NvyLq5gGHEsFRBMVufYmHtNfcp4okC1klWiRIRSdp4QY1wdrN
15 | 1O+/oCTl8Bzy6hcHjLIq3aoumcLxMjtBoclc/5OTioLDwSDfVx7rWyfRhcBzVbwD
16 | oe/PD08AoAA6fxXvWjSxy+dGhEaXoTHjkCbz/l6NxrK3JFyauDgU4K4MytsZ1HDi
17 | MgMW8hZXxszoICTTiQEcBBABAgAGBQJOTkelAAoJEKZP1bF62zmo79oH/1XDb29S
18 | YtWp+MTJTPFEwlWRiyRuDXy3wBd/BpwBRIWfWzMs1gnCjNjk0EVBVGa2grvy9Jtx
19 | JKMd6l/PWXVucSt+U/+GO8rBkw14SdhqxaS2l14v6gyMeUrSbY3XfToGfwHC4sa/
20 | Thn8X4jFaQ2XN5dAIzJGU1s5JA0tjEzUwCnmrKmyMlXZaoQVrmORGjCuH0I0aAFk
21 | RS0UtnB9HPpxhGVbs24xXZQnZDNbUQeulFxS4uP3OLDBAeCHl+v4t/uotIad8v6J
22 | SO93vc1evIje6lguE81HHmJn9noxPItvOvSMb2yPsE8mH4cJHRTFNSEhPW6ghmlf
23 | Wa9ZwiVX5igxcvaIRgQQEQIABgUCTk5b0gAKCRDs8OkLLBcgg1G+AKCnacLb/+W6
24 | cflirUIExgZdUJqoogCeNPVwXiHEIVqithAM1pdY/gcaQZmIRgQQEQIABgUCTk5f
25 | YQAKCRCpN2E5pSTFPnNWAJ9gUozyiS+9jf2rJvqmJSeWuCgVRwCcCUFhXRCpQO2Y
26 | Va3l3WuB+rgKjsQ=
27 | =EWWI
28 | -----END PGP PUBLIC KEY BLOCK-----
29 | 
-------------------------------------------------------------------------------- /modules/install-vault/README.md: --------------------------------------------------------------------------------
1 | # Vault Install Script
2 | 
3 | This folder contains a script for installing Vault and its dependencies. You can use this script, along with the
4 | [run-vault script](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/run-vault) it installs, to create a Vault [Google Image](
5 | https://cloud.google.com/compute/docs/images) that can be deployed in [Google Cloud](https://cloud.google.com) across a
6 | Managed Instance Group using the [vault-cluster module](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster).
7 | 
8 | This script has been tested on the following operating systems:
9 | 
10 | * Ubuntu 16.04
11 | * Ubuntu 18.04
12 | 
13 | There is a good chance it will work on other flavors of Debian as well.
14 | 
15 | 
16 | 
17 | ## Quick start
18 | 
19 | To install Vault, use `git` to clone this repository at a specific tag (see the [releases page](../../../../releases)
20 | for all available tags) and run the `install-vault` script:
21 | 
22 | ```
23 | git clone --branch <VERSION> https://github.com/hashicorp/terraform-google-vault.git
24 | terraform-google-vault/modules/install-vault/install-vault --version 0.5.4
25 | ```
26 | 
27 | The `install-vault` script will install Vault, its dependencies, and the [run-vault script](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/run-vault).
28 | You can then run the `run-vault` script when the server is booting to start Vault.
29 | 
30 | We recommend running the `install-vault` script as part of a [Packer](https://www.packer.io/) template to create a
31 | Vault [Google Image](https://cloud.google.com/compute/docs/images) (see the [vault-consul-image example](
32 | /examples/vault-consul-image) for sample code). You can then deploy the Image across a Managed Instance Group using the
33 | [vault-cluster module](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster) (see the [vault-cluster-public](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-public) and
34 | [vault-cluster-private](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-private) examples for fully-working sample code).
35 | 
36 | 
37 | 
38 | 
39 | ## Command line Arguments
40 | 
41 | The `install-vault` script accepts the following arguments:
42 | 
43 | * `--version VERSION`: Install Vault version VERSION. Required.
44 | * `--path DIR`: Install Vault into folder DIR. Optional.
45 | * `--user USER`: The install dirs will be owned by user USER. Optional.
46 | 
47 | Example:
48 | 
49 | ```
50 | install-vault --version 0.8.2
51 | ```
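
To set the optional arguments explicitly as well, you could run the following (a sketch; the path and user shown are simply the documented defaults):

```
install-vault --version 0.8.2 --path /opt/vault --user vault
```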
52 | 
53 | 
54 | 
55 | ## How it works
56 | 
57 | The `install-vault` script does the following:
58 | 
59 | 1. [Create a user and folders for Vault](#create-a-user-and-folders-for-vault)
60 | 1. [Install Vault binaries and scripts](#install-vault-binaries-and-scripts)
61 | 1. [Configure mlock](#configure-mlock)
62 | 1. [Install supervisord](#install-supervisord)
63 | 1. [Follow-up tasks](#follow-up-tasks)
64 | 
65 | 
66 | ### Create a user and folders for Vault
67 | 
68 | Create an OS user named `vault`. Create the following folders, all owned by user `vault`:
69 | 
70 | * `/opt/vault`: base directory for Vault data (configurable via the `--path` argument).
71 | * `/opt/vault/bin`: directory for Vault binaries.
72 | * `/opt/vault/data`: directory where the Vault agent can store state.
73 | * `/opt/vault/config`: directory where the Vault agent looks up configuration.
74 | * `/opt/vault/log`: directory where the Vault agent will store log files.
75 | * `/opt/vault/tls`: directory where Vault will look for TLS certs.
76 | 
77 | 
78 | ### Install Vault binaries and scripts
79 | 
80 | Install the following:
81 | 
82 | * `vault`: Download the Vault zip file from the [downloads page](https://www.vaultproject.io/downloads.html) (the
83 | version number is configurable via the `--version` argument), and extract the `vault` binary into
84 | `/opt/vault/bin`. Add a symlink to the `vault` binary in `/usr/local/bin`.
85 | * `run-vault`: Copy the [run-vault script](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/run-vault) into `/opt/vault/bin`.
86 | 
87 | 
88 | ### Configure mlock
89 | 
90 | Give Vault permissions to make the `mlock` (memory lock) syscall. This syscall is used to prevent the OS from swapping
91 | Vault's memory to disk. For more info, see: https://www.vaultproject.io/docs/configuration/#disable_mlock.
92 | 
93 | 
94 | ### Install supervisord
95 | 
96 | Install [supervisord](http://supervisord.org/). We use it as a cross-platform supervisor to ensure Vault is started
97 | whenever the system boots and restarted if the Vault process crashes.
98 | 
99 | 
100 | ### Follow-up tasks
101 | 
102 | After the `install-vault` script finishes running, you may wish to do the following:
103 | 
104 | 1.
If you have custom Vault config (`.hcl`) files, you may want to copy them into the config directory (default: 105 | `/opt/vault/config`). 106 | 1. If `/usr/local/bin` isn't already part of `PATH`, you should add it so you can run the `vault` command without 107 | specifying the full path. 108 | 109 | 110 | 111 | ## Why use Git to install this code? 112 | 113 | 114 | 115 | We needed an easy way to install these scripts that satisfied a number of requirements, including working on a variety 116 | of operating systems and supported versioning. Our current solution is to use `git`, but this may change in the future. 117 | See [Package Managers](https://github.com/hashicorp/terraform-aws-consul/blob/master/_docs/package-managers.md) for 118 | a full discussion of the requirements, trade-offs, and why we picked `git`. 119 | -------------------------------------------------------------------------------- /modules/install-vault/supervisor-initd-script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # supervisord Startup script for the Supervisor process control system 4 | # 5 | # Author: Mike McGrath (based off yumupdatesd) 6 | # Jason Koppe adjusted to read sysconfig, 7 | # use supervisord tools to start/stop, conditionally wait 8 | # for child processes to shutdown, and startup later 9 | # Erwan Queffelec 10 | # make script LSB-compliant 11 | # 12 | # chkconfig: 345 83 04 13 | # description: Supervisor is a client/server system that allows \ 14 | # its users to monitor and control a number of processes on \ 15 | # UNIX-like operating systems. 16 | # processname: supervisord 17 | # config: /etc/supervisord.conf 18 | # config: /etc/sysconfig/supervisord 19 | # pidfile: /var/run/supervisord.pid 20 | # 21 | ### BEGIN INIT INFO 22 | # Provides: supervisord 23 | # Required-Start: $all 24 | # Required-Stop: $all 25 | # Short-Description: start and stop Supervisor process control system 26 | # Description: Supervisor is a client/server system that allows 27 | # its users to monitor and control a number of processes on 28 | # UNIX-like operating systems. 29 | ### END INIT INFO 30 | 31 | # Source function library 32 | . /etc/rc.d/init.d/functions 33 | 34 | # Source system settings 35 | if [ -f /etc/sysconfig/supervisord ]; then 36 | . /etc/sysconfig/supervisord 37 | fi 38 | 39 | # Path to the supervisorctl script, server binary, 40 | # and short-form for messages. 41 | supervisorctl=/usr/local/bin/supervisorctl 42 | supervisord=${SUPERVISORD-/usr/local/bin/supervisord} 43 | prog=supervisord 44 | pidfile=${PIDFILE-/tmp/supervisord.pid} 45 | lockfile=${LOCKFILE-/var/lock/subsys/supervisord} 46 | STOP_TIMEOUT=${STOP_TIMEOUT-60} 47 | OPTIONS="${OPTIONS--c /etc/supervisor/supervisord.conf}" 48 | RETVAL=0 49 | 50 | start() { 51 | echo -n $"Starting $prog: " 52 | daemon --pidfile=${pidfile} $supervisord $OPTIONS 53 | RETVAL=$? 54 | echo 55 | if [ $RETVAL -eq 0 ]; then 56 | touch ${lockfile} 57 | $supervisorctl $OPTIONS status 58 | fi 59 | return $RETVAL 60 | } 61 | 62 | stop() { 63 | echo -n $"Stopping $prog: " 64 | killproc -p ${pidfile} -d ${STOP_TIMEOUT} $supervisord 65 | RETVAL=$? 66 | echo 67 | [ $RETVAL -eq 0 ] && rm -rf ${lockfile} ${pidfile} 68 | } 69 | 70 | reload() { 71 | echo -n $"Reloading $prog: " 72 | LSB=1 killproc -p $pidfile $supervisord -HUP 73 | RETVAL=$? 
74 | echo
75 | if [ $RETVAL -eq 7 ]; then
76 | failure $"$prog reload"
77 | else
78 | $supervisorctl $OPTIONS status
79 | fi
80 | }
81 | 
82 | restart() {
83 | stop
84 | start
85 | }
86 | 
87 | case "$1" in
88 | start)
89 | start
90 | ;;
91 | stop)
92 | stop
93 | ;;
94 | status)
95 | status -p ${pidfile} $supervisord
96 | RETVAL=$?
97 | [ $RETVAL -eq 0 ] && $supervisorctl $OPTIONS status
98 | ;;
99 | restart)
100 | restart
101 | ;;
102 | condrestart|try-restart)
103 | if status -p ${pidfile} $supervisord >&/dev/null; then
104 | stop
105 | start
106 | fi
107 | ;;
108 | force-reload|reload)
109 | reload
110 | ;;
111 | *)
112 | echo $"Usage: $prog {start|stop|status|restart|condrestart|try-restart|force-reload|reload}"
113 | RETVAL=2
114 | esac
115 | 
116 | exit $RETVAL
-------------------------------------------------------------------------------- /modules/install-vault/supervisord.conf: --------------------------------------------------------------------------------
1 | ; supervisor config file
2 | ;
3 | ; For more information on the config file, please see:
4 | ; http://supervisord.org/configuration.html
5 | ;
6 | ; Notes:
7 | ; - Shell expansion ("~" or "$HOME") is not supported. Environment
8 | ; variables can be expanded using this syntax: "%(ENV_HOME)s".
9 | ; - Comments must have a leading space: "a=b ;comment" not "a=b;comment".
10 | 
11 | [unix_http_server]
12 | file=/var/run/supervisor.sock ; (the path to the socket file)
13 | chmod=0700 ; socket file mode (default 0700)
14 | 
15 | [supervisord]
16 | logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
17 | pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
18 | childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP)
19 | logfile_maxbytes=50MB ; (max main logfile bytes before rotation;default 50MB)
20 | logfile_backups=10 ; (num of main logfile rotation backups;default 10)
21 | loglevel=info ; (log level;default info; others: debug,warn,trace)
22 | 
23 | ; the below section must remain in the config file for RPC
24 | ; (supervisorctl/web interface) to work, additional interfaces may be
25 | ; added by defining them in separate rpcinterface: sections
26 | [rpcinterface:supervisor]
27 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
28 | 
29 | [supervisorctl]
30 | serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket
31 | 
32 | ; The [include] section can just contain the "files" setting. This
33 | ; setting can list multiple files (separated by whitespace or
34 | ; newlines). It can also contain wildcards. The filenames are
35 | ; interpreted as relative to this file. Included files *cannot*
36 | ; include files themselves.
37 | 
38 | [include]
39 | files = /etc/supervisor/conf.d/*.conf
40 | 
-------------------------------------------------------------------------------- /modules/private-tls-cert/main.tf: --------------------------------------------------------------------------------
1 | 
2 | terraform {
3 | # This module has been updated with 0.12 syntax, which means the example is no longer
4 | # compatible with any versions below 0.12.
5 | required_version = ">= 0.12"
6 | }
7 | 
8 | # ---------------------------------------------------------------------------------------------------------------------
9 | # CREATE A CA CERTIFICATE
10 | # ---------------------------------------------------------------------------------------------------------------------
11 | 
12 | resource "tls_private_key" "ca" {
13 | algorithm = var.private_key_algorithm
14 | ecdsa_curve = var.private_key_ecdsa_curve
15 | rsa_bits = var.private_key_rsa_bits
16 | }
17 | 
18 | resource "tls_self_signed_cert" "ca" {
19 | key_algorithm = tls_private_key.ca.algorithm
20 | private_key_pem = tls_private_key.ca.private_key_pem
21 | is_ca_certificate = true
22 | 
23 | validity_period_hours = var.validity_period_hours
24 | allowed_uses = var.ca_allowed_uses
25 | 
26 | subject {
27 | common_name = var.ca_common_name
28 | organization = var.organization_name
29 | }
30 | 
31 | # Store the CA public key in a file.
32 | 
33 | provisioner "local-exec" {
34 | command = "echo '${tls_self_signed_cert.ca.cert_pem}' > '${var.ca_public_key_file_path}' && chmod ${var.permissions} '${var.ca_public_key_file_path}' && chown ${var.owner} '${var.ca_public_key_file_path}'"
35 | }
36 | }
37 | 
38 | # ---------------------------------------------------------------------------------------------------------------------
39 | # CREATE A TLS CERTIFICATE SIGNED USING THE CA CERTIFICATE
40 | # ---------------------------------------------------------------------------------------------------------------------
41 | 
42 | resource "tls_private_key" "cert" {
43 | algorithm = var.private_key_algorithm
44 | ecdsa_curve = var.private_key_ecdsa_curve
45 | rsa_bits = var.private_key_rsa_bits
46 | 
47 | # Store the certificate's private key in a file.
48 | 
49 | provisioner "local-exec" {
50 | command = "echo '${tls_private_key.cert.private_key_pem}' > '${var.private_key_file_path}' && chmod ${var.permissions} '${var.private_key_file_path}' && chown ${var.owner} '${var.private_key_file_path}'"
51 | }
52 | }
53 | 
54 | resource "tls_cert_request" "cert" {
55 | key_algorithm = tls_private_key.cert.algorithm
56 | private_key_pem = tls_private_key.cert.private_key_pem
57 | 
58 | dns_names = var.dns_names
59 | ip_addresses = var.ip_addresses
60 | 
61 | subject {
62 | common_name = var.common_name
63 | organization = var.organization_name
64 | }
65 | }
66 | 
67 | resource "tls_locally_signed_cert" "cert" {
68 | cert_request_pem = tls_cert_request.cert.cert_request_pem
69 | 
70 | ca_key_algorithm = tls_private_key.ca.algorithm
71 | ca_private_key_pem = tls_private_key.ca.private_key_pem
72 | ca_cert_pem = tls_self_signed_cert.ca.cert_pem
73 | 
74 | validity_period_hours = var.validity_period_hours
75 | allowed_uses = var.allowed_uses
76 | 
77 | # Store the certificate's public key in a file.
78 | 
79 | provisioner "local-exec" { 80 | command = "echo '${tls_locally_signed_cert.cert.cert_pem}' > '${var.public_key_file_path}' && chmod ${var.permissions} '${var.public_key_file_path}' && chown ${var.owner} '${var.public_key_file_path}'" 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /modules/private-tls-cert/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ca_public_key_file_path" { 2 | value = var.ca_public_key_file_path 3 | } 4 | 5 | output "public_key_file_path" { 6 | value = var.public_key_file_path 7 | } 8 | 9 | output "private_key_file_path" { 10 | value = var.private_key_file_path 11 | } 12 | 13 | -------------------------------------------------------------------------------- /modules/private-tls-cert/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # You must provide a value for each of these parameters. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "ca_public_key_file_path" { 7 | description = "Write the PEM-encoded CA certificate public key to this path (e.g. /etc/tls/ca.crt.pem)." 8 | type = string 9 | } 10 | 11 | variable "public_key_file_path" { 12 | description = "Write the PEM-encoded certificate public key to this path (e.g. /etc/tls/vault.crt.pem)." 13 | type = string 14 | } 15 | 16 | variable "private_key_file_path" { 17 | description = "Write the PEM-encoded certificate private key to this path (e.g. /etc/tls/vault.key.pem)." 18 | type = string 19 | } 20 | 21 | variable "owner" { 22 | description = "The OS user who should be given ownership over the certificate files." 23 | type = string 24 | } 25 | 26 | variable "organization_name" { 27 | description = "The name of the organization to associate with the certificates (e.g. Acme Co)." 28 | type = string 29 | } 30 | 31 | variable "ca_common_name" { 32 | description = "The common name to use in the subject of the CA certificate (e.g. acme.co cert)." 33 | type = string 34 | } 35 | 36 | variable "common_name" { 37 | description = "The common name to use in the subject of the certificate (e.g. acme.co cert)." 38 | type = string 39 | } 40 | 41 | variable "dns_names" { 42 | description = "List of DNS names for which the certificate will be valid (e.g. vault.service.consul, foo.example.com)." 43 | type = list(string) 44 | } 45 | 46 | variable "ip_addresses" { 47 | description = "List of IP addresses for which the certificate will be valid (e.g. 127.0.0.1)." 48 | type = list(string) 49 | } 50 | 51 | variable "validity_period_hours" { 52 | description = "The number of hours after initial issuing that the certificate will become invalid." 53 | type = number 54 | } 55 | 56 | # --------------------------------------------------------------------------------------------------------------------- 57 | # OPTIONAL PARAMETERS 58 | # These parameters have reasonable defaults. 59 | # --------------------------------------------------------------------------------------------------------------------- 60 | 61 | variable "ca_allowed_uses" { 62 | description = "List of keywords from RFC5280 describing a use that is permitted for the CA certificate. 
For more info and the list of keywords, see https://www.terraform.io/docs/providers/tls/r/self_signed_cert.html#allowed_uses."
63 | type = list(string)
64 | 
65 | default = [
66 | "cert_signing",
67 | "key_encipherment",
68 | "digital_signature",
69 | ]
70 | }
71 | 
72 | variable "allowed_uses" {
73 | description = "List of keywords from RFC5280 describing a use that is permitted for the issued certificate. For more info and the list of keywords, see https://www.terraform.io/docs/providers/tls/r/self_signed_cert.html#allowed_uses."
74 | type = list(string)
75 | 
76 | default = [
77 | "key_encipherment",
78 | "digital_signature",
79 | ]
80 | }
81 | 
82 | variable "permissions" {
83 | description = "The Unix file permission to assign to the cert files (e.g. 0600)."
84 | type = string
85 | default = "0600"
86 | }
87 | 
88 | variable "private_key_algorithm" {
89 | description = "The name of the algorithm to use for private keys. Must be one of: RSA or ECDSA."
90 | type = string
91 | default = "RSA"
92 | }
93 | 
94 | variable "private_key_ecdsa_curve" {
95 | description = "The name of the elliptic curve to use. Should only be used if var.private_key_algorithm is ECDSA. Must be one of P224, P256, P384 or P521."
96 | type = string
97 | default = "P256"
98 | }
99 | 
100 | variable "private_key_rsa_bits" {
101 | description = "The size of the generated RSA key in bits. Should only be used if var.private_key_algorithm is RSA."
102 | type = number
103 | default = 2048
104 | }
105 | 
-------------------------------------------------------------------------------- /modules/run-nginx/README.md: --------------------------------------------------------------------------------
1 | # Nginx Run Script
2 | 
3 | This folder contains a script for configuring and running nginx on a Vault [Google Cloud](https://cloud.google.com/)
4 | server. This script has been tested on the following operating systems:
5 | 
6 | * Ubuntu 16.04
7 | * Ubuntu 18.04
8 | 
9 | There is a good chance it will work on other flavors of Debian as well.
10 | 
11 | 
12 | 
13 | 
14 | ## Quick start
15 | 
16 | This script assumes you installed it, plus all of its dependencies (including nginx itself), using the [install-nginx
17 | module](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/install-nginx). The default install path is `/opt/nginx/bin`, so to configure and start nginx, you run:
18 | 
19 | ```
20 | /opt/nginx/bin/run-nginx --port 8000
21 | ```
22 | 
23 | This will:
24 | 
25 | 1. Generate an nginx configuration file called `nginx.conf` in the nginx config dir (default: `/opt/nginx/config`).
26 | See [nginx configuration](#nginx-configuration) for details on what this configuration file will contain.
27 | 
28 | 1. Generate a [Supervisor](http://supervisord.org/) configuration file called `run-nginx.conf` in the Supervisor
29 | config dir (default: `/etc/supervisor/conf.d`) with a command that will run nginx:
30 | `/opt/nginx/bin/nginx -c $nginx_config_dir/nginx.conf`.
31 | 
32 | 1. Tell Supervisor to load the new configuration file, thereby starting nginx.
33 | 
34 | We recommend using the `run-nginx` command as part of the [Startup Script](https://cloud.google.com/compute/docs/startupscript),
35 | so that it executes when the Compute Instance is first booting. After running `run-nginx` on that initial boot, the
36 | `supervisord` configuration will automatically restart nginx if it crashes or the Compute Instance reboots.
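
For example, the nginx-related portion of such a Startup Script might be as small as this sketch (the flag values shown are just the documented defaults):

```
#!/bin/bash
# Illustrative GCE Startup Script fragment: configure nginx and hand it off to supervisord.
/opt/nginx/bin/run-nginx \
  --port 8000 \
  --proxy-pass-url "https://127.0.0.1:8200/v1/sys/health?standbyok=true"
```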
37 | 
38 | See the [startup-script-vault.sh](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-cluster-public/startup-script-vault.sh) example for fully-working
39 | sample code.
40 | 
41 | 
42 | 
43 | ## Command line Arguments
44 | 
45 | The `run-nginx` script accepts the following arguments. All arguments are optional. See the script for default values.
46 | 
47 | | Argument | Description | Default |
48 | | ------------------ | ----------- | ------- |
49 | | `--port` | The port on which the HTTP server accepts inbound connections | `8000` |
50 | | `--proxy-pass-url` | The URL to which all inbound requests will be forwarded. | `https://127.0.0.1:8200/v1/sys/health?standbyok=true` |
51 | | `--pid-folder` | The local folder that should contain the PID file to be used by nginx. | `/var/run/nginx` |
52 | | `--config-dir` | The path to the nginx config folder. | absolute path of `../config`, relative to this script |
53 | | `--bin-dir` | The path to the folder with the nginx binary. | absolute path of the parent folder of this script |
54 | | `--log-dir` | The path to the nginx log folder. | absolute path of `../log`, relative to this script. |
55 | | `--log-level` | The log verbosity to use with nginx. | `info` |
56 | | `--user` | The user to run nginx as. | owner of `--config-dir` |
57 | 
58 | Example:
59 | 
60 | ```
61 | /opt/nginx/bin/run-nginx --port 8000
62 | ```
63 | 
64 | 
65 | ## Nginx configuration
66 | 
67 | `run-nginx` generates a configuration file for nginx in `/opt/nginx/config/nginx.conf` that configures nginx to forward
68 | all inbound HTTP requests, regardless of their path or URL, to the HTTPS endpoint for the Vault health check.
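
As a rough sketch, the generated configuration is conceptually equivalent to writing something like the following (illustrative only; the real file is produced by `run-nginx` from its arguments, and the endpoint shown is the default `--proxy-pass-url`):

```
# Illustrative only: run-nginx generates the real nginx.conf from its arguments.
cat > /opt/nginx/config/nginx.conf <<'EOF'
pid /var/run/nginx/nginx.pid;
daemon off;  # stay in the foreground so supervisord can manage the process
events {}
http {
  server {
    listen 8000;
    location / {
      proxy_pass https://127.0.0.1:8200/v1/sys/health?standbyok=true;
    }
  }
}
EOF
```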
-------------------------------------------------------------------------------- /modules/update-certificate-store/README.md: --------------------------------------------------------------------------------
1 | # Update Certificate Store
2 | 
3 | This folder contains a script for adding a trusted Certificate Authority (CA) certificate to an OS's certificate
4 | store. This allows you to establish TLS connections to services that use TLS certs signed by that CA without getting
5 | x509 certificate errors. This script has been tested on the following operating systems:
6 | 
7 | * Ubuntu 16.04
8 | * Ubuntu 18.04
9 | 
10 | There is a good chance it will work on other flavors of Debian as well.
11 | 
12 | If you're unfamiliar with how TLS certificates work, check out the [Background section](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/private-tls-cert#background).
13 | 
14 | 
15 | 
16 | 
17 | 
18 | ## Motivation
19 | 
20 | Let's say you deployed a server (e.g. a Vault server) with a self-signed TLS certificate. If you try to make a request
21 | to that server using some sort of TLS client (e.g. a Vault client), you will get an error:
22 | 
23 | ```
24 | vault read secret/foo
25 | 
26 | Error initializing Vault: Get https://127.0.0.1:8200/v1/secret/foo: x509: certificate signed by unknown authority
27 | ```
28 | 
29 | You can get around this error by explicitly telling the client to trust the public key of the CA that signed that TLS
30 | certificate:
31 | 
32 | ```
33 | vault read -ca-cert=/opt/vault/tls/ca.crt.pem secret/foo
34 | 
35 | Key Value
36 | --- -----
37 | refresh_interval 768h0m0s
38 | value bar
39 | ```
40 | 
41 | Having to pass the `-ca-cert` argument every time gets tedious. This module offers a way to configure the entire OS
42 | to trust this certificate.
43 | 
44 | 
45 | 
46 | 
47 | ## Quick start
48 | 
49 | To use the `update-certificate-store` script, use `git` to clone this repository at a specific tag (see the
50 | [releases page](../../../../releases) for all available tags) and run the `update-certificate-store` script:
51 | 
52 | ```
53 | git clone --branch <VERSION> https://github.com/hashicorp/terraform-google-vault.git
54 | terraform-google-vault/modules/update-certificate-store/update-certificate-store --cert-file-path /opt/vault/tls/ca.crt.pem
55 | ```
56 | 
57 | That's it!
58 | 
59 | Now you can make calls to services that use TLS certs signed by this CA, and you won't get any errors:
60 | 
61 | ```
62 | vault read secret/foo
63 | 
64 | Key Value
65 | --- -----
66 | refresh_interval 768h0m0s
67 | value bar
68 | ```
69 | 
70 | See the [vault-consul-image example](https://github.com/hashicorp/terraform-google-vault/tree/master/examples/vault-consul-image) for working sample code.
71 | 
72 | 
73 | 
74 | 
75 | 
76 | ## Command line Arguments
77 | 
78 | The `update-certificate-store` script accepts the following arguments:
79 | 
80 | * `--cert-file-path` (required): The path to the CA certificate public key to add to the OS certificate store.
81 | * `--dest-file-name` (optional): This script will copy `--cert-file-path` to a file with this name in a shared
82 | certificate folder on the OS. The default file name is `custom.crt`, but you can use this parameter to customize
83 | it. The extension MUST be `.crt` or the OS will ignore the file.
84 | 
85 | Example:
86 | 
87 | ```
88 | terraform-google-vault/modules/update-certificate-store/update-certificate-store --cert-file-path /opt/vault/tls/ca.crt.pem
89 | ```
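
If you want to confirm the OS certificate store was actually updated, one quick, hypothetical sanity check is to ask OpenSSL to validate a certificate signed by that CA against the default store (adjust the path to wherever your certificate lives):

```
# Should print "OK" once the CA has been added to the OS certificate store.
openssl verify /opt/vault/tls/vault.crt.pem
```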
-------------------------------------------------------------------------------- /modules/update-certificate-store/update-certificate-store: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script is used to add a trusted, private CA certificate to an OS's certificate store. This allows you to
3 | # establish TLS connections to services that use TLS certs signed by that CA without getting x509 certificate errors.
4 | # This script has been tested with the following operating systems:
5 | #
6 | # 1. Ubuntu 16.04
7 | # 2. Ubuntu 18.04
8 | 
9 | set -e
10 | 
11 | readonly DEFAULT_DEST_FILE_NAME="custom.crt"
12 | 
13 | readonly UPDATE_CA_CERTS_PATH="/usr/local/share/ca-certificates"
14 | readonly UPDATE_CA_TRUST_PATH="/etc/pki/ca-trust/source/anchors"
15 | 
16 | readonly SCRIPT_NAME="$(basename "$0")"
17 | 
18 | function print_usage {
19 | echo
20 | echo "Usage: update-certificate-store [OPTIONS]"
21 | echo
22 | echo "Add a trusted, private CA certificate to an OS's certificate store. This script has been tested with Ubuntu 16.04 and 18.04."
23 | echo
24 | echo "Options:"
25 | echo
26 | echo -e " --cert-file-path\tThe path to the CA certificate public key to add to the OS certificate store. Required."
27 | echo -e " --dest-file-name\tCopy --cert-file-path to a file with this name in a shared cert folder. The extension MUST be .crt. Optional. Default: $DEFAULT_DEST_FILE_NAME."
28 | echo
29 | echo "Example:"
30 | echo
31 | echo " update-certificate-store --cert-file-path /opt/vault/tls/ca.crt.pem"
32 | }
33 | 
34 | function log {
35 | local -r level="$1"
36 | local -r message="$2"
37 | local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S")
38 | >&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}"
39 | }
40 | 
41 | function log_info {
42 | local -r message="$1"
43 | log "INFO" "$message"
44 | }
45 | 
46 | function log_warn {
47 | local -r message="$1"
48 | log "WARN" "$message"
49 | }
50 | 
51 | function log_error {
52 | local -r message="$1"
53 | log "ERROR" "$message"
54 | }
55 | 
56 | function command_exists {
57 | local -r command_name="$1"
58 | [[ -n "$(command -v $command_name)" ]]
59 | }
60 | 
61 | function update_certificate_store {
62 | local -r cert_file_path="$1"
63 | local -r dest_file_name="$2"
64 | 
65 | log_info "Adding CA public key $cert_file_path to OS certificate store"
66 | 
67 | if command_exists "update-ca-certificates"; then
68 | cp "$cert_file_path" "$UPDATE_CA_CERTS_PATH/$dest_file_name"
69 | update-ca-certificates
70 | elif command_exists "update-ca-trust"; then
71 | update-ca-trust enable
72 | cp "$cert_file_path" "$UPDATE_CA_TRUST_PATH/$dest_file_name"
73 | update-ca-trust extract
74 | else
75 | log_warn "Did not find the update-ca-certificates or update-ca-trust commands. Cannot update OS certificate store."
76 | fi
77 | }
78 | 
79 | function assert_not_empty {
80 | local -r arg_name="$1"
81 | local -r arg_value="$2"
82 | 
83 | if [[ -z "$arg_value" ]]; then
84 | log_error "The value for '$arg_name' cannot be empty"
85 | print_usage
86 | exit 1
87 | fi
88 | }
89 | 
90 | function update {
91 | local cert_file_path
92 | local dest_file_name="$DEFAULT_DEST_FILE_NAME"
93 | 
94 | while [[ $# -gt 0 ]]; do
95 | local key="$1"
96 | 
97 | case "$key" in
98 | --cert-file-path)
99 | cert_file_path="$2"
100 | shift
101 | ;;
102 | --dest-file-name)
103 | dest_file_name="$2"
104 | shift
105 | ;;
106 | --help)
107 | print_usage
108 | exit
109 | ;;
110 | *)
111 | log_error "Unrecognized argument: $key"
112 | print_usage
113 | exit 1
114 | ;;
115 | esac
116 | 
117 | shift
118 | done
119 | 
120 | assert_not_empty "--cert-file-path" "$cert_file_path"
121 | assert_not_empty "--dest-file-name" "$dest_file_name"
122 | 
123 | update_certificate_store "$cert_file_path" "$dest_file_name"
124 | }
125 | 
126 | update "$@"
127 | 
-------------------------------------------------------------------------------- /modules/vault-cluster/outputs.tf: --------------------------------------------------------------------------------
1 | output "cluster_tag_name" {
2 | value = var.cluster_name
3 | }
4 | 
5 | output "instance_group_id" {
6 | value = google_compute_region_instance_group_manager.vault.id
7 | }
8 | 
9 | output "instance_group_name" {
10 | value = google_compute_region_instance_group_manager.vault.name
11 | }
12 | 
13 | output "cluster_service_account" {
14 | value = local.service_account_email
15 | }
16 | 
17 | output "instance_group_url" {
18 | value = google_compute_region_instance_group_manager.vault.self_link
19 | }
20 | 
21 | output "instance_template_url" {
22 | value = data.template_file.compute_instance_template_self_link.rendered
23 | }
24 | 
25 | output "firewall_rule_allow_intracluster_vault_url" {
26 | value = google_compute_firewall.allow_intracluster_vault.self_link
27 | }
28 | 
29 | output "firewall_rule_allow_intracluster_vault_id" {
30 | value = google_compute_firewall.allow_intracluster_vault.id
31 | }
32 | 
33 | output "firewall_rule_allow_inbound_api_url" {
34 | value = google_compute_firewall.allow_inbound_api.*.self_link
35 | }
36 | 
37 | output "firewall_rule_allow_inbound_api_id" {
38 | value = google_compute_firewall.allow_inbound_api.*.id
39 | }
40 | 
41 | output "firewall_rule_allow_inbound_health_check_url" {
42 | value = element(
43 | concat(
44 | google_compute_firewall.allow_inbound_health_check.*.self_link,
45 | [""],
46 | ),
47 | 0,
48 | )
49 | }
50 | 
51 | output "firewall_rule_allow_inbound_health_check_id" {
52 | value = element(
53 | concat(
54 | google_compute_firewall.allow_inbound_health_check.*.id,
55 | [""],
56 | ),
57 | 0,
58 | )
59 | }
60 | 
61 | output "bucket_name_url" {
62 | value = google_storage_bucket.vault_storage_backend.self_link
63 | }
64 | 
65 | output "bucket_name_id" {
66 | value = google_storage_bucket.vault_storage_backend.id
67 | }
68 | 
69 | 
-------------------------------------------------------------------------------- /modules/vault-lb-fr/README.md: --------------------------------------------------------------------------------
1 | # Regional External Load Balancer for Vault
2 | 
3 | This folder contains a [Terraform](https://www.terraform.io/) module that can be used to deploy a regional external
4 | [Network Load Balancer](https://cloud.google.com/compute/docs/load-balancing/network/) that fronts a [Vault](
5 | https://www.vaultproject.io/) cluster in [Google Cloud](https://cloud.google.com/).
6 | 
7 | In GCP, you do not actually create a new Network Load Balancer; rather, you create a [Forwarding Rule](
8 | https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules) which enables you to access an existing
9 | region-wide Load Balancer already created by Google. This is why the name of this module is `vault-lb-fr`. In addition,
10 | you must specify a [Target Pool](https://cloud.google.com/compute/docs/load-balancing/network/target-pools) that contains
11 | all your "Targets", which are the Compute Instances to which the Load Balancer ultimately forwards traffic. Finally, you
12 | must define a [Health Check](https://cloud.google.com/compute/docs/load-balancing/health-checks) that tells the Forwarding
13 | Rule which of the Compute Instances in your Target Pool are healthy and able to receive traffic.
14 | 
15 | ## When should you use this module?
16 | 
17 | We strongly recommend that you not expose Vault to the public Internet; however, if you must, then the preferred way to
18 | do so is to keep the Vault nodes themselves hidden from the public Internet, but to place a Load Balancer like the one
19 | created by this module in front.
20 | 
21 | Some teams may wish to create an *internal* Load Balancer to have a single Vault endpoint. While there may be some use
22 | cases that necessitate this, an internal Load Balancer is usually unnecessary, as Vault clients can discover the Vault
23 | nodes directly (for example, via Consul DNS) without a load balancer in between.
24 | 
25 | 
26 | ## How do you use this module?
27 | 
28 | This folder defines a [Terraform module](https://www.terraform.io/docs/modules/usage.html), which you can use in your
29 | code by adding a `module` configuration and setting its `source` parameter to the URL of this folder:
30 | 
31 | ```hcl
32 | module "vault_lb" {
33 | # Use version v0.0.1 of the vault-lb-fr module
34 | source = "github.com/hashicorp/terraform-google-vault//modules/vault-lb-fr?ref=v0.0.1"
35 | 
36 | # This is the tag name that the Vault Compute Instances use to automatically discover each other. Knowing this, we
37 | # can create a Firewall Rule that permits access from the Load Balancer to the Vault Cluster.
38 | cluster_tag_name = "vault-test"
39 | 
40 | # The Health Check will send an HTTP request to each of our Compute Instances. What path should it attempt to access
41 | # for a Vault Health check? Normally we'd want to use "/v1/sys/health?standbyok=true", however GCP only supports HTTP
42 | # Health Checks, not HTTPS Health Checks, so we must set up a forward proxy on the Vault server that forwards all inbound
43 | # traffic to the Vault Health Check endpoint. Therefore, what we specify here doesn't really matter as long as it's
44 | # non-empty.
45 | health_check_path = "/"
46 | 
47 | # See the above comment. The forward proxy's port is 8000 by default.
48 | health_check_port = 8000
49 | 
50 | # ... See variables.tf for the other parameters you can define for the vault-lb-fr module
51 | }
52 | ```
53 | 
54 | See [variables.tf](variables.tf) for additional information.
-------------------------------------------------------------------------------- /modules/vault-lb-fr/main.tf: --------------------------------------------------------------------------------
1 | # ---------------------------------------------------------------------------------------------------------------------
2 | # THESE TEMPLATES REQUIRE TERRAFORM VERSION 0.12.0 AND ABOVE
3 | # This module has been updated with 0.12 syntax, which means the example is no longer
4 | # compatible with any versions below 0.12.
5 | # ---------------------------------------------------------------------------------------------------------------------
6 | 
7 | terraform {
8 | required_version = ">= 0.12"
9 | }
10 | 
11 | # ---------------------------------------------------------------------------------------------------------------------
12 | # CREATE THE LOAD BALANCER FORWARDING RULE
13 | # In GCP, Google has already created the load balancer itself so there is no new load balancer resource to create. However,
14 | # to leverage this load balancer, we must create a Forwarding Rule specifically for our Compute Instances. By creating a
15 | # Forwarding Rule, we automatically create an external (public-facing) Load Balancer in the GCP console.
16 | # ---------------------------------------------------------------------------------------------------------------------
17 | 
18 | # A Forwarding Rule receives inbound requests and forwards them to the specified Target Pool
19 | resource "google_compute_forwarding_rule" "vault" {
20 | name = "${var.cluster_name}-fr"
21 | description = var.forwarding_rule_description
22 | ip_address = var.forwarding_rule_ip_address
23 | ip_protocol = "TCP"
24 | load_balancing_scheme = "EXTERNAL"
25 | network = var.network_name
26 | port_range = var.api_port
27 | target = google_compute_target_pool.vault.self_link
28 | }
29 | 
30 | # The Load Balancer (Forwarding rule) will only forward requests to Compute Instances in the associated Target Pool.
31 | # Note that this Target Pool is populated by modifying the Instance Group containing the Vault nodes to add its member
32 | # Instances to this Target Pool.
33 | resource "google_compute_target_pool" "vault" {
34 | name = "${var.cluster_name}-tp"
35 | description = var.target_pool_description
36 | session_affinity = var.target_pool_session_affinity
37 | health_checks = [google_compute_http_health_check.vault.name]
38 | }
39 | 
40 | # Add a Health Check so that the Load Balancer will only route to healthy Compute Instances.
Note that this Health 41 | # Check has no effect on whether GCE will attempt to reboot the Compute Instance. Note also that the Google API will 42 | # only allow a Target Pool to reference an HTTP Health Check. HTTPS or TCP Health Checks are not yet supported. 43 | resource "google_compute_http_health_check" "vault" { 44 | name = "${var.cluster_name}-hc" 45 | description = var.health_check_description 46 | check_interval_sec = var.health_check_interval_sec 47 | timeout_sec = var.health_check_timeout_sec 48 | healthy_threshold = var.health_check_healthy_threshold 49 | unhealthy_threshold = var.health_check_unhealthy_threshold 50 | 51 | port = var.health_check_port 52 | request_path = var.health_check_path 53 | } 54 | 55 | # The Load Balancer may need explicit permission to forward traffic to our Vault Cluster. 56 | resource "google_compute_firewall" "load_balancer" { 57 | name = "${var.cluster_name}-rule-lb" 58 | description = var.firewall_rule_description 59 | network = var.network_name == null ? "default" : var.network_name 60 | 61 | allow { 62 | protocol = "tcp" 63 | ports = [var.api_port] 64 | } 65 | 66 | # "130.211.0.0/22" - Enable inbound traffic from the Google Cloud Load Balancer (https://goo.gl/xULu8U) 67 | # "35.191.0.0/16" - Enable inbound traffic from the Google Cloud Health Checkers (https://goo.gl/xULu8U) 68 | # "0.0.0.0/0" - Enable any IP address to reach our nodes 69 | source_ranges = concat( 70 | ["130.211.0.0/22", "35.191.0.0/16"], 71 | var.allow_access_from_cidr_blocks, 72 | ) 73 | 74 | target_tags = [var.cluster_tag_name] 75 | } 76 | -------------------------------------------------------------------------------- /modules/vault-lb-fr/outputs.tf: -------------------------------------------------------------------------------- 1 | output "forwarding_rule_id" { 2 | value = google_compute_forwarding_rule.vault.id 3 | } 4 | 5 | output "forwarding_rule_url" { 6 | value = google_compute_forwarding_rule.vault.self_link 7 | } 8 | 9 | output "target_pool_id" { 10 | value = google_compute_target_pool.vault.id 11 | } 12 | 13 | output "target_pool_url" { 14 | value = google_compute_target_pool.vault.self_link 15 | } 16 | 17 | output "health_check_id" { 18 | value = google_compute_http_health_check.vault.id 19 | } 20 | 21 | output "health_check_url" { 22 | value = google_compute_http_health_check.vault.self_link 23 | } 24 | 25 | output "firewall_rule_id" { 26 | value = google_compute_firewall.load_balancer.id 27 | } 28 | 29 | output "firewall_rule_url" { 30 | value = google_compute_firewall.load_balancer.self_link 31 | } 32 | 33 | -------------------------------------------------------------------------------- /modules/vault-lb-fr/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # You must provide a value for each of these parameters. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "cluster_name" { 7 | description = "The name of the Vault cluster (e.g. vault-stage). This variable is used to namespace all resources created by this module." 8 | type = string 9 | } 10 | 11 | variable "cluster_tag_name" { 12 | description = "The tag name that the Vault Compute Instances use to automatically discover each other and form a cluster." 
13 | type = string 14 | } 15 | 16 | variable "health_check_path" { 17 | description = "The URL path the Health Check will query. Must return a 200 OK when the service is ready to receive requests from the Load Balancer." 18 | type = string 19 | } 20 | 21 | variable "health_check_port" { 22 | description = "The port to be used by the Health Check." 23 | type = number 24 | } 25 | 26 | # --------------------------------------------------------------------------------------------------------------------- 27 | # OPTIONAL PARAMETERS 28 | # These parameters have reasonable defaults. 29 | # --------------------------------------------------------------------------------------------------------------------- 30 | 31 | variable "api_port" { 32 | description = "The port used by clients to talk to the Vault Server API" 33 | type = number 34 | default = 8200 35 | } 36 | 37 | variable "network_name" { 38 | description = "The URL of the VPC Network where all resources should be created. If left blank, we will use the default VPC network." 39 | type = string 40 | default = null 41 | } 42 | 43 | # Health Check options 44 | 45 | variable "health_check_description" { 46 | description = "A description to add to the Health Check created by this module." 47 | type = string 48 | default = null 49 | } 50 | 51 | variable "health_check_interval_sec" { 52 | description = "The number of seconds between each Health Check attempt." 53 | type = number 54 | default = 15 55 | } 56 | 57 | variable "health_check_timeout_sec" { 58 | description = "The number of seconds to wait before the Health Check declares failure." 59 | type = number 60 | default = 5 61 | } 62 | 63 | variable "health_check_healthy_threshold" { 64 | description = "The number of consecutive successes required to consider the Compute Instance healthy." 65 | type = number 66 | default = 2 67 | } 68 | 69 | variable "health_check_unhealthy_threshold" { 70 | description = "The number of consecutive failures required to consider the Compute Instance unhealthy." 71 | type = number 72 | default = 2 73 | } 74 | 75 | # Forwarding Rule Options 76 | 77 | variable "forwarding_rule_description" { 78 | description = "The description added to the Forwarding Rule created by this module." 79 | type = string 80 | default = null 81 | } 82 | 83 | variable "forwarding_rule_ip_address" { 84 | description = "The static IP address to assign to the Forwarding Rule. If not set, an ephemeral IP address is used." 85 | type = string 86 | default = null 87 | } 88 | 89 | # Target Pool Options 90 | 91 | variable "target_pool_description" { 92 | description = "The description added to the Target Pool created by this module." 93 | type = string 94 | default = null 95 | } 96 | 97 | variable "target_pool_session_affinity" { 98 | description = "How to distribute load across the Target Pool. Options are NONE (no affinity), CLIENT_IP (hash of the source/dest addresses/ports), and CLIENT_IP_PROTO also includes the protocol." 99 | type = string 100 | default = "NONE" 101 | } 102 | 103 | # Firewall Rule Options 104 | 105 | variable "firewall_rule_description" { 106 | description = "A description to add to the Firewall Rule created by this module." 107 | type = string 108 | default = null 109 | } 110 | 111 | variable "allow_access_from_cidr_blocks" { 112 | description = "The list of CIDR-formatted IP address ranges from which access to the Vault load balancer will be allowed." 
113 | type = list(string) 114 | default = ["0.0.0.0/0"] 115 | } 116 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "gcp_project_id" { 2 | value = var.gcp_project_id 3 | } 4 | 5 | output "vault_cluster_size" { 6 | value = var.vault_cluster_size 7 | } 8 | 9 | output "cluster_tag_name" { 10 | value = module.vault_cluster.cluster_tag_name 11 | } 12 | 13 | output "instance_group_id" { 14 | value = module.vault_cluster.instance_group_id 15 | } 16 | 17 | output "instance_group_name" { 18 | value = module.vault_cluster.instance_group_name 19 | } 20 | 21 | output "instance_group_url" { 22 | value = module.vault_cluster.instance_group_url 23 | } 24 | 25 | output "instance_template_url" { 26 | value = module.vault_cluster.instance_template_url 27 | } 28 | 29 | output "firewall_rule_allow_intracluster_vault_id" { 30 | value = module.vault_cluster.firewall_rule_allow_intracluster_vault_id 31 | } 32 | 33 | output "firewall_rule_allow_intracluster_vault_url" { 34 | value = module.vault_cluster.firewall_rule_allow_intracluster_vault_url 35 | } 36 | 37 | output "firewall_rule_allow_inbound_api_id" { 38 | value = module.vault_cluster.firewall_rule_allow_inbound_api_id 39 | } 40 | 41 | output "firewall_rule_allow_inbound_api_url" { 42 | value = module.vault_cluster.firewall_rule_allow_inbound_api_url 43 | } 44 | 45 | output "firewall_rule_allow_inbound_health_check_id" { 46 | value = module.vault_cluster.firewall_rule_allow_inbound_health_check_id 47 | } 48 | 49 | output "firewall_rule_allow_inbound_health_check_url" { 50 | value = module.vault_cluster.firewall_rule_allow_inbound_health_check_url 51 | } 52 | 53 | output "bucket_name_id" { 54 | value = module.vault_cluster.bucket_name_id 55 | } 56 | 57 | output "bucket_name_url" { 58 | value = module.vault_cluster.bucket_name_url 59 | } 60 | 61 | -------------------------------------------------------------------------------- /test/Gopkg.toml: -------------------------------------------------------------------------------- 1 | 2 | # Gopkg.toml example 3 | # 4 | # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md 5 | # for detailed Gopkg.toml documentation. 6 | # 7 | # required = ["github.com/user/thing/cmd/thing"] 8 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] 9 | # 10 | # [[constraint]] 11 | # name = "github.com/user/project" 12 | # version = "1.0.0" 13 | # 14 | # [[constraint]] 15 | # name = "github.com/user/project2" 16 | # branch = "dev" 17 | # source = "github.com/myfork/project2" 18 | # 19 | # [[override]] 20 | # name = "github.com/x/y" 21 | # version = "2.4.0" 22 | 23 | 24 | 25 | [[constraint]] 26 | name = "github.com/gruntwork-io/terratest" 27 | version = "0.17.4" 28 | 29 | [prune] 30 | go-tests = true 31 | unused-packages = true 32 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | This folder contains automated tests for this Module. All of the tests are written in [Go](https://golang.org/). 4 | Most of these are "integration tests" that deploy real infrastructure using Terraform and verify that infrastructure 5 | works as expected using a helper library called [Terratest](https://github.com/gruntwork-io/terratest). 6 | 7 | 8 | 9 | ## WARNING: These Tests May Cost You Money! 
10 | 
11 | **Note #1**: Many of these tests create real resources in a GCP account and then try to clean those resources up at
12 | the end of a test run. That means these tests may cost you money to run! When adding tests, please be considerate of
13 | the resources you create and take extra care to clean everything up when you're done!
14 | 
15 | **Note #2**: Never forcefully shut the tests down (e.g. by hitting `CTRL + C`) or the cleanup tasks won't run!
16 | 
17 | **Note #3**: We set `-timeout 60m` on all tests not because they necessarily take that long, but because Go has a
18 | default test timeout of 10 minutes, after which it forcefully kills the tests with a `SIGQUIT`, preventing the cleanup
19 | tasks from running. Therefore, we set an overly long timeout to make sure all tests have enough time to finish and
20 | clean up.
21 | 
22 | 
23 | 
24 | ## Running the tests
25 | 
26 | ### Prerequisites
27 | 
28 | - Install the latest version of [Go](https://golang.org/).
29 | - Install [dep](https://github.com/golang/dep) for Go dependency management.
30 | - Install [Terraform](https://www.terraform.io/downloads.html).
31 | - Configure your GCP credentials using one of the [options supported by the Google Cloud
32 | SDK](https://cloud.google.com/sdk/docs/authorizing).
33 | 
34 | 
35 | ### One-time setup
36 | 
37 | Download Go dependencies using dep:
38 | 
39 | ```
40 | cd test
41 | dep ensure
42 | ```
43 | 
44 | 
45 | ### Run all the tests
46 | 
47 | ```bash
48 | cd test
49 | go test -v -timeout 60m
50 | ```
51 | 
52 | 
53 | ### Run a specific test
54 | 
55 | To run a specific test called `TestFoo`:
56 | 
57 | ```bash
58 | cd test
59 | go test -v -timeout 60m -run TestFoo
60 | ```
61 | 
-------------------------------------------------------------------------------- /test/terratest_helpers.go: --------------------------------------------------------------------------------
1 | package test
2 | 
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "os"
7 | "testing"
8 | "time"
9 | 
10 | "github.com/gruntwork-io/terratest/modules/gcp"
11 | "github.com/gruntwork-io/terratest/modules/logger"
12 | "github.com/gruntwork-io/terratest/modules/packer"
13 | "github.com/gruntwork-io/terratest/modules/retry"
14 | "github.com/gruntwork-io/terratest/modules/ssh"
15 | "github.com/gruntwork-io/terratest/modules/test-structure"
16 | )
17 | 
18 | // Terratest saved value names
19 | const SAVED_GCP_PROJECT_ID = "GcpProjectId"
20 | const SAVED_GCP_REGION_NAME = "GcpRegionName"
21 | const SAVED_GCP_ZONE_NAME = "GcpZoneName"
22 | 
23 | // PACKER_VAR_GCP_PROJECT_ID represents the Project ID variable in the Packer template
24 | const PACKER_VAR_GCP_PROJECT_ID = "project_id"
25 | 
26 | // PACKER_VAR_GCP_ZONE represents the Zone variable in the Packer template
27 | const PACKER_VAR_GCP_ZONE = "zone"
28 | 
29 | const PACKER_VAR_CA_PUBLIC_KEY = "ca_public_key_path"
30 | const PACKER_VAR_TLS_PUBLIC_KEY = "tls_public_key_path"
31 | const PAKCER_VAR_TLS_PRIVATE_KEY = "tls_private_key_path"
32 | const PACKER_VAR_VAULT_DOWNLOAD_URL = "VAULT_DOWNLOAD_URL"
33 | 
34 | const PACKER_TEMPLATE_PATH = "../examples/vault-consul-image/vault-consul.json"
35 | 
36 | const SAVED_TLS_CERT = "TlsCert"
37 | const SAVED_KEYPAIR = "KeyPair"
38 | 
39 | // Returns the value of a required environment variable, failing the test if it is not set
40 | func getUrlFromEnv(t *testing.T, key string) string {
41 | url := os.Getenv(key)
42 | if url == "" {
43 | t.Fatalf("Please set the environment variable: %s\n", key)
44 | }
45 | return url
46 | }
47 | 
48 | // Compose packer image options
49 | func composeImageOptions(t *testing.T, packerBuildName string, testDir string, useEnterpriseVault bool, vaultDownloadUrl string) *packer.Options {
50 | projectId := test_structure.LoadString(t, testDir, SAVED_GCP_PROJECT_ID)
51 | zone := test_structure.LoadString(t, testDir, SAVED_GCP_ZONE_NAME)
52 | tlsCert := loadTLSCert(t, WORK_DIR)
53 | 
54 | environmentVariables := map[string]string{}
55 | if useEnterpriseVault {
56 | environmentVariables[PACKER_VAR_VAULT_DOWNLOAD_URL] = vaultDownloadUrl
57 | }
58 | 
59 | return &packer.Options{
60 | Template: PACKER_TEMPLATE_PATH,
61 | Only: packerBuildName,
62 | Vars: map[string]string{
63 | PACKER_VAR_GCP_PROJECT_ID: projectId,
64 | PACKER_VAR_GCP_ZONE: zone,
65 | PACKER_VAR_CA_PUBLIC_KEY: tlsCert.CAPublicKeyPath,
66 | PACKER_VAR_TLS_PUBLIC_KEY: tlsCert.PublicKeyPath,
67 | PAKCER_VAR_TLS_PRIVATE_KEY: tlsCert.PrivateKeyPath,
68 | },
69 | Env: environmentVariables,
70 | }
71 | }
72 | 
73 | func deleteVaultImage(t *testing.T, testDir string, projectId string, imageFileName string) {
74 | imageName := test_structure.LoadString(t, testDir, imageFileName)
75 | image := gcp.FetchImage(t, projectId, imageName)
76 | image.DeleteImage(t)
77 | }
78 | 
79 | func saveTLSCert(t *testing.T, testFolder string, tlsCert TlsCert) {
80 | test_structure.SaveTestData(t, test_structure.FormatTestDataPath(testFolder, SAVED_TLS_CERT), tlsCert)
81 | }
82 | 
83 | func loadTLSCert(t *testing.T, testFolder string) TlsCert {
84 | var tlsCert TlsCert
85 | test_structure.LoadTestData(t, test_structure.FormatTestDataPath(testFolder, SAVED_TLS_CERT), &tlsCert)
86 | return tlsCert
87 | }
88 | 
89 | func saveKeyPair(t *testing.T, testFolder string, keyPair *ssh.KeyPair) {
90 | test_structure.SaveTestData(t, test_structure.FormatTestDataPath(testFolder, SAVED_KEYPAIR), keyPair)
91 | }
92 | 
93 | func loadKeyPair(t *testing.T, testFolder string) ssh.KeyPair {
94 | var keyPair ssh.KeyPair
95 | test_structure.LoadTestData(t, test_structure.FormatTestDataPath(testFolder, SAVED_KEYPAIR), &keyPair)
96 | return keyPair
97 | }
98 | 
99 | func getFilesFromInstance(t *testing.T, instance *gcp.Instance, keyPair *ssh.KeyPair, filePaths ...string) map[string]string {
100 | publicIp := instance.GetPublicIp(t)
101 | 
102 | host := ssh.Host{
103 | SshUserName: "terratest",
104 | SshKeyPair: keyPair,
105 | Hostname: publicIp,
106 | }
107 | 
108 | useSudo := false
109 | filesFromInstance, err := ssh.FetchContentsOfFilesE(t, host, useSudo, filePaths...)
110 | if err != nil { 
111 | logger.Logf(t, "Error getting log file from instance: %s", err.Error()) 
112 | } 
113 | 
114 | return filesFromInstance 
115 | } 
116 | 
117 | func writeLogFile(t *testing.T, buffer string, destination string) { 
118 | logger.Logf(t, "Writing log file to %s", destination) 
119 | file, err := os.Create(destination) 
120 | if err != nil { 
121 | logger.Logf(t, "Error creating log file on disk: %s", err.Error()) 
122 | return 
123 | } 
124 | defer file.Close() 
125 | file.WriteString(buffer) 
126 | } 
127 | 
128 | func addKeyPairToInstancesInGroup(t *testing.T, projectId string, region string, instanceGroupName string, keyPair *ssh.KeyPair, sshUserName string, expectedInstances int) []*gcp.Instance { 
129 | instanceGroup := gcp.FetchRegionalInstanceGroup(t, projectId, region, instanceGroupName) 
130 | instances := getInstancesFromGroup(t, projectId, instanceGroup, expectedInstances) 
131 | 
132 | for _, instance := range instances { 
133 | instance.AddSshKey(t, sshUserName, keyPair.PublicKey) 
134 | } 
135 | return instances 
136 | } 
137 | 
138 | func getInstancesFromGroup(t *testing.T, projectId string, instanceGroup *gcp.RegionalInstanceGroup, expectedInstances int) []*gcp.Instance { 
139 | instances := []*gcp.Instance{} 
140 | 
141 | retry.DoWithRetry(t, "Getting instances", 30, 10*time.Second, func() (string, error) { 
142 | instances = instanceGroup.GetInstances(t, projectId) 
143 | 
144 | if len(instances) != expectedInstances { 
145 | return "", fmt.Errorf("Expected to get %d instances, but got %d: %v", expectedInstances, len(instances), instances) 
146 | } 
147 | return "", nil 
148 | }) 
149 | 
150 | return instances 
151 | } 
152 | 
153 | func runCommand(t *testing.T, bastionHost *ssh.Host, targetHost *ssh.Host, command string) (string, error) { 
154 | if bastionHost == nil { 
155 | return ssh.CheckSshCommandE(t, *targetHost, command) 
156 | } 
157 | return ssh.CheckPrivateSshConnectionE(t, *bastionHost, *targetHost, command) 
158 | } 
159 | 
160 | func getRandomCidr() string { 
161 | return fmt.Sprintf("10.%d.%d.%d/28", rand.Intn(128), rand.Intn(256), rand.Intn(16)*16) 
162 | } 
163 | 
-------------------------------------------------------------------------------- /test/tls_helpers.go: 
-------------------------------------------------------------------------------- 
1 | package test 
2 | 
3 | import ( 
4 | "io/ioutil" 
5 | "os" 
6 | "os/user" 
7 | "strings" 
8 | "testing" 
9 | 
10 | "github.com/gruntwork-io/terratest/modules/terraform" 
11 | "github.com/gruntwork-io/terratest/modules/test-structure" 
12 | ) 
13 | 
14 | // 
15 | // This file was copied and pasted verbatim from https://github.com/hashicorp/terraform-aws-vault/blob/master/test/tls_helpers.go 
16 | // 
17 | 
18 | type TlsCert struct { 
19 | CAPublicKeyPath string 
20 | PublicKeyPath string 
21 | PrivateKeyPath string 
22 | } 
23 | 
24 | const REPO_ROOT = "../" 
25 | 
26 | const PRIVATE_TLS_CERT_PATH = "modules/private-tls-cert" 
27 | 
28 | const VAR_CA_PUBLIC_KEY_FILE_PATH = "ca_public_key_file_path" 
29 | const VAR_PUBLIC_KEY_FILE_PATH = "public_key_file_path" 
30 | const VAR_PRIVATE_KEY_FILE_PATH = "private_key_file_path" 
31 | const VAR_OWNER = "owner" 
32 | const VAR_ORGANIZATION_NAME = "organization_name" 
33 | const VAR_CA_COMMON_NAME = "ca_common_name" 
34 | const VAR_COMMON_NAME = "common_name" 
35 | const VAR_DNS_NAMES = "dns_names" 
36 | const VAR_IP_ADDRESSES = "ip_addresses" 
37 | const VAR_VALIDITY_PERIOD_HOURS = "validity_period_hours" 
38 | 
39 | // Use the private-tls-cert module to generate a self-signed TLS certificate 
40 | func
generateSelfSignedTlsCert(t *testing.T) TlsCert { 41 | t.Logf("Generating self-signed TLS certs") 42 | 43 | currentUser, err := user.Current() 44 | if err != nil { 45 | t.Fatalf("Couldn't get current OS user: %v", err) 46 | } 47 | 48 | caPublicKeyFilePath, err := ioutil.TempFile("", "ca-public-key") 49 | if err != nil { 50 | t.Fatalf("Couldn't create temp file: %v", err) 51 | } 52 | 53 | publicKeyFilePath, err := ioutil.TempFile("", "tls-public-key") 54 | if err != nil { 55 | t.Fatalf("Couldn't create temp file: %v", err) 56 | } 57 | 58 | privateKeyFilePath, err := ioutil.TempFile("", "tls-private-key") 59 | if err != nil { 60 | t.Fatalf("Couldn't create temp file: %v", err) 61 | } 62 | 63 | examplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, PRIVATE_TLS_CERT_PATH) 64 | 65 | terraformOptions := &terraform.Options{ 66 | TerraformDir: examplesDir, 67 | Vars: map[string]interface{}{ 68 | VAR_CA_PUBLIC_KEY_FILE_PATH: caPublicKeyFilePath.Name(), 69 | VAR_PUBLIC_KEY_FILE_PATH: publicKeyFilePath.Name(), 70 | VAR_PRIVATE_KEY_FILE_PATH: privateKeyFilePath.Name(), 71 | VAR_OWNER: currentUser.Username, 72 | VAR_ORGANIZATION_NAME: "Gruntwork", 73 | VAR_CA_COMMON_NAME: "Vault Module Test CA", 74 | VAR_COMMON_NAME: "Vault Module Test", 75 | VAR_DNS_NAMES: []string{"vault.service.consul"}, 76 | VAR_IP_ADDRESSES: []string{"127.0.0.1"}, 77 | VAR_VALIDITY_PERIOD_HOURS: 1000, 78 | }, 79 | } 80 | 81 | defer terraform.Destroy(t, terraformOptions) 82 | 83 | terraform.InitAndApply(t, terraformOptions) 84 | 85 | assertFileNotEmpty(t, caPublicKeyFilePath.Name()) 86 | assertFileNotEmpty(t, publicKeyFilePath.Name()) 87 | assertFileNotEmpty(t, privateKeyFilePath.Name()) 88 | 89 | return TlsCert{ 90 | CAPublicKeyPath: caPublicKeyFilePath.Name(), 91 | PublicKeyPath: publicKeyFilePath.Name(), 92 | PrivateKeyPath: privateKeyFilePath.Name(), 93 | } 94 | } 95 | 96 | // Delete the temporary self-signed cert files we created 97 | func cleanupTLSCertFiles(tlsCert TlsCert) { 98 | os.Remove(tlsCert.CAPublicKeyPath) 99 | os.Remove(tlsCert.PrivateKeyPath) 100 | os.Remove(tlsCert.PublicKeyPath) 101 | } 102 | 103 | // This is an attempt to catch a strange issue where the private-tls-cert module seems to occasionally create a private 104 | // key file that is completely empty. This doesn't fix the issue, but it at least helps us confirm that this is the 105 | // issue that is causing intermittent test failures. 
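// assertFileNotEmpty fails the test if the file at the given path cannot be read, is empty, or contains only whitespace.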
106 | func assertFileNotEmpty(t *testing.T, path string) { 107 | bytes, err := ioutil.ReadFile(path) 108 | if err != nil { 109 | t.Fatal(err) 110 | } 111 | 112 | fileContents := string(bytes) 113 | if strings.TrimSpace(fileContents) == "" { 114 | t.Fatalf("Expected file at %s to not be empty", path) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /test/vault_cluster_auth_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | "time" 8 | 9 | "github.com/gruntwork-io/terratest/modules/http-helper" 10 | "github.com/gruntwork-io/terratest/modules/random" 11 | "github.com/gruntwork-io/terratest/modules/terraform" 12 | "github.com/gruntwork-io/terratest/modules/test-structure" 13 | ) 14 | 15 | const ( 16 | TFVAR_NAME_CLIENT_NAME = "web_client_name" 17 | TFVAR_NAME_EXAMPLE_SECRET = "example_secret" 18 | 19 | TFOUT_WEB_CLIENT_PUBLIC_IP = "web_client_public_ip" 20 | 21 | EXAMPLE_SECRET = "42" 22 | ) 23 | 24 | func runVaultIamAuthTest(t *testing.T, packerBuildSaveName string) { 25 | exampleDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples/vault-cluster-authentication-iam") 26 | 27 | defer test_structure.RunTestStage(t, "teardown", func() { 28 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 29 | terraform.Destroy(t, terraformOptions) 30 | }) 31 | 32 | defer test_structure.RunTestStage(t, "log", func() { 33 | //ToDo: Modify log retrieval to go through a bastion host 34 | // Requires adding feature to terratest 35 | //writeVaultLogs(t, "vaultAuthIam", exampleDir) 36 | }) 37 | 38 | test_structure.RunTestStage(t, "deploy", func() { 39 | projectId := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 40 | region := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_REGION_NAME) 41 | imageID := test_structure.LoadString(t, WORK_DIR, packerBuildSaveName) 42 | 43 | // GCP only supports lowercase names for some resources 44 | uniqueID := strings.ToLower(random.UniqueId()) 45 | 46 | terraformOptions := &terraform.Options{ 47 | TerraformDir: exampleDir, 48 | Vars: map[string]interface{}{ 49 | TFVAR_NAME_GCP_PROJECT_ID: projectId, 50 | TFVAR_NAME_GCP_REGION: region, 51 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_NAME: fmt.Sprintf("consul-test-%s", uniqueID), 52 | TFVAR_NAME_CONSUL_SOURCE_IMAGE: imageID, 53 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_MACHINE_TYPE: "g1-small", 54 | TFVAR_NAME_VAULT_CLUSTER_NAME: fmt.Sprintf("vault-test-%s", uniqueID), 55 | TFVAR_NAME_VAULT_SOURCE_IMAGE: imageID, 56 | TFVAR_NAME_VAULT_CLUSTER_MACHINE_TYPE: "g1-small", 57 | TFVAR_NAME_CLIENT_NAME: fmt.Sprintf("vault-client-test-%s", uniqueID), 58 | TFVAR_NAME_SUBNET_CIDR: getRandomCidr(), 59 | }, 60 | } 61 | 62 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 63 | terraform.InitAndApply(t, terraformOptions) 64 | }) 65 | 66 | test_structure.RunTestStage(t, "validate", func() { 67 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 68 | testRequestSecret(t, terraformOptions, EXAMPLE_SECRET) 69 | }) 70 | } 71 | 72 | func runVaultGceAuthTest(t *testing.T, packerBuildSaveName string) { 73 | exampleDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples/vault-cluster-authentication-gce") 74 | 75 | defer test_structure.RunTestStage(t, "teardown", func() { 76 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 77 | terraform.Destroy(t, terraformOptions) 78 | }) 79 | 80 | 
defer test_structure.RunTestStage(t, "log", func() { 81 | //ToDo: Modify log retrieval to go through a bastion host 82 | // Requires adding feature to terratest 83 | //writeVaultLogs(t, "vaultAuthGce", exampleDir) 84 | }) 85 | 86 | test_structure.RunTestStage(t, "deploy", func() { 87 | projectId := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 88 | region := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_REGION_NAME) 89 | imageID := test_structure.LoadString(t, WORK_DIR, packerBuildSaveName) 90 | 91 | // GCP only supports lowercase names for some resources 92 | uniqueID := strings.ToLower(random.UniqueId()) 93 | 94 | terraformOptions := &terraform.Options{ 95 | TerraformDir: exampleDir, 96 | Vars: map[string]interface{}{ 97 | TFVAR_NAME_GCP_PROJECT_ID: projectId, 98 | TFVAR_NAME_GCP_REGION: region, 99 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_NAME: fmt.Sprintf("consul-test-%s", uniqueID), 100 | TFVAR_NAME_CONSUL_SOURCE_IMAGE: imageID, 101 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_MACHINE_TYPE: "g1-small", 102 | TFVAR_NAME_VAULT_CLUSTER_NAME: fmt.Sprintf("vault-test-%s", uniqueID), 103 | TFVAR_NAME_VAULT_SOURCE_IMAGE: imageID, 104 | TFVAR_NAME_VAULT_CLUSTER_MACHINE_TYPE: "g1-small", 105 | TFVAR_NAME_CLIENT_NAME: fmt.Sprintf("vault-client-test-%s", uniqueID), 106 | TFVAR_NAME_SUBNET_CIDR: getRandomCidr(), 107 | }, 108 | } 109 | 110 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 111 | terraform.InitAndApply(t, terraformOptions) 112 | }) 113 | 114 | test_structure.RunTestStage(t, "validate", func() { 115 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 116 | testRequestSecret(t, terraformOptions, EXAMPLE_SECRET) 117 | 118 | }) 119 | } 120 | 121 | func testRequestSecret(t *testing.T, terraformOptions *terraform.Options, expectedResponse string) { 122 | webClientPublicIp := terraform.OutputRequired(t, terraformOptions, TFOUT_WEB_CLIENT_PUBLIC_IP) 123 | url := fmt.Sprintf("http://%s:%s", webClientPublicIp, "8080") 124 | http_helper.HttpGetWithRetry(t, url, 200, expectedResponse, 30, 10*time.Second) 125 | } 126 | -------------------------------------------------------------------------------- /test/vault_cluster_enterprise_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | "time" 8 | 9 | "github.com/gruntwork-io/terratest/modules/gcp" 10 | "github.com/gruntwork-io/terratest/modules/logger" 11 | "github.com/gruntwork-io/terratest/modules/random" 12 | "github.com/gruntwork-io/terratest/modules/retry" 13 | "github.com/gruntwork-io/terratest/modules/ssh" 14 | "github.com/gruntwork-io/terratest/modules/terraform" 15 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 16 | ) 17 | 18 | const ( 19 | TFVAR_NAME_AUTOUNSEAL_KEY_PROJECT = "vault_auto_unseal_key_project_id" 20 | TFVAR_NAME_AUTOUNSEAL_KEY_REGION = "vault_auto_unseal_key_region" 21 | TFVAR_NAME_AUTOUNSEAL_KEY_RING_NAME = "vault_auto_unseal_key_ring" 22 | TFVAR_NAME_AUTOUNSEAL_CRYPTO_KEY_NAME = "vault_auto_unseal_crypto_key_name" 23 | 24 | AUTOUNSEAL_KEY_REGION = "global" 25 | AUTOUNSEAL_KEY_RING_NAME = "vault-cluster-automated-tests" 26 | AUTOUNSEAL_CRYPTO_KEY_NAME = "circle-ci" 27 | ) 28 | 29 | // Test the Vault enterprise cluster example by: 30 | // 31 | // 1. Copy the code in this repo to a temp folder so tests on the Terraform code can run in parallel without the 32 | // state files overwriting each other. 33 | // 2. 
Build the Cloud Image in the vault-consul-image example with the given build name and the enterprise packages 34 | // 3. Deploy that Image using the example Terraform code 35 | // 4. TODO - SSH to a Vault node and check if Vault enterprise is installed properly 36 | // 5. SSH into a Vault node and initialize the Vault cluster 37 | // 6. SSH to each other Vault node, restart vault and test that it is unsealed 38 | // 7. SSH to a Vault node and make sure you can communicate with the nodes via Consul-managed DNS 39 | func runVaultEnterpriseClusterTest(t *testing.T, packerBuildSaveName string) { 40 | exampleDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples/vault-cluster-enterprise") 41 | 42 | defer test_structure.RunTestStage(t, "teardown", func() { 43 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 44 | terraform.Destroy(t, terraformOptions) 45 | }) 46 | 47 | defer test_structure.RunTestStage(t, "log", func() { 48 | //ToDo: Modify log retrieval to go through bastion host 49 | // Requires adding feature to terratest 50 | //writeVaultLogs(t, "vaultEnterpriseCluster", exampleDir) 51 | }) 52 | 53 | test_structure.RunTestStage(t, "deploy", func() { 54 | projectId := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 55 | region := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_REGION_NAME) 56 | imageID := test_structure.LoadString(t, WORK_DIR, packerBuildSaveName) 57 | 58 | // GCP only supports lowercase names for some resources 59 | uniqueID := strings.ToLower(random.UniqueId()) 60 | 61 | consulClusterName := fmt.Sprintf("consul-test-%s", uniqueID) 62 | vaultClusterName := fmt.Sprintf("vault-test-%s", uniqueID) 63 | 64 | terraformOptions := &terraform.Options{ 65 | TerraformDir: exampleDir, 66 | Vars: map[string]interface{}{ 67 | TFVAR_NAME_GCP_PROJECT_ID: projectId, 68 | TFVAR_NAME_GCP_REGION: region, 69 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_NAME: consulClusterName, 70 | TFVAR_NAME_CONSUL_SOURCE_IMAGE: imageID, 71 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_MACHINE_TYPE: "g1-small", 72 | TFVAR_NAME_VAULT_CLUSTER_NAME: vaultClusterName, 73 | TFVAR_NAME_VAULT_SOURCE_IMAGE: imageID, 74 | TFVAR_NAME_VAULT_CLUSTER_MACHINE_TYPE: "g1-small", 75 | TFVAR_NAME_BASTION_SERVER_NAME: fmt.Sprintf("bastion-test-%s", uniqueID), 76 | TFVAR_NAME_AUTOUNSEAL_KEY_PROJECT: projectId, 77 | TFVAR_NAME_AUTOUNSEAL_KEY_REGION: AUTOUNSEAL_KEY_REGION, 78 | TFVAR_NAME_AUTOUNSEAL_KEY_RING_NAME: AUTOUNSEAL_KEY_RING_NAME, 79 | TFVAR_NAME_AUTOUNSEAL_CRYPTO_KEY_NAME: AUTOUNSEAL_CRYPTO_KEY_NAME, 80 | TFVAR_NAME_SUBNET_CIDR: getRandomCidr(), 81 | }, 82 | } 83 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 84 | 85 | terraform.InitAndApply(t, terraformOptions) 86 | }) 87 | 88 | test_structure.RunTestStage(t, "validate", func() { 89 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 90 | projectId := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 91 | region := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_REGION_NAME) 92 | instanceGroupName := terraform.OutputRequired(t, terraformOptions, TFOUT_INSTANCE_GROUP_NAME) 93 | 94 | sshUserName := "terratest" 95 | keyPair := ssh.GenerateRSAKeyPair(t, 2048) 96 | saveKeyPair(t, exampleDir, keyPair) 97 | addKeyPairToInstancesInGroup(t, projectId, region, instanceGroupName, keyPair, sshUserName, 3) 98 | 99 | bastionName := terraform.OutputRequired(t, terraformOptions, TFVAR_NAME_BASTION_SERVER_NAME) 100 | bastionInstance := gcp.FetchInstance(t, projectId, bastionName) 101 | 
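// Add the test SSH key to the bastion host as well, since all SSH access to the private Vault nodes is tunneled through it.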
bastionInstance.AddSshKey(t, sshUserName, keyPair.PublicKey) 
102 | bastionHost := ssh.Host{ 
103 | Hostname: bastionInstance.GetPublicIp(t), 
104 | SshUserName: sshUserName, 
105 | SshKeyPair: keyPair, 
106 | } 
107 | 
108 | cluster := testVaultInitializeAutoUnseal(t, projectId, region, instanceGroupName, sshUserName, keyPair, &bastionHost) 
109 | testVaultUsesConsulForDns(t, cluster, &bastionHost) 
110 | }) 
111 | } 
112 | 
113 | func testVaultInitializeAutoUnseal(t *testing.T, projectId string, region string, instanceGroupName string, sshUserName string, sshKeyPair *ssh.KeyPair, bastionHost *ssh.Host) *VaultCluster { 
114 | cluster := findVaultClusterNodes(t, projectId, region, instanceGroupName, sshUserName, sshKeyPair, bastionHost) 
115 | 
116 | verifyCanSsh(t, cluster, bastionHost) 
117 | testVaultIsEnterprise(t, cluster.Leader, bastionHost) 
118 | 
119 | initializeVault(t, cluster, bastionHost) 
120 | assertNodeStatus(t, cluster.Leader, bastionHost, Leader) 
121 | 
122 | // Test that the other members of the cluster will be unsealed after restarting 
123 | assertNodeStatus(t, cluster.Standby1, bastionHost, Sealed) 
124 | restartVault(t, cluster.Standby1, bastionHost) 
125 | assertNodeStatus(t, cluster.Standby1, bastionHost, Standby) 
126 | 
127 | assertNodeStatus(t, cluster.Standby2, bastionHost, Sealed) 
128 | restartVault(t, cluster.Standby2, bastionHost) 
129 | assertNodeStatus(t, cluster.Standby2, bastionHost, Standby) 
130 | return cluster 
131 | } 
132 | 
133 | func testVaultIsEnterprise(t *testing.T, targetHost ssh.Host, bastionHost *ssh.Host) { 
134 | retry.DoWithRetry(t, "Testing Vault Version", 3, 5*time.Second, func() (string, error) { 
135 | output, err := runCommand(t, bastionHost, &targetHost, "vault --version") 
136 | if !strings.Contains(output, "+ent") { 
137 | return "", fmt.Errorf("This vault package is not the expected enterprise version. 
Actual version: %s", output) 138 | } 139 | return "", err 140 | }) 141 | } 142 | 143 | func restartVault(t *testing.T, targetHost ssh.Host, bastionHost *ssh.Host) { 144 | retry.DoWithRetry(t, "Restarting vault", 3, 5*time.Second, func() (string, error) { 145 | output, err := runCommand(t, bastionHost, &targetHost, "sudo supervisorctl restart vault") 146 | logger.Logf(t, "Vault Restarting output: %s", output) 147 | return output, err 148 | }) 149 | } 150 | -------------------------------------------------------------------------------- /test/vault_cluster_private_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/gruntwork-io/terratest/modules/gcp" 9 | "github.com/gruntwork-io/terratest/modules/random" 10 | "github.com/gruntwork-io/terratest/modules/ssh" 11 | "github.com/gruntwork-io/terratest/modules/terraform" 12 | "github.com/gruntwork-io/terratest/modules/test-structure" 13 | ) 14 | 15 | const ( 16 | TFVAR_NAME_BASTION_SERVER_NAME = "bastion_server_name" 17 | TFVAR_NAME_SUBNET_CIDR = "subnet_ip_cidr_range" 18 | ) 19 | 20 | func runVaultPrivateClusterTest(t *testing.T, packerBuildSaveName string) { 21 | exampleDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples/vault-cluster-private") 22 | 23 | defer test_structure.RunTestStage(t, "teardown", func() { 24 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 25 | terraform.Destroy(t, terraformOptions) 26 | }) 27 | 28 | defer test_structure.RunTestStage(t, "log", func() { 29 | //ToDo: Modify log retrieval to go through bastion host 30 | // Requires adding feature to terratest 31 | //writeVaultLogs(t, "vaultPrivateCluster", exampleDir) 32 | }) 33 | 34 | test_structure.RunTestStage(t, "deploy", func() { 35 | projectId := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 36 | region := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_REGION_NAME) 37 | imageID := test_structure.LoadString(t, WORK_DIR, packerBuildSaveName) 38 | 39 | // GCP only supports lowercase names for some resources 40 | uniqueID := strings.ToLower(random.UniqueId()) 41 | 42 | consulClusterName := fmt.Sprintf("consul-test-%s", uniqueID) 43 | vaultClusterName := fmt.Sprintf("vault-test-%s", uniqueID) 44 | 45 | terraformOptions := &terraform.Options{ 46 | TerraformDir: exampleDir, 47 | Vars: map[string]interface{}{ 48 | TFVAR_NAME_GCP_PROJECT_ID: projectId, 49 | TFVAR_NAME_GCP_REGION: region, 50 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_NAME: consulClusterName, 51 | TFVAR_NAME_CONSUL_SOURCE_IMAGE: imageID, 52 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_MACHINE_TYPE: "g1-small", 53 | TFVAR_NAME_VAULT_CLUSTER_NAME: vaultClusterName, 54 | TFVAR_NAME_VAULT_SOURCE_IMAGE: imageID, 55 | TFVAR_NAME_VAULT_CLUSTER_MACHINE_TYPE: "g1-small", 56 | TFVAR_NAME_BASTION_SERVER_NAME: fmt.Sprintf("bastion-test-%s", uniqueID), 57 | TFVAR_NAME_SUBNET_CIDR: getRandomCidr(), 58 | }, 59 | } 60 | 61 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 62 | 63 | terraform.InitAndApply(t, terraformOptions) 64 | }) 65 | 66 | test_structure.RunTestStage(t, "validate", func() { 67 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 68 | projectId := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 69 | region := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_REGION_NAME) 70 | instanceGroupName := terraform.OutputRequired(t, terraformOptions, TFOUT_INSTANCE_GROUP_NAME) 71 | 72 | sshUserName := 
"terratest" 73 | keyPair := ssh.GenerateRSAKeyPair(t, 2048) 74 | saveKeyPair(t, exampleDir, keyPair) 75 | addKeyPairToInstancesInGroup(t, projectId, region, instanceGroupName, keyPair, sshUserName, 3) 76 | 77 | bastionName := terraform.OutputRequired(t, terraformOptions, TFVAR_NAME_BASTION_SERVER_NAME) 78 | bastionInstance := gcp.FetchInstance(t, projectId, bastionName) 79 | bastionInstance.AddSshKey(t, sshUserName, keyPair.PublicKey) 80 | bastionHost := ssh.Host{ 81 | Hostname: bastionInstance.GetPublicIp(t), 82 | SshUserName: sshUserName, 83 | SshKeyPair: keyPair, 84 | } 85 | 86 | cluster := initializeAndUnsealVaultCluster(t, projectId, region, instanceGroupName, sshUserName, keyPair, &bastionHost) 87 | testVaultUsesConsulForDns(t, cluster, &bastionHost) 88 | }) 89 | } 90 | -------------------------------------------------------------------------------- /test/vault_cluster_public_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/gruntwork-io/terratest/modules/random" 9 | "github.com/gruntwork-io/terratest/modules/ssh" 10 | "github.com/gruntwork-io/terratest/modules/terraform" 11 | "github.com/gruntwork-io/terratest/modules/test-structure" 12 | ) 13 | 14 | // Terraform module vars 15 | const TFVAR_NAME_GCP_PROJECT_ID = "gcp_project_id" 16 | const TFVAR_NAME_GCP_REGION = "gcp_region" 17 | 18 | const TFVAR_NAME_VAULT_CLUSTER_NAME = "vault_cluster_name" 19 | const TFVAR_NAME_VAULT_SOURCE_IMAGE = "vault_source_image" 20 | const TFVAR_NAME_VAULT_CLUSTER_MACHINE_TYPE = "vault_cluster_machine_type" 21 | 22 | const TFVAR_NAME_CONSUL_SOURCE_IMAGE = "consul_server_source_image" 23 | const TFVAR_NAME_CONSUL_SERVER_CLUSTER_NAME = "consul_server_cluster_name" 24 | const TFVAR_NAME_CONSUL_SERVER_CLUSTER_MACHINE_TYPE = "consul_server_machine_type" 25 | 26 | func runVaultPublicClusterTest(t *testing.T, packerBuildSaveName string) { 27 | exampleDir := test_structure.CopyTerraformFolderToTemp(t, "../", ".") 28 | 29 | defer test_structure.RunTestStage(t, "teardown", func() { 30 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 31 | terraform.Destroy(t, terraformOptions) 32 | }) 33 | 34 | defer test_structure.RunTestStage(t, "log", func() { 35 | writeVaultLogs(t, "vaultPublicCluster", exampleDir) 36 | }) 37 | 38 | test_structure.RunTestStage(t, "deploy", func() { 39 | projectId := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 40 | region := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_REGION_NAME) 41 | imageID := test_structure.LoadString(t, WORK_DIR, packerBuildSaveName) 42 | 43 | // GCP only supports lowercase names for some resources 44 | uniqueID := strings.ToLower(random.UniqueId()) 45 | 46 | consulClusterName := fmt.Sprintf("consul-test-%s", uniqueID) 47 | vaultClusterName := fmt.Sprintf("vault-test-%s", uniqueID) 48 | 49 | terraformOptions := &terraform.Options{ 50 | TerraformDir: exampleDir, 51 | Vars: map[string]interface{}{ 52 | TFVAR_NAME_GCP_PROJECT_ID: projectId, 53 | TFVAR_NAME_GCP_REGION: region, 54 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_NAME: consulClusterName, 55 | TFVAR_NAME_CONSUL_SOURCE_IMAGE: imageID, 56 | TFVAR_NAME_CONSUL_SERVER_CLUSTER_MACHINE_TYPE: "g1-small", 57 | TFVAR_NAME_VAULT_CLUSTER_NAME: vaultClusterName, 58 | TFVAR_NAME_VAULT_SOURCE_IMAGE: imageID, 59 | TFVAR_NAME_VAULT_CLUSTER_MACHINE_TYPE: "g1-small", 60 | }, 61 | } 62 | 63 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 64 | 65 | 
terraform.InitAndApply(t, terraformOptions) 
66 | }) 
67 | 
68 | test_structure.RunTestStage(t, "validate", func() { 
69 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 
70 | projectId := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 
71 | region := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_REGION_NAME) 
72 | instanceGroupName := terraform.OutputRequired(t, terraformOptions, TFOUT_INSTANCE_GROUP_NAME) 
73 | 
74 | sshUserName := "terratest" 
75 | keyPair := ssh.GenerateRSAKeyPair(t, 2048) 
76 | saveKeyPair(t, exampleDir, keyPair) 
77 | addKeyPairToInstancesInGroup(t, projectId, region, instanceGroupName, keyPair, sshUserName, 3) 
78 | 
79 | cluster := initializeAndUnsealVaultCluster(t, projectId, region, instanceGroupName, sshUserName, keyPair, nil) 
80 | testVault(t, cluster.Leader.Hostname) 
81 | }) 
82 | } 
83 | 
-------------------------------------------------------------------------------- /test/vault_main_test.go: 
-------------------------------------------------------------------------------- 
1 | package test 
2 | 
3 | import ( 
4 | "fmt" 
5 | "math/rand" 
6 | "testing" 
7 | "time" 
8 | 
9 | "github.com/gruntwork-io/terratest/modules/gcp" 
10 | "github.com/gruntwork-io/terratest/modules/packer" 
11 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 
12 | ) 
13 | 
14 | const ( 
15 | IMAGE_EXAMPLE_PATH = "../examples/vault-consul-image/vault-consul.json" 
16 | WORK_DIR = "./" 
17 | ) 
18 | 
19 | type testCase struct { 
20 | Name string // Name of the test 
21 | Func func(*testing.T, string) // Function that runs the test 
22 | testWithEnterpriseVault bool // True if the test requires a Vault Enterprise image 
23 | } 
24 | 
25 | type packerBuild struct { 
26 | SaveName string // Name of the test data save file 
27 | PackerBuildName string // Name of the packer build 
28 | useEnterpriseVault bool // Use Vault Enterprise or not 
29 | } 
30 | 
31 | var testCases = []testCase{ 
32 | { 
33 | "TestVaultPrivateCluster", 
34 | runVaultPrivateClusterTest, 
35 | false, 
36 | }, 
37 | { 
38 | "TestVaultPublicCluster", 
39 | runVaultPublicClusterTest, 
40 | false, 
41 | }, 
42 | { 
43 | "TestVaultEnterpriseClusterAutoUnseal", 
44 | runVaultEnterpriseClusterTest, 
45 | true, 
46 | }, 
47 | { 
48 | "TestVaultIamAuthentication", 
49 | runVaultIamAuthTest, 
50 | false, 
51 | }, 
52 | { 
53 | "TestVaultGceAuthentication", 
54 | runVaultGceAuthTest, 
55 | false, 
56 | }, 
57 | } 
58 | 
59 | var packerBuilds = []packerBuild{ 
60 | { 
61 | "OpenSourceVaultOnUbuntu16ImageID", 
62 | "ubuntu16-image", 
63 | false, 
64 | }, 
65 | { 
66 | "OpenSourceVaultOnUbuntu18ImageID", 
67 | "ubuntu18-image", 
68 | false, 
69 | }, 
70 | { 
71 | "EnterpriseVaultOnUbuntu16ImageID", 
72 | "ubuntu16-image", 
73 | true, 
74 | }, 
75 | { 
76 | "EnterpriseVaultOnUbuntu18ImageID", 
77 | "ubuntu18-image", 
78 | true, 
79 | }, 
80 | } 
81 | 
82 | // To test this on CircleCI, you need the Vault Enterprise download URL set as an environment variable 
83 | // (VAULT_PACKER_TEMPLATE_VAR_VAULT_DOWNLOAD_URL). You would also need to set this variable locally to run the 
84 | // tests. The reason behind this is to prevent the actual URL from being visible in the code and logs. 
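// For example, to build the images and run the whole suite locally, you might do something like this (the
// download URL below is a placeholder -- substitute the real Vault Enterprise URL you were given):
//
//	export VAULT_PACKER_TEMPLATE_VAR_VAULT_DOWNLOAD_URL="https://example.com/vault-enterprise.zip"
//	cd test
//	go test -v -timeout 60m -run TestMainVaultCluster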
85 | func TestMainVaultCluster(t *testing.T) { 
86 | t.Parallel() 
87 | 
88 | test_structure.RunTestStage(t, "build_images", func() { 
89 | vaultDownloadUrl := getUrlFromEnv(t, "VAULT_PACKER_TEMPLATE_VAR_VAULT_DOWNLOAD_URL") 
90 | 
91 | projectId := gcp.GetGoogleProjectIDFromEnvVar(t) 
92 | // GCP sets quotas at a low limit for in-use IP addresses and CPUs, which causes the tests to fail. Quota 
93 | // increases have to be requested manually for each region, so the tests would break every time a new 
94 | // region is introduced. For this reason, I am limiting the tests to us-east1. 
95 | region := gcp.GetRandomRegion(t, projectId, []string{"us-east1"}, nil) 
96 | zone := gcp.GetRandomZoneForRegion(t, projectId, region) 
97 | 
98 | test_structure.SaveString(t, WORK_DIR, SAVED_GCP_PROJECT_ID, projectId) 
99 | test_structure.SaveString(t, WORK_DIR, SAVED_GCP_REGION_NAME, region) 
100 | test_structure.SaveString(t, WORK_DIR, SAVED_GCP_ZONE_NAME, zone) 
101 | 
102 | tlsCert := generateSelfSignedTlsCert(t) 
103 | saveTLSCert(t, WORK_DIR, tlsCert) 
104 | 
105 | packerImageOptions := map[string]*packer.Options{} 
106 | for _, packerBuildItem := range packerBuilds { 
107 | packerImageOptions[packerBuildItem.SaveName] = composeImageOptions(t, packerBuildItem.PackerBuildName, WORK_DIR, packerBuildItem.useEnterpriseVault, vaultDownloadUrl) 
108 | } 
109 | 
110 | imageIds := packer.BuildArtifacts(t, packerImageOptions) 
111 | for imageKey, imageId := range imageIds { 
112 | test_structure.SaveString(t, WORK_DIR, imageKey, imageId) 
113 | } 
114 | }) 
115 | 
116 | defer test_structure.RunTestStage(t, "delete_images", func() { 
117 | projectID := test_structure.LoadString(t, WORK_DIR, SAVED_GCP_PROJECT_ID) 
118 | 
119 | for _, packerBuildItem := range packerBuilds { 
120 | deleteVaultImage(t, WORK_DIR, projectID, packerBuildItem.SaveName) 
121 | } 
122 | 
123 | tlsCert := loadTLSCert(t, WORK_DIR) 
124 | cleanupTLSCertFiles(tlsCert) 
125 | }) 
126 | 
127 | t.Run("group", func(t *testing.T) { 
128 | runAllTests(t) 
129 | }) 
130 | } 
131 | 
132 | func runAllTests(t *testing.T) { 
133 | rand.Seed(time.Now().UnixNano()) 
134 | for _, testCase := range testCases { 
135 | // This re-assignment is necessary because the variable testCase is defined and set outside the for loop. 
136 | // As such, it gets overwritten on each iteration of the for loop. This is fine if you don't have concurrent code in the loop, 
137 | // but in this case, because the subtests call t.Parallel, t.Run returns before the test function finishes, 
138 | // which means that the value of testCase might change. 
139 | // More information at: 
140 | // "Be Careful with Table Driven Tests and t.Parallel()" 
141 | // https://gist.github.com/posener/92a55c4cd441fc5e5e85f27bca008721 
142 | testCase := testCase 
143 | for _, packerBuildItem := range packerBuilds { 
144 | packerBuildItem := packerBuildItem 
145 | if packerBuildItem.useEnterpriseVault == testCase.testWithEnterpriseVault { 
146 | t.Run(fmt.Sprintf("%sWith%s", testCase.Name, packerBuildItem.SaveName), func(t *testing.T) { 
147 | t.Parallel() 
148 | testCase.Func(t, packerBuildItem.SaveName) 
149 | }) 
150 | } 
151 | } 
152 | } 
153 | } 
-------------------------------------------------------------------------------- /variables.tf: 
-------------------------------------------------------------------------------- 
1 | # --------------------------------------------------------------------------------------------------------------------- 
2 | # REQUIRED PARAMETERS 
3 | # These parameters must be supplied when consuming this module. 
4 | # --------------------------------------------------------------------------------------------------------------------- 
5 | 
6 | variable "gcp_project_id" { 
7 | description = "The name of the GCP Project where all resources will be launched." 
8 | type = string 
9 | } 
10 | 
11 | variable "gcp_region" { 
12 | description = "The region in which all GCP resources will be launched." 
13 | type = string 
14 | } 
15 | 
16 | variable "vault_cluster_name" { 
17 | description = "The name of the Vault Server cluster. All resources will be namespaced by this value. E.g. vault-server-prod" 
18 | type = string 
19 | } 
20 | 
21 | variable "vault_source_image" { 
22 | description = "The Google Image used to launch each node in the Vault Server cluster. You can build this Google Image yourself at /examples/vault-consul-image." 
23 | type = string 
24 | } 
25 | 
26 | variable "consul_server_cluster_name" { 
27 | description = "The name of the Consul Server cluster. All resources will be namespaced by this value. E.g. consul-server-prod" 
28 | type = string 
29 | } 
30 | 
31 | variable "consul_server_source_image" { 
32 | description = "The Google Image used to launch each node in the Consul Server cluster. You can build this Google Image yourself at /examples/vault-consul-image." 
33 | type = string 
34 | } 
35 | 
36 | # --------------------------------------------------------------------------------------------------------------------- 
37 | # OPTIONAL PARAMETERS 
38 | # These parameters have reasonable defaults. 
39 | # --------------------------------------------------------------------------------------------------------------------- 
40 | 
41 | variable "network_project_id" { 
42 | description = "The name of the GCP Project where the network is located. Useful when using networks shared between projects. If empty, var.gcp_project_id will be used." 
43 | type = string 
44 | default = null 
45 | } 
46 | 
47 | variable "vault_cluster_machine_type" { 
48 | description = "The machine type of the Compute Instance to run for each node in the Vault cluster (e.g. n1-standard-1)." 
49 | type = string 
50 | default = "g1-small" 
51 | } 
52 | 
53 | variable "consul_server_machine_type" { 
54 | description = "The machine type of the Compute Instance to run for each node in the Consul Server cluster (e.g. n1-standard-1)." 
55 | type = string 
56 | default = "g1-small" 
57 | } 
58 | 
59 | variable "gcs_bucket_location" { 
60 | description = "The location of the Google Cloud Storage Bucket where Vault secrets will be stored. For details, see https://goo.gl/hk63jH." 
61 | type = string 
62 | default = "US" 
63 | } 
64 | 
65 | variable "gcs_bucket_class" { 
66 | description = "The Storage Class of the Google Cloud Storage Bucket where Vault secrets will be stored. Must be one of MULTI_REGIONAL, REGIONAL, NEARLINE, or COLDLINE. For details, see https://goo.gl/hk63jH." 
67 | type = string 
68 | default = "MULTI_REGIONAL" 
69 | } 
70 | 
71 | variable "gcs_bucket_force_destroy" { 
72 | description = "If true, Terraform will delete the Google Cloud Storage Bucket even if it's non-empty. WARNING! Never set this to true in a production setting. We only have this option here to facilitate testing." 
73 | type = bool 
74 | default = true 
75 | } 
76 | 
77 | variable "vault_cluster_size" { 
78 | description = "The number of nodes to have in the Vault Server cluster. We strongly recommend that you use either 3 or 5." 
79 | type = number 
80 | default = 3 
81 | } 
82 | 
83 | variable "consul_server_cluster_size" { 
84 | description = "The number of nodes to have in the Consul Server cluster. We strongly recommend that you use either 3 or 5." 
85 | type = number 
86 | default = 3 
87 | } 
88 | 
89 | variable "web_proxy_port" { 
90 | description = "The port at which the HTTP proxy server will listen for incoming HTTP requests that will be forwarded to the Vault Health Check URL. We must have an HTTP proxy server to work around the limitation that GCP only permits Health Checks via HTTP, not HTTPS." 
91 | type = number 
92 | default = 8000 
93 | } 
94 | 
95 | variable "root_volume_disk_size_gb" { 
96 | description = "The size, in GB, of the root disk volume on each Consul node." 
97 | type = number 
98 | default = 30 
99 | } 
100 | 
101 | variable "root_volume_disk_type" { 
102 | description = "The GCE disk type. Can be either pd-ssd, local-ssd, or pd-standard." 
103 | type = string 
104 | default = "pd-standard" 
105 | } 
106 | 
107 | variable "enable_vault_ui" { 
108 | description = "If true, enable the Vault UI." 
109 | type = bool 
110 | default = true 
111 | } 
112 | 
113 | variable "image_project_id" { 
114 | description = "The name of the GCP Project where the image is located. Useful when using a separate project for custom images. If empty, var.gcp_project_id will be used." 
115 | type = string 
116 | default = null 
117 | } 
118 | 
--------------------------------------------------------------------------------
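As a quick illustration of how the variables above fit together, here is a minimal, hypothetical sketch of consuming this module from another Terraform configuration. The `source` reference, project ID, region, cluster names, and image name below are placeholders, not values from this repo -- see the root `main.tf` and `examples/root-example` for a complete, working configuration:

```hcl
module "vault" {
  # Placeholder source; in real usage, pin to a tagged release of this repo.
  source = "github.com/hashicorp/terraform-google-vault"

  # Required parameters (no defaults).
  gcp_project_id             = "my-gcp-project"
  gcp_region                 = "us-east1"
  vault_cluster_name         = "vault-server-prod"
  vault_source_image         = "vault-consul-image" # built from /examples/vault-consul-image
  consul_server_cluster_name = "consul-server-prod"
  consul_server_source_image = "vault-consul-image"

  # A few optional parameters, shown here with their defaults.
  vault_cluster_size         = 3
  consul_server_cluster_size = 3
  enable_vault_ui            = true
}
```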