├── .circleci └── config.yml ├── .env.aws.example ├── .gitignore ├── .pre-commit-config.yaml ├── CODEOWNERS ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── _ci ├── publish-amis-in-new-account.md └── publish-amis.sh ├── _docs ├── architecture.png ├── consul-ui-screenshot.png └── package-managers.md ├── examples ├── README.md ├── consul-ami │ ├── README.md │ └── consul.json ├── consul-examples-helper │ ├── README.md │ └── consul-examples-helper.sh ├── example-with-custom-asg-role │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── user-data-client.sh │ ├── user-data-server.sh │ └── variables.tf ├── example-with-encryption │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── packer │ │ ├── README.md │ │ ├── ca.crt.pem │ │ ├── consul-with-certs.json │ │ ├── consul.crt.pem │ │ └── consul.key.pem │ ├── user-data-client.sh │ ├── user-data-server.sh │ └── variables.tf └── root-example │ ├── README.md │ ├── user-data-client.sh │ └── user-data-server.sh ├── main.tf ├── modules ├── README.md ├── consul-client-security-group-rules │ ├── README.md │ ├── main.tf │ └── variables.tf ├── consul-cluster │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── consul-iam-policies │ ├── README.md │ ├── main.tf │ └── variables.tf ├── consul-security-group-rules │ ├── README.md │ ├── main.tf │ └── variables.tf ├── install-consul │ ├── README.md │ └── install-consul ├── install-dnsmasq │ ├── README.md │ └── install-dnsmasq ├── run-consul │ ├── README.md │ └── run-consul └── setup-systemd-resolved │ ├── README.md │ └── setup-systemd-resolved ├── outputs.tf ├── test ├── README.md ├── aws_helpers.go ├── consul_cluster_test.go ├── consul_cluster_with_custom_asg_role_test.go ├── consul_cluster_with_encryption_test.go ├── consul_enterprise_test.go ├── consul_helpers.go ├── go.mod ├── go.sum └── terratest_helpers.go └── variables.tf /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 
defaults: &defaults 3 | docker: 4 | - image: 087285199408.dkr.ecr.us-east-1.amazonaws.com/circle-ci-test-image-base:tf13 5 | 6 | version: 2 7 | jobs: 8 | test: 9 | <<: *defaults 10 | steps: 11 | - checkout 12 | - run: echo 'export PATH=$HOME/terraform:$HOME/packer:$PATH' >> $BASH_ENV 13 | - run: 14 | # Fail the build if the pre-commit hooks don't pass. Note: if you run $ pre-commit install locally within this repo, these hooks will 15 | # execute automatically every time before you commit, ensuring the build never fails at this step! 16 | name: run pre-commit hooks 17 | command: | 18 | pip install pre-commit==1.21.0 cfgv==2.0.1 19 | pre-commit install 20 | pre-commit run --all-files 21 | - run: 22 | command: | 23 | mkdir -p /tmp/logs 24 | run-go-tests --path test | tee /tmp/logs/test-all.log 25 | - run: 26 | command: terratest_log_parser --testlog /tmp/logs/test-all.log --outputdir /tmp/logs 27 | when: always 28 | - store_artifacts: 29 | path: /tmp/logs 30 | - store_test_results: 31 | path: /tmp/logs 32 | 33 | deploy: 34 | <<: *defaults 35 | steps: 36 | - checkout 37 | - run: echo 'export PATH=$HOME/terraform:$HOME/packer:$PATH' >> $BASH_ENV 38 | - run: sudo -E gruntwork-install --module-name "aws-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "v0.12.2" 39 | - run: sudo -E gruntwork-install --module-name "git-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "v0.12.2" 40 | - run: sudo -E gruntwork-install --module-name "build-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "v0.12.2" 41 | 42 | # We generally only want to build AMIs on new releases, but when we are setting up AMIs in a new account for the 43 | # first time, we want to build the AMIs but NOT run automated tests, since those tests will fail without an existing 44 | # AMI already in the AWS Account. 
45 |       - run: _ci/publish-amis.sh "ubuntu16-ami"
46 |       - run: _ci/publish-amis.sh "ubuntu18-ami"
47 |       - run: _ci/publish-amis.sh "amazon-linux-2-ami"
48 | 
49 | workflows:
50 |   version: 2
51 |   build-and-test:
52 |     jobs:
53 |       - test:
54 |           filters:
55 |             branches:
56 |               ignore: publish-amis
57 |       - deploy:
58 |           filters:
59 |             branches:
60 |               only: publish-amis
61 |             tags:
62 |               only: /^v.*/
63 |   nightly-test:
64 |     triggers:
65 |       - schedule:
66 |           cron: "0 0 * * *"
67 |           filters:
68 |             branches:
69 |               only:
70 |                 - master
71 |     jobs:
72 |       # This workflow previously referenced an undefined `build` job, which makes
73 |       # the CircleCI config invalid (only `test` and `deploy` jobs are defined).
74 |       # Run the existing `test` job directly instead.
75 |       - test
76 | 
--------------------------------------------------------------------------------
/.env.aws.example:
--------------------------------------------------------------------------------
1 | AWS_ACCESS_KEY_ID=
2 | AWS_SECRET_ACCESS_KEY=
3 | AWS_DEFAULT_REGION=
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # Terraform files
 2 | .terraform
 3 | terraform.tfstate
 4 | terraform.tfvars
 5 | *.tfstate*
 6 | 
 7 | # OS X files
 8 | .history
 9 | .DS_Store
10 | 
11 | # IntelliJ files
12 | .idea_modules
13 | *.iml
14 | *.iws
15 | *.ipr
16 | .idea/
17 | build/
18 | */build/
19 | out/
20 | # VIM Swap files
21 | *.swp
22 | 
23 | # Go best practices dictate that libraries should not include the vendor directory
24 | vendor
25 | 
26 | # Folder used to store temporary test data by Terratest
27 | .test-data
28 | 
29 | # secrets
30 | .env.aws
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 |   - repo: https://github.com/gruntwork-io/pre-commit
3 |     rev: v0.1.10
4 |     hooks:
5 |       - id: terraform-fmt
6 |       - id: gofmt
--------------------------------------------------------------------------------
/CODEOWNERS:
-------------------------------------------------------------------------------- 1 | * @robmorgan @Etiene 2 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guidelines 2 | 3 | Contributions to this Module are very welcome! We follow a fairly standard [pull request 4 | process](https://help.github.com/articles/about-pull-requests/) for contributions, subject to the following guidelines: 5 | 6 | 1. [File a GitHub issue](#file-a-github-issue) 7 | 1. [Update the documentation](#update-the-documentation) 8 | 1. [Update the tests](#update-the-tests) 9 | 1. [Update the code](#update-the-code) 10 | 1. [Create a pull request](#create-a-pull-request) 11 | 1. [Merge and release](#merge-and-release) 12 | 13 | ## File a GitHub issue 14 | 15 | Before starting any work, we recommend filing a GitHub issue in this repo. This is your chance to ask questions and 16 | get feedback from the maintainers and the community before you sink a lot of time into writing (possibly the wrong) 17 | code. If there is anything you're unsure about, just ask! 18 | 19 | ## Update the documentation 20 | 21 | We recommend updating the documentation *before* updating any code (see [Readme Driven 22 | Development](http://tom.preston-werner.com/2010/08/23/readme-driven-development.html)). This ensures the documentation 23 | stays up to date and allows you to think through the problem at a high level before you get lost in the weeds of 24 | coding. 25 | 26 | ## Update the tests 27 | 28 | We also recommend updating the automated tests *before* updating any code (see [Test Driven 29 | Development](https://en.wikipedia.org/wiki/Test-driven_development)). That means you add or update a test case, 30 | verify that it's failing with a clear error message, and *then* make the code changes to get that test to pass. 
This
31 | ensures the tests stay up to date and verify all the functionality in this Module, including whatever new
32 | functionality you're adding in your contribution. Check out the [tests](https://github.com/hashicorp/terraform-aws-consul/tree/master/test) folder for instructions on running the
33 | automated tests.
34 | 
35 | ## Update the code
36 | 
37 | At this point, make your code changes and use your new test case to verify that everything is working. As you work,
38 | keep in mind two things:
39 | 
40 | 1. Backwards compatibility
41 | 1. Downtime
42 | 
43 | ### Backwards compatibility
44 | 
45 | Please make every effort to avoid unnecessary backwards incompatible changes. With Terraform code, this means:
46 | 
47 | 1. Do not delete, rename, or change the type of input variables.
48 | 1. If you add an input variable, it should have a `default`.
49 | 1. Do not delete, rename, or change the type of output variables.
50 | 1. Do not delete or rename a module in the `modules` folder.
51 | 
52 | If a backwards incompatible change cannot be avoided, please make sure to call that out when you submit a pull request,
53 | explaining why the change is absolutely necessary.
54 | 
55 | ### Downtime
56 | 
57 | Bear in mind that the Terraform code in this Module is used by real companies to run real infrastructure in
58 | production, and certain types of changes could cause downtime. For example, consider the following:
59 | 
60 | 1. If you rename a resource (e.g. `aws_instance "foo"` -> `aws_instance "bar"`), Terraform will see that as deleting
61 |    the old resource and creating a new one.
62 | 1. If you change certain attributes of a resource (e.g. the `name` of an `aws_elb`), the cloud provider (e.g. AWS) may
63 |    treat that as an instruction to delete the old resource and create a new one.
64 | 
65 | Deleting certain types of resources (e.g. virtual servers, load balancers) can cause downtime, so when making code
66 | changes, think carefully about how to avoid that.
For example, can you avoid downtime by using 67 | [create_before_destroy](https://www.terraform.io/docs/configuration/resources.html#create_before_destroy)? Or via 68 | the `terraform state` command? If so, make sure to note this in our pull request. If downtime cannot be avoided, 69 | please make sure to call that out when you submit a pull request. 70 | 71 | ## Create a pull request 72 | 73 | [Create a pull request](https://help.github.com/articles/creating-a-pull-request/) with your changes. Please make sure 74 | to include the following: 75 | 76 | 1. A description of the change, including a link to your GitHub issue. 77 | 1. The output of your automated test run, preferably in a [GitHub Gist](https://gist.github.com/). We cannot run 78 | automated tests for pull requests automatically due to [security 79 | concerns](https://circleci.com/docs/fork-pr-builds/#security-implications), so we need you to manually provide this 80 | test output so we can verify that everything is working. 81 | 1. Any notes on backwards incompatibility or downtime. 82 | 83 | ## Merge and release 84 | 85 | The maintainers for this repo will review your code and provide feedback. If everything looks good, they will merge the 86 | code and release a new version, which you'll be able to find in the [releases page](../../releases). -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | cnf ?= .env.aws 2 | include $(cnf) 3 | export $(shell sed 's/=.*//' $(cnf)) 4 | 5 | # Get the latest tag 6 | TAG=$(shell git describe --tags --abbrev=0) 7 | GIT_COMMIT=$(shell git log -1 --format=%h) 8 | AWS_ACCOUNT=178520105998 9 | TERRAFORM_VERSION=0.12.28 10 | 11 | # HELP 12 | # This will output the help for each task 13 | # thanks to https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html 14 | .PHONY: help 15 | 16 | help: ## This help. 
17 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 18 | 19 | .DEFAULT_GOAL := help 20 | 21 | terraform-init: ## Run terraform init to download all necessary plugins 22 | docker run --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/terraform:$(TERRAFORM_VERSION) init -upgrade=true 23 | 24 | terraform-plan: ## Exec a terraform plan and puts it on a file called tfplan 25 | docker run --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/terraform:$(TERRAFORM_VERSION) plan -out=tfplan 26 | 27 | terraform-apply: ## Uses tfplan to apply the changes on AWS. 28 | docker run --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/terraform:$(TERRAFORM_VERSION) apply -auto-approve 29 | 30 | terraform-destroy: ## Destroy all resources created by the terraform file in this repo. 
31 | docker run --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/terraform:$(TERRAFORM_VERSION) destroy -auto-approve 32 | 33 | terraform-set-workspace-dev: ## Set workspace dev 34 | docker run --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/terraform:$(TERRAFORM_VERSION) workspace select dev 35 | 36 | terraform-set-workspace-prod: ## Set workspace production 37 | docker run --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/terraform:$(TERRAFORM_VERSION) workspace select prod 38 | 39 | terraform-set-workspace-staging: ## Set workspace staging 40 | docker run --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/terraform:$(TERRAFORM_VERSION) workspace select staging 41 | 42 | terraform-new-workspace-staging: ## Create workspace staging 43 | docker run --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/terraform:$(TERRAFORM_VERSION) workspace new staging 44 | 45 | terraform-sh: ## terraform console 46 | docker run -it --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY 
-e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) --entrypoint "" hashicorp/terraform:$(TERRAFORM_VERSION) sh 47 | 48 | packer-build: ## packer build 49 | docker run -it --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) hashicorp/packer build examples/consul-ami/consul.json 50 | 51 | packer-sh: ## packer console 52 | docker run -it --rm -v $$PWD:/app -v $$HOME/.ssh/:/root/.ssh/ -w /app/ -e AWS_ACCESS_KEY_ID=$$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$$AWS_SECRET_ACCESS_KEY -e AWS_DEFAULT_REGION=$$AWS_DEFAULT_REGION -e TF_VAR_APP_VERSION=$(GIT_COMMIT) --entrypoint "" hashicorp/packer sh 53 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | terraform-aws-consul 2 | Copyright 2017 Gruntwork, Inc. 3 | 4 | This product includes software developed at Gruntwork (http://www.gruntwork.io/). -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Maintained by Gruntwork.io](https://img.shields.io/badge/maintained%20by-gruntwork.io-%235849a6.svg)](https://gruntwork.io/?ref=repo_aws_consul) 2 | # Consul AWS Module 3 | 4 | This repo contains a set of modules in the [modules folder](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules) for deploying a [Consul](https://www.consul.io/) cluster on 5 | [AWS](https://aws.amazon.com/) using [Terraform](https://www.terraform.io/). Consul is a distributed, highly-available 6 | tool that you can use for service discovery and key/value storage. 
A Consul cluster typically includes a small number 7 | of server nodes, which are responsible for being part of the [consensus 8 | quorum](https://www.consul.io/docs/internals/consensus.html), and a larger number of client nodes, which you typically 9 | run alongside your apps: 10 | 11 | ![Consul architecture](https://github.com/hashicorp/terraform-aws-consul/blob/master/_docs/architecture.png?raw=true) 12 | 13 | 14 | 15 | ## How to use this Module 16 | 17 | This repo has the following folder structure: 18 | 19 | * [modules](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules): This folder contains several standalone, reusable, production-grade modules that you can use to deploy Consul. 20 | * [examples](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples): This folder shows examples of different ways to combine the modules in the `modules` folder to deploy Consul. 21 | * [test](https://github.com/hashicorp/terraform-aws-consul/tree/master/test): Automated tests for the modules and examples. 22 | * [root folder](https://github.com/hashicorp/terraform-aws-consul/tree/master): The root folder is *an example* of how to use the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) 23 | module to deploy a [Consul](https://www.consul.io/) cluster in [AWS](https://aws.amazon.com/). The Terraform Registry requires the root of every repo to contain Terraform code, so we've put one of the examples there. This example is great for learning and experimenting, but for production use, please use the underlying modules in the [modules folder](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules) directly. 24 | 25 | To deploy Consul servers for production using this repo: 26 | 27 | 1. Create a Consul AMI using a Packer template that references the [install-consul module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul). 
28 | Here is an [example Packer template](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami#quick-start). 29 | 30 | If you are just experimenting with this Module, you may find it more convenient to use one of our official public AMIs. 31 | Check out the `aws_ami` data source usage in `main.tf` for how to auto-discover this AMI. 32 | 33 | **WARNING! Do NOT use these AMIs in your production setup. In production, you should build your own AMIs in your own 34 | AWS account.** 35 | 36 | 1. Deploy that AMI across an Auto Scaling Group using the Terraform [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) 37 | and execute the [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul) with the `--server` flag during boot on each 38 | Instance in the Auto Scaling Group to form the Consul cluster. Here is [an example Terraform 39 | configuration](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example#quick-start) to provision a Consul cluster. 40 | 41 | To deploy Consul clients for production using this repo: 42 | 43 | 1. Use the [install-consul module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul) to install Consul alongside your application code. 44 | 1. Before booting your app, execute the [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul) with `--client` flag. 45 | 1. Your app can now use the local Consul agent for service discovery and key/value storage. 46 | 1. 
Optionally, you can use the [install-dnsmasq module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-dnsmasq) for Ubuntu 16.04 and Amazon Linux 2 or [setup-systemd-resolved](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/setup-systemd-resolved) for Ubuntu 18.04 to configure Consul as the DNS for a
47 |    specific domain (e.g. `.consul`) so that URLs such as `foo.service.consul` resolve automatically to the IP
48 |    address(es) for a service `foo` registered in Consul (all other domain names will continue to resolve using the
49 |    default resolver on the OS).
50 | 
51 | 
52 | 
53 | 
54 | ## What's a Module?
55 | 
56 | A Module is a canonical, reusable, best-practices definition for how to run a single piece of infrastructure, such
57 | as a database or server cluster. Each Module is created using [Terraform](https://www.terraform.io/), and
58 | includes automated tests, examples, and documentation. It is maintained both by the open source community and
59 | companies that provide commercial support.
60 | 
61 | Instead of figuring out the details of how to run a piece of infrastructure from scratch, you can reuse
62 | existing code that has been proven in production. And instead of maintaining all that infrastructure code yourself,
63 | you can leverage the work of the Module community to pick up infrastructure improvements through
64 | a version number bump.
65 | 
66 | 
67 | 
68 | ## Who maintains this Module?
69 | 
70 | This Module is maintained by [Gruntwork](http://www.gruntwork.io/). If you're looking for help or commercial
71 | support, send an email to [modules@gruntwork.io](mailto:modules@gruntwork.io?Subject=Consul%20Module).
72 | Gruntwork can help with:
73 | 
74 | * Setup, customization, and support for this Module.
75 | * Modules for other types of infrastructure, such as VPCs, Docker clusters, databases, and continuous integration.
76 | * Modules that meet compliance requirements, such as HIPAA.
77 | * Consulting & Training on AWS, Terraform, and DevOps. 78 | 79 | 80 | 81 | ## Code included in this Module: 82 | 83 | * [install-consul](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul): This module installs Consul using a 84 | [Packer](https://www.packer.io/) template to create a Consul 85 | [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html). 86 | 87 | * [consul-cluster](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster): The module includes Terraform code to deploy a Consul AMI across an [Auto 88 | Scaling Group](https://aws.amazon.com/autoscaling/). 89 | 90 | * [run-consul](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul): This module includes the scripts to configure and run Consul. It is used 91 | by the above Packer module at build-time to set configurations, and by the Terraform module at runtime 92 | with [User Data](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html#user-data-shell-scripts) 93 | to create the cluster. 94 | 95 | * [install-dnsmasq module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-dnsmasq): Install [Dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) 96 | for Ubuntu 16.04 and Amazon Linux 2 and configure it to forward requests for a specific domain to Consul. This allows you to use Consul as a DNS server 97 | for URLs such as `foo.service.consul`. 98 | 99 | * [setup-systemd-resolved module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/setup-systemd-resolved): Setup [systemd-resolved](https://www.freedesktop.org/software/systemd/man/resolved.conf.html) 100 | for ubuntu 18.04 and configure it to forward requests for a specific domain to Consul. This allows you to use Consul as a DNS server 101 | for URLs such as `foo.service.consul`. 
102 | 103 | * [consul-iam-policies](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-iam-policies): Defines the IAM policies necessary for a Consul cluster. 104 | 105 | * [consul-security-group-rules](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-security-group-rules): Defines the security group rules used by a 106 | Consul cluster to control the traffic that is allowed to go in and out of the cluster. 107 | 108 | * [consul-client-security-group-rules](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-client-security-group-rules): Defines the security group rules 109 | used by a Consul agent to control the traffic that is allowed to go in and out. 110 | 111 | 112 | 113 | ## How do I contribute to this Module? 114 | 115 | Contributions are very welcome! Check out the [Contribution Guidelines](https://github.com/hashicorp/terraform-aws-consul/tree/master/CONTRIBUTING.md) for instructions. 116 | 117 | 118 | 119 | ## How is this Module versioned? 120 | 121 | This Module follows the principles of [Semantic Versioning](http://semver.org/). You can find each new release, 122 | along with the changelog, in the [Releases Page](../../releases). 123 | 124 | During initial development, the major version will be 0 (e.g., `0.x.y`), which indicates the code does not yet have a 125 | stable API. Once we hit `1.0.0`, we will make every effort to maintain a backwards compatible API and use the MAJOR, 126 | MINOR, and PATCH versions on each release to indicate any incompatibilities. 127 | 128 | 129 | 130 | ## License 131 | 132 | This code is released under the Apache 2.0 License. Please see [LICENSE](https://github.com/hashicorp/terraform-aws-consul/tree/master/LICENSE) and [NOTICE](https://github.com/hashicorp/terraform-aws-consul/tree/master/NOTICE) for more 133 | details. 134 | 135 | Copyright © 2017 Gruntwork, Inc. 
136 | -------------------------------------------------------------------------------- /_ci/publish-amis-in-new-account.md: -------------------------------------------------------------------------------- 1 | # How to Publish AMIs in a New AWS Account 2 | 3 | This readme discusses how to migrate the `publish-amis.sh` script to a new AWS account. 4 | 5 | To make using this Module as easy as possible, we want to automatically build and publish AMIs based on the 6 | [/examples/consul-ami/consul.json](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami/consul.json) Packer template upon every release of this repo. 7 | This way, users can simply git clone this repo and `terraform apply` the [consul-cluster example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example) 8 | without first having to build their own AMI. Note that the auto-built AMIs are meant mostly for first-time users to 9 | easily try out a Module. In a production setting, many users will want to validate the contents of their AMI by 10 | manually building it in their own account. 11 | 12 | Unfortunately, auto-building AMIs creates a chicken-and-egg problem. How can we run code that automatically finds the 13 | latest AMI until that AMI actually exists? But to build those AMIs, we have to run a build in CircleCI, which also runs 14 | automated tests, which will fail when they cannot find the desired AMI. 15 | 16 | Our solution is that, for the `publish-amis` git branch only, on every commit, we will build and publish AMIs but we will 17 | not run tests. For all other branches, AMIs will only be built upon a new git tag (GitHub release), and tests will be 18 | run on every commit as usual. These settings are configured in the [circle.yml](https://github.com/hashicorp/terraform-aws-consul/tree/master/.circleci/config.yml) file. 
19 | 20 | In addition to the above, don't forget to update the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment 21 | variables in CircleCI to reflect the new AWS account. 22 | 23 | Finally, note that, on a brand new account, many AWS regions are limited to just 5 EC2 Instances in an Auto Scaling Group, 24 | but the automated tests in this repo create up to 10 EC2 Instances. Therefore, automated tests will fail if they run in 25 | a region with too small a limit. To avoid this issue, request an increase in the number of t2-family EC2 Instances 26 | allowed in every AWS region from AWS support. 27 | -------------------------------------------------------------------------------- /_ci/publish-amis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Build the example AMI, copy it to all AWS regions, and make all AMIs public. 4 | # 5 | # This script is meant to be run in a CircleCI job. 6 | # 7 | 8 | set -e 9 | 10 | readonly PACKER_TEMPLATE_PATH="examples/consul-ami/consul.json" 11 | readonly PACKER_TEMPLATE_DEFAULT_REGION="us-east-1" 12 | readonly AMI_PROPERTIES_FILE="/tmp/ami.properties" 13 | 14 | # In CircleCI, every build populates the branch name in CIRCLE_BRANCH except builds triggered by a new tag, for which 15 | # the CIRCLE_BRANCH env var is empty. We assume tags are only issued against the master branch. 16 | readonly BRANCH_NAME="${CIRCLE_BRANCH:-master}" 17 | 18 | readonly PACKER_BUILD_NAME="$1" 19 | 20 | if [[ -z "$PACKER_BUILD_NAME" ]]; then 21 | echo "ERROR: You must pass in the Packer build name as the first argument to this function." 22 | exit 1 23 | fi 24 | 25 | if [[ -z "$PUBLISH_AMI_AWS_ACCESS_KEY_ID" || -z "$PUBLISH_AMI_AWS_SECRET_ACCESS_KEY" ]]; then 26 | echo "The PUBLISH_AMI_AWS_ACCESS_KEY_ID and PUBLISH_AMI_AWS_SECRET_ACCESS_KEY environment variables must be set to the AWS credentials to use to publish the AMIs." 
27 | exit 1 28 | fi 29 | 30 | echo "Checking out branch $BRANCH_NAME to make sure we do all work in a branch and not in detached HEAD state" 31 | git checkout "$BRANCH_NAME" 32 | 33 | # We publish the AMIs to a different AWS account, so set those credentials 34 | export AWS_ACCESS_KEY_ID="$PUBLISH_AMI_AWS_ACCESS_KEY_ID" 35 | export AWS_SECRET_ACCESS_KEY="$PUBLISH_AMI_AWS_SECRET_ACCESS_KEY" 36 | 37 | # Build the example AMI. WARNING! In a production setting, you should build your own AMI to ensure it has exactly the 38 | # configuration you want. We build this example AMI solely to make initial use of this Module as easy as possible. 39 | build-packer-artifact \ 40 | --packer-template-path "$PACKER_TEMPLATE_PATH" \ 41 | --build-name "$PACKER_BUILD_NAME" \ 42 | --output-properties-file "$AMI_PROPERTIES_FILE" 43 | 44 | # Copy the AMI to all regions and make it public in each 45 | source "$AMI_PROPERTIES_FILE" 46 | publish-ami \ 47 | --all-regions \ 48 | --source-ami-id "$ARTIFACT_ID" \ 49 | --source-ami-region "$PACKER_TEMPLATE_DEFAULT_REGION" \ 50 | --markdown-title-text "$PACKER_BUILD_NAME: Latest Public AMIs" \ 51 | --markdown-description-text "**WARNING! Do NOT use these AMIs in a production setting.** They are meant only to make 52 | initial experiments with this module more convenient." 
53 | -------------------------------------------------------------------------------- /_docs/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gomex/terraform-aws-consul/022b45283357b478b96aa0ceb79c9c79d4730fa1/_docs/architecture.png -------------------------------------------------------------------------------- /_docs/consul-ui-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gomex/terraform-aws-consul/022b45283357b478b96aa0ceb79c9c79d4730fa1/_docs/consul-ui-screenshot.png -------------------------------------------------------------------------------- /_docs/package-managers.md: -------------------------------------------------------------------------------- 1 | # Package Managers 2 | 3 | To create the scripts in `install-consul`, we had to find a way to write and package the scripts that satisfied a 4 | number of requirements. This document captures the requirements, the options we considered, and an explanation of 5 | which option we picked and why. 6 | 7 | 8 | 9 | ## The requirements 10 | 11 | We need to write and package the scripts in this Module in a way that satisfies the following requirements: 12 | 13 | - **Packages**. There needs to be a way to fetch these scripts from a canonical location (e.g. GitHub repo, package 14 | manager repository) at a specific version number (e.g. `v0.0.3` of `install-consul`), much like a package manager. 15 | We don't want people copy/pasting these scripts into their local repos, or it'll make upgrades and maintenance 16 | difficult. 17 | 18 | - **Cross-platform**. The packaging system should work on most major Linux distributions. It should also work on OS X, 19 | as that's what many people use for development. 20 | 21 | - **Handles dependencies**. 
These scripts rely on certain dependencies being installed on the system, such as `curl`, 22 | `wget`, `jq`, `aws`, and so on. We need a way to automatically manage and install these dependencies that works 23 | across all major Linux distributions. 24 | 25 | - **Simple package manager installation**: We don't want a package manager that takes a dozen steps to install. 26 | 27 | - **Simple client usage**. The scripts in this Module are fairly simple, so it shouldn't take a dozen steps to 28 | install one. Ideally, we can use a one-liner such as `apt install -y install-consul`, except it should work on all 29 | major Linux distributions. 30 | 31 | - **Simple publish usage**. We need a fast and reliable way to publish new versions of the scripts. Ideally, we'd avoid 32 | having to publish each update to multiple package repos (apt, yum, etc), especially if that requires any sort of 33 | manual approval (e.g. a PR for each new version). 34 | 35 | - **Testable in dev mode**. We must be able to do local, iterative development on the example code in the 36 | [examples](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples) folder. That means there is a way to "package" these scripts so that, in dev mode, they are 37 | downloaded from the local file system, rather than some package repo such as apt or yum. 38 | 39 | - **Mature**: We want to use a solution that is mature, battle-tested, and has an active community around it. 40 | 41 | 42 | 43 | ## The options 44 | 45 | Here are the options we've looked at. 46 | 47 | ### [Nix](https://nixos.org/nix/) 48 | 49 | - **Description**: Purely functional package manager, so dependency versioning, rollback, etc works very cleanly. 50 | - **Dependency Management**: Yes. 51 | - **Install process**: Simple. `bash <(curl https://nixos.org/nix/install)`. 52 | - **Client usage**: Simple. `nix-env --install PACKAGE`. 53 | - **Publish usage**: Complicated. 
Nix has its own [expression 54 | language](https://nixos.org/nix/manual/#sec-expression-syntax), which I found fairly confusing. The docs are 55 | so-so. Creating new packages and pushing new versions seems to require [a pull 56 | request](https://nixos.org/wiki/Create_and_debug_nix_packages). 57 | - **Dev mode**: Complicated. Not clear how to use it in dev mode. 58 | - **Maturity**: Moderate. It's been around a while and there is a community around it, but it's buggy and confusing to 59 | use. 60 | 61 | **Verdict**: It's confusing to use, slow (every install downloads the universe), buggy on OS X, and it's not clear how 62 | to use it in dev mode. 63 | 64 | ### [tpkg](http://tpkg.github.io/) 65 | 66 | - **Description**: Package apps as super-powered tar files. 67 | - **Dependency Management**. Yes. Supports both native installers (e.g. apt) and tpkg itself. 68 | - **Install process**: Difficult. Requires Ruby and Ruby Gems to be installed first, so every Packer template would 69 | have to install Ruby and Gem (e.g. `sudo apt install ruby`), which some people won't want on their production 70 | servers, and then install `tpkg`: `sudo gem install tpkg`. 71 | - **Client usage**: Simple. `tpkg --install PACKAGE`. 72 | - **Publish usage**: Simple. `tpkg --make PATH`. That produces a file you can upload to your own [package 73 | server](http://tpkg.github.io/package_server.html), which can be any web server that hosts the file and a special 74 | metadata file. Might be able to use GitHub releases or S3 for this. 75 | - **Dev mode**: Simple. The `tpkg --make PATH` command makes the package available for local install. 76 | - **Maturity**: Poor. Might be a dead project. The [GitHub repo](https://github.com/tpkg) has almost no followers. 77 | Only a couple commits in the last few years. 78 | 79 | **Verdict**: Dependency on Ruby and the lack of community activity is a no-go. 
80 | 81 | ### [Snap](https://snapcraft.io/) 82 | 83 | - **Description**: A way to install "apps" on all major Linux distributions. It seems like it's designed for standalone apps and 84 | binaries rather than scripting. Packages, called "snaps", are completely isolated from each other and the host OS 85 | (using cgroups?) and can define interfaces, slots, plugs, etc to communicate with each other (a bit like "type 86 | safety"). 87 | - **Dependency Management**: No. Or at least, I can't find it. 88 | - **Install process**: Simple. `sudo apt install snapd`. 89 | - **Client usage**: Simple. `sudo snap install PACKAGE`. 90 | - **Publish usage**: Complicated. You have to sign up for an account in the [Ubuntu 91 | Store](https://myapps.developer.ubuntu.com/), install a separate app (`sudo apt install snapcraft`), login 92 | (`snapcraft login`), configure channels (stable, beta, etc); after that, it's an easy `snapcraft push` command 93 | for each new version. 94 | - **Dev mode**: Simple. `snapcraft` supports it. 95 | - **Maturity**: Moderate. Community seems fairly active, as this is a project maintained by Canonical. 96 | 97 | **Verdict**: It only works on Linux, so hard to do development. 98 | 99 | ### `curl | bash` 100 | 101 | - **Description**: Upload our scripts to Git, release them with version numbers, and pipe `curl` into `bash` to run them. 102 | - **Dependency Management**: No. 103 | - **Install process**: Simple. Nothing to install! Well, perhaps `curl`, but that's as simple as it gets. 104 | - **Client usage**: Simple. `curl -Ls https://raw.githubusercontent.com/foo/bar/v0.0.3/install-consul | bash /dev/stdin`. 105 | Unfortunately, without any checksum or signature verification, this is a mild security risk if the GitHub repo 106 | gets hijacked. Moreover, this only works for individual files. If the script has dependencies, those have to 107 | be downloaded separately. 108 | - **Publish usage**: Simple. Just create a new GitHub release. 
109 | - **Dev mode**: Simple. Just change the URL to a local file path. 110 | - **Maturity**: Strong. No need for a community, as we're just using `curl`! 111 | 112 | **Verdict**: This only works well for a single file. Of course, that file could download other files, but to do that, 113 | the file has to know what version it is, what to use to download, where to download to, etc. 114 | 115 | 116 | ### [Gruntwork Installer](https://github.com/gruntwork-io/gruntwork-installer) 117 | 118 | - **Description**: A slightly more structured version of piping `curl` into `bash`. You specify a GitHub repo, a path, and a version 119 | number and the installer checks out the repo at the specified version, and runs an `install.sh` script in the 120 | specified path. 121 | - **Dependency Management**: No. It's up to the `install.sh` script to figure out the details. 122 | - **Install process**: Simple. `curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/master/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version v0.0.14`. 123 | Note, this is subject to the same security risks as piping `curl` into `bash`. Since there is just one installer 124 | and we don't update it often, we *could* publish it into apt, yum, etc repos to avoid this problem. 125 | - **Client usage**: Simple. `gruntwork-install --module-name 'PATH' --repo 'https://github.com/foo/bar' --tag v0.0.3`. 126 | Does not currently do checksum or signature verification, but that could be added. 127 | - **Publish usage**: Simple. Just create a new GitHub release. Works with private GitHub repos too. 128 | - **Dev mode**: Simple. Just specify a local file path. 129 | - **Maturity**: Poor. The community is tiny, though this project is actively maintained by Gruntwork. 130 | 131 | **Verdict**: A bit too specific to Gruntwork's use case. 
132 | 133 | ### [fpm](https://github.com/jordansissel/fpm) 134 | 135 | - **Description**: A script that makes it easy to package your code as native packages (e.g. `.deb`, `.rpm`). 136 | - **Dependency Management**: Yes. 137 | - **Install process**: Simple. No install process, as you use your standard OS package managers (i.e. `apt`, `yum`). 138 | - **Client usage**: Simple. `sudo apt install -y PACKAGE`. 139 | - **Publish usage**: Complicated. You have to package and publish to all major Linux package repos. 140 | - **Dev mode**: Complicated. Not clear how you use it in dev mode. 141 | - **Maturity**: Strong. Big community, active project. 142 | 143 | **Verdict**: Requires publishing to multiple repos for every release, which is complicated. 144 | 145 | ### Configuration management tools (e.g. [Ansible](https://www.ansible.com/), [Chef](https://www.chef.io/)) 146 | 147 | - **Description**: Tools built for managing server configuration. 148 | - **Dependency Management**: Yes. Most cfg mgmt tools have ways of leveraging the built-in package managers 149 | (e.g. [package command in Chef](https://docs.chef.io/resource_package.html) and [package command in 150 | Ansible](http://docs.ansible.com/ansible/package_module.html)). 151 | - **Install process**: Simple. Packer can do it automatically for [Chef 152 | Solo](https://www.packer.io/docs/provisioners/chef-solo.html) and you can do it manually for Ansible: 153 | `sudo apt install -y ansible`. 154 | - **Client usage**: Complicated. You first have to download the Chef Recipe or Ansible Playbook from the Module 155 | repo (e.g. using a `shell-local` provisioner with `curl`) and then you can use the downloaded recipe or playbook 156 | with the built-in Packer commands (e.g. [chef-solo 157 | Provisioner](https://www.packer.io/docs/provisioners/chef-solo.html) and [ansible-local 158 | Provisioner](https://www.packer.io/docs/provisioners/ansible-local.html)). 159 | - **Publish usage**: Simple. 
Just create a new GitHub release. 160 | - **Dev mode**: Simple. Just use local file paths for the recipes and playbooks. 161 | - **Maturity**: Strong. All these cfg mgmt tools have massive communities. 162 | 163 | **Verdict**: Requires installing the tools on each server and learning a new set of tools, which feels like overkill 164 | for a few simple scripts. 165 | 166 | ### Git 167 | 168 | - **Description**: Run `git clone` with the `--branch` parameter (which can be set to a tag) to check out a specific version of the 169 | code. 170 | - **Dependency Management**: No. 171 | - **Install process**: Simple. Just install Git, if it's not installed already. 172 | - **Client usage**: Simple. Once you've run `git clone`, all the code you need is on disk, and you just execute it. 173 | - **Publish usage**: Simple. Just create a new GitHub release. 174 | - **Dev mode**: Simple. Just use your local checkout. 175 | - **Maturity**: Strong. It's Git, so the community is massive. 176 | 177 | **Verdict**: The biggest missing feature is dependency management, but it's a perfect fit in every other way, so 178 | this is our choice. -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | ## NOTE: About [/modules](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules) and [/examples](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples) 2 | 3 | HashiCorp's Terraform Registry requires every repo to have a `main.tf` in its root dir. The Consul code is broken down into multiple sub-modules, so they can't all be in the root dir [/](https://github.com/hashicorp/terraform-aws-consul/tree/master). 
Therefore, Consul's sub-modules are in the [/modules](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules) subdirectory, the example code is in the [/examples](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples) subdirectory, and the root dir [/](https://github.com/hashicorp/terraform-aws-consul/tree/master) _also_ has an example in it, as described in [root-example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example). 4 | 5 | More info: https://github.com/hashicorp/terraform-aws-consul/pull/79/files/079e75015a5d89e7ffc89997aa0904e9de4cdb97#r212763365 6 | -------------------------------------------------------------------------------- /examples/consul-ami/README.md: -------------------------------------------------------------------------------- 1 | # Consul AMI 2 | 3 | This folder shows an example of how to use the [install-consul](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul) and 4 | either [install-dnsmasq](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-dnsmasq) for Ubuntu 16.04 and Amazon Linux 2 or [setup-systemd-resolved](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/setup-systemd-resolved) for Ubuntu 18.04 modules with [Packer](https://www.packer.io/) to create [Amazon Machine 5 | Images (AMIs)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that have Consul and Dnsmasq installed on 6 | top of: 7 | 8 | 1. Ubuntu 16.04 9 | 1. Ubuntu 18.04 10 | 1. Amazon Linux 2 11 | 12 | These AMIs will have [Consul](https://www.consul.io/) installed and configured to automatically join a cluster during 13 | boot-up. They also have [Dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) installed and configured to use 14 | Consul for DNS lookups of the `.consul` domain (e.g. 
`foo.service.consul`) (see [registering 15 | services](https://www.consul.io/intro/getting-started/services.html) for instructions on how to register your services 16 | in Consul). To see how to deploy this AMI, check out the [consul-cluster example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example). 17 | 18 | For more info on Consul installation and configuration, check out the 19 | [install-consul](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul) and [install-dnsmasq](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-dnsmasq) for Ubuntu 16.04 and Amazon Linux 2 or [setup-systemd-resolved](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/setup-systemd-resolved) for Ubuntu 18.04 documentation. 20 | 21 | ## Dependencies 22 | 1. AWSCLI must be installed on the base AMI in order for run-consul to run 23 | 1. Git CAN be installed on the base AMI if you want to use clone commands 24 | 25 | 26 | ## Quick start 27 | 28 | To build the Consul AMI: 29 | 30 | 1. `git clone` this repo to your computer. 31 | 1. Install [Packer](https://www.packer.io/). 32 | 1. Configure your AWS credentials using one of the [options supported by the AWS 33 | SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to 34 | set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. 35 | 1. Update the `variables` section of the `consul.json` Packer template to configure the AWS region, Consul version, and 36 | Dnsmasq version you wish to use. If you want to install Consul Enterprise, skip the version variable and instead set 37 | the `download_url` to the full url that points to the consul enterprise zipped package. 38 | 1. Run `packer build consul.json`. 39 | 40 | When the build finishes, it will output the IDs of the new AMIs. 
To see how to deploy one of these AMIs, check out the 41 | [consul-cluster example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example). 42 | 43 | 44 | 45 | 46 | ## Creating your own Packer template for production usage 47 | 48 | When creating your own Packer template for production usage, you can copy the example in this folder more or less 49 | exactly, except for one change: we recommend replacing the `file` provisioner with a call to `git clone` in the `shell` 50 | provisioner. Instead of: 51 | 52 | ```json 53 | { 54 | "provisioners": [{ 55 | "type": "file", 56 | "source": "{{template_dir}}/../../../terraform-aws-consul", 57 | "destination": "/tmp" 58 | },{ 59 | "type": "shell", 60 | "inline": [ 61 | "/tmp/terraform-aws-consul/modules/install-consul/install-consul --version {{user `consul_version`}}" 62 | ], 63 | "pause_before": "30s" 64 | },{ 65 | "type": "shell", 66 | "only": ["ubuntu16-ami", "amazon-linux-2-ami"], 67 | "inline": [ 68 | "/tmp/terraform-aws-consul/modules/install-dnsmasq/install-dnsmasq" 69 | ], 70 | "pause_before": "30s" 71 | },{ 72 | "type": "shell", 73 | "only": ["ubuntu18-ami"], 74 | "inline": [ 75 | "/tmp/terraform-aws-consul/modules/setup-systemd-resolved/setup-systemd-resolved" 76 | ], 77 | "pause_before": "30s" 78 | }] 79 | } 80 | ``` 81 | 82 | Your code should look more like this: 83 | 84 | ```json 85 | { 86 | "provisioners": [{ 87 | "type": "shell", 88 | "inline": [ 89 | "git clone --branch <MODULE_VERSION> https://github.com/hashicorp/terraform-aws-consul.git /tmp/terraform-aws-consul", 90 | "/tmp/terraform-aws-consul/modules/install-consul/install-consul --version {{user `consul_version`}}" 91 | ], 92 | "pause_before": "30s" 93 | },{ 94 | "type": "shell", 95 | "only": ["ubuntu16-ami", "amazon-linux-2-ami"], 96 | "inline": [ 97 | "/tmp/terraform-aws-consul/modules/install-dnsmasq/install-dnsmasq" 98 | ], 99 | "pause_before": "30s" 100 | },{ 101 | "type": "shell", 102 | "only": ["ubuntu18-ami"], 103 | "inline": [ 104 
| "/tmp/terraform-aws-consul/modules/setup-systemd-resolved/setup-systemd-resolved" 105 | ], 106 | "pause_before": "30s" 107 | }] 108 | } 109 | ``` 110 | 111 | **NOTE:** Amazon Linux 2 users will need to install Git first. 112 | 113 | You should replace `<MODULE_VERSION>` in the code above with the version of this module that you want to use (see 114 | the [Releases Page](../../releases) for all available versions). That's because for production usage, you should always 115 | use a fixed, known version of this Module, downloaded from the official Git repo. On the other hand, when you're 116 | just experimenting with the Module, it's OK to use a local checkout of the Module, uploaded from your own 117 | computer. 118 | -------------------------------------------------------------------------------- /examples/consul-ami/consul.json: -------------------------------------------------------------------------------- 1 | { 2 | "min_packer_version": "1.5.4", 3 | "variables": { 4 | "aws_region": "us-east-1", 5 | "consul_version": "1.5.1", 6 | "download_url": "{{env `CONSUL_DOWNLOAD_URL`}}" 7 | }, 8 | "builders": [{ 9 | "name": "ubuntu16-ami", 10 | "ami_name": "consul-ubuntu-{{isotime | clean_resource_name}}-{{uuid}}", 11 | "ami_description": "An Ubuntu 16.04 AMI that has Consul installed.", 12 | "instance_type": "t2.micro", 13 | "region": "{{user `aws_region`}}", 14 | "type": "amazon-ebs", 15 | "source_ami_filter": { 16 | "filters": { 17 | "virtualization-type": "hvm", 18 | "architecture": "x86_64", 19 | "name": "*ubuntu-xenial-16.04-amd64-server-*", 20 | "block-device-mapping.volume-type": "gp2", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["099720109477"], 24 | "most_recent": true 25 | }, 26 | "ssh_username": "ubuntu" 27 | },{ 28 | "name": "ubuntu18-ami", 29 | "ami_name": "consul-ubuntu-{{isotime | clean_resource_name}}-{{uuid}}", 30 | "ami_description": "An Ubuntu 18.04 AMI that has Consul installed.", 31 | "instance_type": "t2.micro", 32 | "region": "{{user `aws_region`}}", 
33 | "associate_public_ip_address": true, 34 | "type": "amazon-ebs", 35 | "source_ami_filter": { 36 | "filters": { 37 | "virtualization-type": "hvm", 38 | "architecture": "x86_64", 39 | "name": "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*", 40 | "block-device-mapping.volume-type": "gp2", 41 | "root-device-type": "ebs" 42 | }, 43 | "owners": ["099720109477"], 44 | "most_recent": true 45 | }, 46 | "ssh_username": "ubuntu" 47 | },{ 48 | "name": "amazon-linux-2-ami", 49 | "ami_name": "consul-amazon-linux-2-{{isotime | clean_resource_name}}-{{uuid}}", 50 | "ami_description": "An Amazon Linux 2 AMI that has Consul installed.", 51 | "instance_type": "t2.micro", 52 | "region": "{{user `aws_region`}}", 53 | "type": "amazon-ebs", 54 | "source_ami_filter": { 55 | "filters": { 56 | "virtualization-type": "hvm", 57 | "architecture": "x86_64", 58 | "name": "*amzn2-ami-hvm-*-x86_64-gp2", 59 | "block-device-mapping.volume-type": "gp2", 60 | "root-device-type": "ebs" 61 | }, 62 | "owners": ["amazon"], 63 | "most_recent": true 64 | }, 65 | "ssh_username": "ec2-user" 66 | }], 67 | "provisioners": [{ 68 | "type": "shell", 69 | "inline": ["mkdir -p /tmp/terraform-aws-consul/modules"] 70 | },{ 71 | "type": "file", 72 | "source": "{{template_dir}}/../../modules/", 73 | "destination": "/tmp/terraform-aws-consul/modules", 74 | "pause_before": "30s" 75 | },{ 76 | "type": "shell", 77 | "inline": [ 78 | "if test -n \"{{user `download_url`}}\"; then", 79 | " /tmp/terraform-aws-consul/modules/install-consul/install-consul --download-url {{user `download_url`}};", 80 | "else", 81 | " /tmp/terraform-aws-consul/modules/install-consul/install-consul --version {{user `consul_version`}};", 82 | "fi" 83 | ], 84 | "pause_before": "30s" 85 | },{ 86 | "type": "shell", 87 | "only": ["ubuntu16-ami", "amazon-linux-2-ami"], 88 | "inline": [ 89 | "/tmp/terraform-aws-consul/modules/install-dnsmasq/install-dnsmasq" 90 | ], 91 | "pause_before": "30s" 92 | },{ 93 | "type": "shell", 94 | "only": 
["ubuntu18-ami"], 95 | "inline": [ 96 | "/tmp/terraform-aws-consul/modules/setup-systemd-resolved/setup-systemd-resolved" 97 | ], 98 | "pause_before": "30s" 99 | }] 100 | } 101 | -------------------------------------------------------------------------------- /examples/consul-examples-helper/README.md: -------------------------------------------------------------------------------- 1 | # Consul Examples Helper 2 | 3 | This folder contains a helper script called `consul-examples-helper.sh` for working with the 4 | [consul-cluster example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example). After running `terraform apply` on the example, if you run 5 | `consul-examples-helper.sh`, it will automatically: 6 | 7 | 1. Wait for the Consul server cluster to come up. 8 | 1. Print out the IP addresses of the Consul servers. 9 | 1. Print out some example commands you can run against your Consul servers. 10 | 11 | 12 | -------------------------------------------------------------------------------- /examples/consul-examples-helper/consul-examples-helper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # A script that is meant to be used with the Consul cluster examples to: 3 | # 4 | # 1. Wait for the Consul server cluster to come up. 5 | # 2. Print out the IP addresses of the Consul servers. 6 | # 3. Print out some example commands you can run against your Consul servers. 
7 | 8 | set -e 9 | 10 | readonly SCRIPT_NAME="$(basename "$0")" 11 | 12 | readonly MAX_RETRIES=30 13 | readonly SLEEP_BETWEEN_RETRIES_SEC=10 14 | 15 | function log { 16 | local readonly level="$1" 17 | local readonly message="$2" 18 | local readonly timestamp=$(date +"%Y-%m-%d %H:%M:%S") 19 | >&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}" 20 | } 21 | 22 | function log_info { 23 | local readonly message="$1" 24 | log "INFO" "$message" 25 | } 26 | 27 | function log_warn { 28 | local readonly message="$1" 29 | log "WARN" "$message" 30 | } 31 | 32 | function log_error { 33 | local readonly message="$1" 34 | log "ERROR" "$message" 35 | } 36 | 37 | function assert_is_installed { 38 | local readonly name="$1" 39 | 40 | if [[ ! $(command -v ${name}) ]]; then 41 | log_error "The binary '$name' is required by this script but is not installed or in the system's PATH." 42 | exit 1 43 | fi 44 | } 45 | 46 | function get_required_terraform_output { 47 | local readonly output_name="$1" 48 | local output_value 49 | 50 | output_value=$(terraform output -no-color "$output_name") 51 | 52 | if [[ -z "$output_value" ]]; then 53 | log_error "Unable to find a value for Terraform output $output_name" 54 | exit 1 55 | fi 56 | 57 | echo "$output_value" 58 | } 59 | 60 | # 61 | # Usage: join SEPARATOR ARRAY 62 | # 63 | # Joins the elements of ARRAY with the SEPARATOR character between them. 64 | # 65 | # Examples: 66 | # 67 | # join ", " ("A" "B" "C") 68 | # Returns: "A, B, C" 69 | # 70 | function join { 71 | local readonly separator="$1" 72 | shift 73 | local readonly values=("$@") 74 | 75 | printf "%s$separator" "${values[@]}" | sed "s/$separator$//" 76 | } 77 | 78 | function get_all_consul_server_ips { 79 | local expected_num_servers 80 | expected_num_servers=$(get_required_terraform_output "num_servers") 81 | 82 | log_info "Looking up public IP addresses for $expected_num_servers Consul server EC2 Instances." 
83 | 84 | local ips 85 | local i 86 | 87 | for (( i=1; i<="$MAX_RETRIES"; i++ )); do 88 | ips=($(get_consul_cluster_ips)) 89 | if [[ "${#ips[@]}" -eq "$expected_num_servers" ]]; then 90 | log_info "Found all $expected_num_servers public IP addresses!" 91 | echo "${ips[@]}" 92 | return 93 | else 94 | log_warn "Found ${#ips[@]} of $expected_num_servers public IP addresses. Will sleep for $SLEEP_BETWEEN_RETRIES_SEC seconds and try again." 95 | sleep "$SLEEP_BETWEEN_RETRIES_SEC" 96 | fi 97 | done 98 | 99 | log_error "Failed to find the IP addresses for $expected_num_servers Consul server EC2 Instances after $MAX_RETRIES retries." 100 | exit 1 101 | } 102 | 103 | function wait_for_all_consul_servers_to_register { 104 | local readonly server_ips=($@) 105 | local readonly server_ip="${server_ips[0]}" 106 | 107 | local expected_num_servers 108 | expected_num_servers=$(get_required_terraform_output "num_servers") 109 | 110 | log_info "Waiting for $expected_num_servers Consul servers to register in the cluster" 111 | 112 | for (( i=1; i<="$MAX_RETRIES"; i++ )); do 113 | log_info "Running 'consul members' command against server at IP address $server_ip" 114 | # Intentionally use local and readonly here so that this script doesn't exit if the consul members or grep commands 115 | # exit with an error. 116 | local readonly members=$(consul members -http-addr="$server_ip:8500") 117 | local readonly server_members=$(echo "$members" | grep "server") 118 | local readonly num_servers=$(echo "$server_members" | wc -l | tr -d ' ') 119 | 120 | if [[ "$num_servers" -eq "$expected_num_servers" ]]; then 121 | log_info "All $expected_num_servers Consul servers have registered in the cluster!" 122 | return 123 | else 124 | log_info "$num_servers out of $expected_num_servers Consul servers have registered in the cluster." 125 | log_info "Sleeping for $SLEEP_BETWEEN_RETRIES_SEC seconds and will check again." 
126 | sleep "$SLEEP_BETWEEN_RETRIES_SEC" 127 | fi 128 | done 129 | 130 | log_error "Did not find $expected_num_servers Consul servers registered after $MAX_RETRIES retries." 131 | exit 1 132 | } 133 | 134 | function get_consul_cluster_ips { 135 | local aws_region 136 | local cluster_tag_key 137 | local cluster_tag_value 138 | local instances 139 | 140 | aws_region=$(get_required_terraform_output "aws_region") 141 | cluster_tag_key=$(get_required_terraform_output "consul_servers_cluster_tag_key") 142 | cluster_tag_value=$(get_required_terraform_output "consul_servers_cluster_tag_value") 143 | 144 | log_info "Fetching public IP addresses for EC2 Instances in $aws_region with tag $cluster_tag_key=$cluster_tag_value" 145 | 146 | instances=$(aws ec2 describe-instances \ 147 | --region "$aws_region" \ 148 | --filter "Name=tag:$cluster_tag_key,Values=$cluster_tag_value" "Name=instance-state-name,Values=running") 149 | 150 | echo "$instances" | jq -r '.Reservations[].Instances[].PublicIpAddress' 151 | } 152 | 153 | function print_instructions { 154 | local readonly server_ips=($@) 155 | 156 | local instructions=() 157 | instructions+=("\nYour Consul servers are running at the following IP addresses:\n\n${server_ips[@]/#/ }\n") 158 | instructions+=("Some commands for you to try:\n") 159 | instructions+=(" consul members -http-addr=${server_ips[0]}:8500") 160 | instructions+=(" consul kv put -http-addr=${server_ips[0]}:8500 foo bar") 161 | instructions+=(" consul kv get -http-addr=${server_ips[0]}:8500 foo") 162 | instructions+=(" consul kv get -http-addr=${server_ips[1]}:8500 foo") 163 | instructions+=(" consul kv get -http-addr=${server_ips[2]}:8500 foo") 164 | instructions+=("\nTo see the Consul UI, open the following URL in your web browser:\n") 165 | instructions+=(" http://${server_ips[0]}:8500/ui/\n") 166 | 167 | local instructions_str 168 | instructions_str=$(join "\n" "${instructions[@]}") 169 | 170 | echo -e "$instructions_str" 171 | } 172 | 173 | function run { 
174 | assert_is_installed "aws" 175 | assert_is_installed "jq" 176 | assert_is_installed "terraform" 177 | assert_is_installed "consul" 178 | 179 | local server_ips 180 | server_ips=$(get_all_consul_server_ips) 181 | 182 | wait_for_all_consul_servers_to_register "$server_ips" 183 | print_instructions "$server_ips" 184 | } 185 | 186 | run -------------------------------------------------------------------------------- /examples/example-with-custom-asg-role/README.md: -------------------------------------------------------------------------------- 1 | # Consul Cluster Example 2 | 3 | This folder shows an example of Terraform code that uses the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) to deploy 4 | a [Consul](https://www.consul.io/) cluster in [AWS](https://aws.amazon.com/). The cluster consists of two Auto Scaling 5 | Groups (ASGs): one with a small number of Consul server nodes, which are responsible for being part of the [consensus 6 | quorum](https://www.consul.io/docs/internals/consensus.html), and one with a larger number of client nodes, which 7 | would typically run alongside your apps: 8 | 9 | ![Consul architecture](https://github.com/hashicorp/terraform-aws-consul/blob/master/_docs/architecture.png?raw=true) 10 | 11 | The Consul server nodes are launched using a custom autoscaling service-linked role for the autoscaling group instead of the default autoscaling service-linked role. This enables a custom role to be assigned which may be desired for using KMS encrypted AMIs. [More Information](https://forums.aws.amazon.com/thread.jspa?threadID=277523) 12 | 13 | You will need to create an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) 14 | that has Consul installed, which you can do using the [consul-ami example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami). 
Note that to keep 15 | this example simple, both the server ASG and client ASG are running the exact same AMI. In real-world usage, you'd 16 | probably have multiple client ASGs, and each of those ASGs would run a different AMI that has the Consul agent 17 | installed alongside your apps. 18 | 19 | For more info on how the Consul cluster works, check out the [consul-cluster](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) documentation. 20 | 21 | 22 | 23 | ## Quick start 24 | 25 | To deploy a Consul Cluster: 26 | 27 | 1. `git clone` this repo to your computer. 28 | 1. Optional: build a Consul AMI. See the [consul-ami example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami) documentation for instructions. Make sure to 29 | note down the ID of the AMI. 30 | 1. Install [Terraform](https://www.terraform.io/). 31 | 1. Open `variables.tf`, set the environment variables specified at the top of the file, and fill in any other variables that 32 | don't have a default. If you built a custom AMI, put the AMI ID into the `ami_id` variable. Otherwise, one of our 33 | public example AMIs will be used by default. These AMIs are great for learning/experimenting, but are NOT 34 | recommended for production use. 35 | 1. Run `terraform init`. 36 | 1. Run `terraform apply`. 37 | 1. Run the [consul-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-examples-helper/consul-examples-helper.sh) to 38 | print out the IP addresses of the Consul servers and some example commands you can run to interact with the cluster: 39 | `../consul-examples-helper/consul-examples-helper.sh`. 
40 | 41 | -------------------------------------------------------------------------------- /examples/example-with-custom-asg-role/main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # DEPLOY A CONSUL CLUSTER IN AWS 3 | # These templates show an example of how to use the consul-cluster module to deploy Consul in AWS. We deploy two Auto 4 | # Scaling Groups (ASGs): one with a small number of Consul server nodes and one with a larger number of Consul client 5 | # nodes. Note that these templates assume that the AMI you provide via the ami_id input variable is built from 6 | # the examples/example-with-encryption/packer/consul-with-certs.json Packer template. 7 | # --------------------------------------------------------------------------------------------------------------------- 8 | 9 | # ---------------------------------------------------------------------------------------------------------------------- 10 | # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER 11 | # ---------------------------------------------------------------------------------------------------------------------- 12 | terraform { 13 | # This module is now only being tested with Terraform 0.13.x. However, to make upgrading easier, we are setting 14 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 15 | # forwards compatible with 0.13.x code. 
16 | required_version = ">= 0.12.26" 17 | } 18 | 19 | # --------------------------------------------------------------------------------------------------------------------- 20 | # Create a custom role for consul 21 | # --------------------------------------------------------------------------------------------------------------------- 22 | resource "aws_iam_service_linked_role" "consul_asg_role" { 23 | aws_service_name = "autoscaling.amazonaws.com" 24 | custom_suffix = var.consul_service_linked_role_suffix 25 | description = "Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling" 26 | } 27 | 28 | # --------------------------------------------------------------------------------------------------------------------- 29 | # DEPLOY THE CONSUL SERVER NODES 30 | # --------------------------------------------------------------------------------------------------------------------- 31 | 32 | module "consul_servers" { 33 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 34 | # to a specific version of the modules, such as the following example: 35 | # source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.0.1" 36 | source = "../../modules/consul-cluster" 37 | 38 | cluster_name = "${var.cluster_name}-server" 39 | cluster_size = var.num_servers 40 | instance_type = "t2.micro" 41 | spot_price = var.spot_price 42 | service_linked_role_arn = aws_iam_service_linked_role.consul_asg_role.arn 43 | 44 | # The EC2 Instances will use these tags to automatically discover each other and form a cluster 45 | cluster_tag_key = var.cluster_tag_key 46 | cluster_tag_value = var.cluster_name 47 | 48 | ami_id = var.ami_id 49 | user_data = data.template_file.user_data_server.rendered 50 | 51 | vpc_id = data.aws_vpc.default.id 52 | subnet_ids = data.aws_subnet_ids.default.ids 53 | 54 | # To make testing easier, we allow Consul and SSH requests 
from any IP address here but in a production 55 | # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. 56 | allowed_ssh_cidr_blocks = ["0.0.0.0/0"] 57 | 58 | allowed_inbound_cidr_blocks = ["0.0.0.0/0"] 59 | ssh_key_name = var.ssh_key_name 60 | 61 | tags = [ 62 | { 63 | key = "Environment" 64 | value = "development" 65 | propagate_at_launch = true 66 | } 67 | ] 68 | } 69 | 70 | # --------------------------------------------------------------------------------------------------------------------- 71 | # THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL SERVER EC2 INSTANCE WHEN IT'S BOOTING 72 | # This script will configure and start Consul 73 | # --------------------------------------------------------------------------------------------------------------------- 74 | 75 | data "template_file" "user_data_server" { 76 | template = file("${path.module}/user-data-server.sh") 77 | 78 | vars = { 79 | cluster_tag_key = var.cluster_tag_key 80 | cluster_tag_value = var.cluster_name 81 | enable_gossip_encryption = var.enable_gossip_encryption 82 | gossip_encryption_key = var.gossip_encryption_key 83 | enable_rpc_encryption = var.enable_rpc_encryption 84 | ca_path = var.ca_path 85 | cert_file_path = var.cert_file_path 86 | key_file_path = var.key_file_path 87 | } 88 | } 89 | 90 | # --------------------------------------------------------------------------------------------------------------------- 91 | # DEPLOY THE CONSUL CLIENT NODES 92 | # Note that you do not have to use the consul-cluster module to deploy your clients. We do so simply because it 93 | # provides a convenient way to deploy an Auto Scaling Group with the necessary IAM and security group permissions for 94 | # Consul, but feel free to deploy those clients however you choose (e.g. a single EC2 Instance, a Docker cluster, etc). 
95 | # --------------------------------------------------------------------------------------------------------------------- 96 | 97 | module "consul_clients" { 98 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 99 | # to a specific version of the modules, such as the following example: 100 | # source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.0.1" 101 | source = "../../modules/consul-cluster" 102 | 103 | cluster_name = "${var.cluster_name}-client" 104 | cluster_size = var.num_clients 105 | instance_type = "t2.micro" 106 | spot_price = var.spot_price 107 | 108 | cluster_tag_key = "consul-clients" 109 | cluster_tag_value = var.cluster_name 110 | 111 | ami_id = var.ami_id 112 | user_data = data.template_file.user_data_client.rendered 113 | 114 | vpc_id = data.aws_vpc.default.id 115 | subnet_ids = data.aws_subnet_ids.default.ids 116 | 117 | # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production 118 | # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. 
119 | allowed_ssh_cidr_blocks = ["0.0.0.0/0"] 120 | 121 | allowed_inbound_cidr_blocks = ["0.0.0.0/0"] 122 | ssh_key_name = var.ssh_key_name 123 | } 124 | 125 | # --------------------------------------------------------------------------------------------------------------------- 126 | # THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL CLIENT EC2 INSTANCE WHEN IT'S BOOTING 127 | # This script will configure and start Consul 128 | # --------------------------------------------------------------------------------------------------------------------- 129 | 130 | data "template_file" "user_data_client" { 131 | template = file("${path.module}/user-data-client.sh") 132 | 133 | vars = { 134 | cluster_tag_key = var.cluster_tag_key 135 | cluster_tag_value = var.cluster_name 136 | enable_gossip_encryption = var.enable_gossip_encryption 137 | gossip_encryption_key = var.gossip_encryption_key 138 | enable_rpc_encryption = var.enable_rpc_encryption 139 | ca_path = var.ca_path 140 | cert_file_path = var.cert_file_path 141 | key_file_path = var.key_file_path 142 | } 143 | } 144 | 145 | # --------------------------------------------------------------------------------------------------------------------- 146 | # DEPLOY CONSUL IN THE DEFAULT VPC AND SUBNETS 147 | # Using the default VPC and subnets makes this example easy to run and test, but it means Consul is accessible from the 148 | # public Internet. For a production deployment, we strongly recommend deploying into a custom VPC with private subnets. 149 | # --------------------------------------------------------------------------------------------------------------------- 150 | 151 | data "aws_vpc" "default" { 152 | default = var.vpc_id == null ? 
true : false 153 | id = var.vpc_id 154 | } 155 | 156 | data "aws_subnet_ids" "default" { 157 | vpc_id = data.aws_vpc.default.id 158 | } 159 | 160 | data "aws_region" "current" { 161 | } 162 | 163 | -------------------------------------------------------------------------------- /examples/example-with-custom-asg-role/outputs.tf: -------------------------------------------------------------------------------- 1 | output "num_servers" { 2 | value = module.consul_servers.cluster_size 3 | } 4 | 5 | output "asg_name_servers" { 6 | value = module.consul_servers.asg_name 7 | } 8 | 9 | output "launch_config_name_servers" { 10 | value = module.consul_servers.launch_config_name 11 | } 12 | 13 | output "iam_role_arn_servers" { 14 | value = module.consul_servers.iam_role_arn 15 | } 16 | 17 | output "iam_role_id_servers" { 18 | value = module.consul_servers.iam_role_id 19 | } 20 | 21 | output "security_group_id_servers" { 22 | value = module.consul_servers.security_group_id 23 | } 24 | 25 | output "num_clients" { 26 | value = module.consul_clients.cluster_size 27 | } 28 | 29 | output "asg_name_clients" { 30 | value = module.consul_clients.asg_name 31 | } 32 | 33 | output "launch_config_name_clients" { 34 | value = module.consul_clients.launch_config_name 35 | } 36 | 37 | output "iam_role_arn_clients" { 38 | value = module.consul_clients.iam_role_arn 39 | } 40 | 41 | output "iam_role_id_clients" { 42 | value = module.consul_clients.iam_role_id 43 | } 44 | 45 | output "security_group_id_clients" { 46 | value = module.consul_clients.security_group_id 47 | } 48 | 49 | output "aws_region" { 50 | value = data.aws_region.current.name 51 | } 52 | 53 | output "consul_servers_cluster_tag_key" { 54 | value = module.consul_servers.cluster_tag_key 55 | } 56 | 57 | output "consul_servers_cluster_tag_value" { 58 | value = module.consul_servers.cluster_tag_value 59 | } 60 | 61 | -------------------------------------------------------------------------------- 
/examples/example-with-custom-asg-role/user-data-client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in client mode. Note that this script assumes it's running in an AMI 4 | # built from the Packer template in examples/consul-ami/consul.json. 5 | 6 | set -e 7 | 8 | # Send the log output from this script to user-data.log, syslog, and the console 9 | # From: https://alestic.com/2010/12/ec2-user-data-output/ 10 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 11 | 12 | # These variables are passed in via Terraform template interpolation 13 | /opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" 14 | 15 | # You could add commands to boot your other apps here -------------------------------------------------------------------------------- /examples/example-with-custom-asg-role/user-data-server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in server mode. Note that this script assumes it's running in an AMI 4 | # built from the Packer template in examples/consul-ami/consul.json.
5 | 6 | set -e 7 | 8 | # Send the log output from this script to user-data.log, syslog, and the console 9 | # From: https://alestic.com/2010/12/ec2-user-data-output/ 10 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 11 | 12 | # These variables are passed in via Terraform template interplation 13 | /opt/consul/bin/run-consul --server --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" -------------------------------------------------------------------------------- /examples/example-with-custom-asg-role/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # ENVIRONMENT VARIABLES 3 | # Define these secrets as environment variables 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | # AWS_ACCESS_KEY_ID 7 | # AWS_SECRET_ACCESS_KEY 8 | # AWS_DEFAULT_REGION 9 | 10 | # --------------------------------------------------------------------------------------------------------------------- 11 | # OPTIONAL PARAMETERS 12 | # These parameters have reasonable defaults. 13 | # --------------------------------------------------------------------------------------------------------------------- 14 | 15 | variable "ami_id" { 16 | description = "The ID of the AMI to run in the cluster. This should be an AMI built from the Packer template under examples/example-with-encryption/packer/consul-with-certs.json. To keep this example simple, we run the same AMI on both server and client nodes, but in real-world usage, your client nodes would also run your apps. If the default value is used, Terraform will look up the latest AMI build automatically." 
17 | type = string 18 | default = null 19 | } 20 | 21 | variable "cluster_name" { 22 | description = "What to name the Consul cluster and all of its associated resources" 23 | type = string 24 | default = "consul-example" 25 | } 26 | 27 | variable "num_servers" { 28 | description = "The number of Consul server nodes to deploy. We strongly recommend using 3 or 5." 29 | type = number 30 | default = 3 31 | } 32 | 33 | variable "num_clients" { 34 | description = "The number of Consul client nodes to deploy. You typically run the Consul client alongside your apps, so set this value to however many Instances make sense for your app code." 35 | type = number 36 | default = 3 37 | } 38 | 39 | variable "cluster_tag_key" { 40 | description = "The tag the EC2 Instances will look for to automatically discover each other and form a cluster." 41 | type = string 42 | default = "consul-servers" 43 | } 44 | 45 | variable "ssh_key_name" { 46 | description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair." 47 | type = string 48 | default = null 49 | } 50 | 51 | variable "vpc_id" { 52 | description = "The ID of the VPC in which the nodes will be deployed. Uses default VPC if not supplied." 53 | type = string 54 | default = null 55 | } 56 | 57 | variable "spot_price" { 58 | description = "The maximum hourly price to pay for EC2 Spot Instances." 59 | type = string 60 | default = null 61 | } 62 | 63 | variable "enable_gossip_encryption" { 64 | description = "Encrypt gossip traffic between nodes. Must also specify encryption key." 65 | type = bool 66 | default = true 67 | } 68 | 69 | variable "enable_rpc_encryption" { 70 | description = "Encrypt RPC traffic between nodes. Must also specify TLS certificates and keys." 71 | type = bool 72 | default = true 73 | } 74 | 75 | variable "gossip_encryption_key" { 76 | description = "16 byte cryptographic key to encrypt gossip traffic between nodes. 
Must set 'enable_gossip_encryption' to true for this to take effect. WARNING: Setting the encryption key here means it will be stored in plain text. We're doing this here to keep the example simple, but in production you should inject it more securely, e.g. retrieving it from KMS." 77 | type = string 78 | default = null 79 | } 80 | 81 | variable "ca_path" { 82 | description = "Path to the directory of CA files used to verify outgoing connections." 83 | type = string 84 | default = "/opt/consul/tls/ca" 85 | } 86 | 87 | variable "cert_file_path" { 88 | description = "Path to the certificate file used to verify incoming connections." 89 | type = string 90 | default = "/opt/consul/tls/consul.crt.pem" 91 | } 92 | 93 | variable "key_file_path" { 94 | description = "Path to the certificate key used to verify incoming connections." 95 | type = string 96 | default = "/opt/consul/tls/consul.key.pem" 97 | } 98 | 99 | variable "consul_service_linked_role_suffix" { 100 | description = "Suffix for the aws_iam_service_linked_role created for the consul cluster auto scaling group to use" 101 | type = string 102 | default = "test-consul-service-linked-role" 103 | } 104 | 105 | -------------------------------------------------------------------------------- /examples/example-with-encryption/README.md: -------------------------------------------------------------------------------- 1 | # Consul cluster with encryption example 2 | 3 | This folder contains a set of Terraform manifest for deploying a Consul cluster in AWS, including a Packer manifest that creates an AMI with a set of insecured certs for TLS validation, as well as installing an updated version of the `run-consul` script that accepts parameters for enabling RPC and gossip encryption. 4 | 5 | The resulting AMI id can then be passed as a parameter to `variables.tf`. 
The `enable_gossip_encryption` and `enable_rpc_encryption` variables are set to `true` by default in this example, but they don't have to be in your implementation. In this example they're passed as parameters to the `user_data` template to generate the flags passed to `run-consul` but you can use a different strategy. 6 | 7 | The end result of this example should be a cluster of 3 Consul servers and 3 Consul clients, all running on individual EC2 instances. If the default variables are used, both gossip and RPC encryption will be enabled. You can validate this by trying to bring up another Consul node or cluster NOT running with encryption and attempt to join the existing cluster. 8 | 9 | Running this example with encryption turned off and then attempt to upgrade it to use encryption is a good exercise to validate that a production cluster can be upgraded with minimal impact. 10 | 11 | To understand more about how Consul handles encryption or how you can upgrade to use encryption without downtime, check out the [Consul encryption documentation](https://www.consul.io/docs/agent/encryption.html). **IMPORTANT:** The certs included in this repo are **NOT** meant to be used in production. You should generate your own certs if you're running this for anything other than experimenting or testing. 12 | 13 | ## Quick start 14 | 15 | To deploy a Consul cluster with encryption enabled: 16 | 17 | 1. Create a new AMI using the Packer manifest and the certificates in the `packer` directory. 18 | 1. Modify `main.tf` to add your provider credentials, VPC/subnet ids if you need to, etc. 19 | 1. Modify `variables.tf` to customize the cluster. **NOTE:** the `gossip_encryption_key` variable must be a 16-byte key that can be generated offline with `consul keygen`. It's **NOT** a good idea to keep this key **in plain text** in source control. It should be encrypted beforehand (with something like KMS) and decrypted by Consul during boot. 20 | 1. Run `terraform init`. 21 | 1. 
Run `terraform apply`. 22 | 1. `ssh` into one of the boxes and make sure all nodes correctly discover each other (by running `consul members` for example). 23 | 1. You can also validate that encryption is turned on by looking at `/opt/consul/log/consul-stdout.log` and verifying you see `Encrypt: Gossip: true, TLS-Outgoing: true, TLS-Incoming: true`. -------------------------------------------------------------------------------- /examples/example-with-encryption/main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # DEPLOY A CONSUL CLUSTER IN AWS 3 | # These templates show an example of how to use the consul-cluster module to deploy Consul in AWS. We deploy two Auto 4 | # Scaling Groups (ASGs): one with a small number of Consul server nodes and one with a larger number of Consul client 5 | # nodes. Note that these templates assume that the AMI you provide via the ami_id input variable is built from 6 | # the examples/example-with-encryption/packer/consul-with-certs.json Packer template. 7 | # --------------------------------------------------------------------------------------------------------------------- 8 | 9 | # ---------------------------------------------------------------------------------------------------------------------- 10 | # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER 11 | # ---------------------------------------------------------------------------------------------------------------------- 12 | terraform { 13 | # This module is now only being tested with Terraform 0.13.x. However, to make upgrading easier, we are setting 14 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 15 | # forwards compatible with 0.13.x code. 
16 | required_version = ">= 0.12.26" 17 | } 18 | 19 | # --------------------------------------------------------------------------------------------------------------------- 20 | # DEPLOY THE CONSUL SERVER NODES 21 | # --------------------------------------------------------------------------------------------------------------------- 22 | 23 | module "consul_servers" { 24 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 25 | # to a specific version of the modules, such as the following example: 26 | # source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.0.1" 27 | source = "../../modules/consul-cluster" 28 | 29 | cluster_name = "${var.cluster_name}-server" 30 | cluster_size = var.num_servers 31 | instance_type = "t2.micro" 32 | spot_price = var.spot_price 33 | 34 | # The EC2 Instances will use these tags to automatically discover each other and form a cluster 35 | cluster_tag_key = var.cluster_tag_key 36 | cluster_tag_value = var.cluster_name 37 | 38 | ami_id = var.ami_id 39 | user_data = data.template_file.user_data_server.rendered 40 | 41 | vpc_id = data.aws_vpc.default.id 42 | subnet_ids = data.aws_subnet_ids.default.ids 43 | 44 | # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production 45 | # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. 
46 | allowed_ssh_cidr_blocks = ["0.0.0.0/0"] 47 | 48 | allowed_inbound_cidr_blocks = ["0.0.0.0/0"] 49 | ssh_key_name = var.ssh_key_name 50 | 51 | tags = [ 52 | { 53 | key = "Environment" 54 | value = "development" 55 | propagate_at_launch = true 56 | } 57 | ] 58 | } 59 | 60 | # --------------------------------------------------------------------------------------------------------------------- 61 | # THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL SERVER EC2 INSTANCE WHEN IT'S BOOTING 62 | # This script will configure and start Consul 63 | # --------------------------------------------------------------------------------------------------------------------- 64 | 65 | data "template_file" "user_data_server" { 66 | template = file("${path.module}/user-data-server.sh") 67 | 68 | vars = { 69 | cluster_tag_key = var.cluster_tag_key 70 | cluster_tag_value = var.cluster_name 71 | enable_gossip_encryption = var.enable_gossip_encryption 72 | gossip_encryption_key = var.gossip_encryption_key 73 | enable_rpc_encryption = var.enable_rpc_encryption 74 | ca_path = var.ca_path 75 | cert_file_path = var.cert_file_path 76 | key_file_path = var.key_file_path 77 | } 78 | } 79 | 80 | # --------------------------------------------------------------------------------------------------------------------- 81 | # DEPLOY THE CONSUL CLIENT NODES 82 | # Note that you do not have to use the consul-cluster module to deploy your clients. We do so simply because it 83 | # provides a convenient way to deploy an Auto Scaling Group with the necessary IAM and security group permissions for 84 | # Consul, but feel free to deploy those clients however you choose (e.g. a single EC2 Instance, a Docker cluster, etc). 
85 | # --------------------------------------------------------------------------------------------------------------------- 86 | 87 | module "consul_clients" { 88 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 89 | # to a specific version of the modules, such as the following example: 90 | # source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.0.1" 91 | source = "../../modules/consul-cluster" 92 | 93 | cluster_name = "${var.cluster_name}-client" 94 | cluster_size = var.num_clients 95 | instance_type = "t2.micro" 96 | spot_price = var.spot_price 97 | 98 | cluster_tag_key = "consul-clients" 99 | cluster_tag_value = var.cluster_name 100 | 101 | ami_id = var.ami_id 102 | user_data = data.template_file.user_data_client.rendered 103 | 104 | vpc_id = data.aws_vpc.default.id 105 | subnet_ids = data.aws_subnet_ids.default.ids 106 | 107 | # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production 108 | # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. 
109 | allowed_ssh_cidr_blocks = ["0.0.0.0/0"] 110 | 111 | allowed_inbound_cidr_blocks = ["0.0.0.0/0"] 112 | ssh_key_name = var.ssh_key_name 113 | } 114 | 115 | # --------------------------------------------------------------------------------------------------------------------- 116 | # THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL CLIENT EC2 INSTANCE WHEN IT'S BOOTING 117 | # This script will configure and start Consul 118 | # --------------------------------------------------------------------------------------------------------------------- 119 | 120 | data "template_file" "user_data_client" { 121 | template = file("${path.module}/user-data-client.sh") 122 | 123 | vars = { 124 | cluster_tag_key = var.cluster_tag_key 125 | cluster_tag_value = var.cluster_name 126 | enable_gossip_encryption = var.enable_gossip_encryption 127 | gossip_encryption_key = var.gossip_encryption_key 128 | enable_rpc_encryption = var.enable_rpc_encryption 129 | ca_path = var.ca_path 130 | cert_file_path = var.cert_file_path 131 | key_file_path = var.key_file_path 132 | } 133 | } 134 | 135 | # --------------------------------------------------------------------------------------------------------------------- 136 | # DEPLOY CONSUL IN THE DEFAULT VPC AND SUBNETS 137 | # Using the default VPC and subnets makes this example easy to run and test, but it means Consul is accessible from the 138 | # public Internet. For a production deployment, we strongly recommend deploying into a custom VPC with private subnets. 139 | # --------------------------------------------------------------------------------------------------------------------- 140 | 141 | data "aws_vpc" "default" { 142 | default = var.vpc_id == null ? 
true : false 143 | id = var.vpc_id 144 | } 145 | 146 | data "aws_subnet_ids" "default" { 147 | vpc_id = data.aws_vpc.default.id 148 | } 149 | 150 | data "aws_region" "current" { 151 | } 152 | 153 | -------------------------------------------------------------------------------- /examples/example-with-encryption/outputs.tf: -------------------------------------------------------------------------------- 1 | output "num_servers" { 2 | value = module.consul_servers.cluster_size 3 | } 4 | 5 | output "asg_name_servers" { 6 | value = module.consul_servers.asg_name 7 | } 8 | 9 | output "launch_config_name_servers" { 10 | value = module.consul_servers.launch_config_name 11 | } 12 | 13 | output "iam_role_arn_servers" { 14 | value = module.consul_servers.iam_role_arn 15 | } 16 | 17 | output "iam_role_id_servers" { 18 | value = module.consul_servers.iam_role_id 19 | } 20 | 21 | output "security_group_id_servers" { 22 | value = module.consul_servers.security_group_id 23 | } 24 | 25 | output "num_clients" { 26 | value = module.consul_clients.cluster_size 27 | } 28 | 29 | output "asg_name_clients" { 30 | value = module.consul_clients.asg_name 31 | } 32 | 33 | output "launch_config_name_clients" { 34 | value = module.consul_clients.launch_config_name 35 | } 36 | 37 | output "iam_role_arn_clients" { 38 | value = module.consul_clients.iam_role_arn 39 | } 40 | 41 | output "iam_role_id_clients" { 42 | value = module.consul_clients.iam_role_id 43 | } 44 | 45 | output "security_group_id_clients" { 46 | value = module.consul_clients.security_group_id 47 | } 48 | 49 | output "aws_region" { 50 | value = data.aws_region.current.name 51 | } 52 | 53 | output "consul_servers_cluster_tag_key" { 54 | value = module.consul_servers.cluster_tag_key 55 | } 56 | 57 | output "consul_servers_cluster_tag_value" { 58 | value = module.consul_servers.cluster_tag_value 59 | } 60 | 61 | -------------------------------------------------------------------------------- 
/examples/example-with-encryption/packer/README.md: -------------------------------------------------------------------------------- 1 | # Example TLS Certificates 2 | 3 | ### DON'T use these files in production 4 | 5 | These files are meant to be used only for the example-with-encryption cluster. They're not secure and you shouldn't use them for production services. 6 | 7 | ### Files 8 | 9 | - **ca.crt.pem**: The public certificate of the Certificate Authority used to create these files. 10 | - **consul.crt.pem:** The TLS public certificate issued by the Certificate Authority of the Consul server. 11 | - **consul.key.pem:** The TLS private key that corresponds to the TLS public certificate. 12 | 13 | The TLS files are configured as follows: 14 | 15 | - The only authorized IP address is `127.0.0.1` and no domains are authorized at all, so you might not be able to use them for host verification. 16 | - The TLS certificate is valid until April 4 2038. 17 | 18 | ### How to create your own certificates 19 | 20 | Since you're already using Terraform, it's probably easiest to use the [TLS Provider](https://www.terraform.io/docs/providers/tls/index.html) to generate your own certificates. You can find a good working example in the [private-tls-cert module](https://github.com/hashicorp/terraform-aws-vault/tree/master/modules/private-tls-cert) within the [terraform-aws-vault repo](https://github.com/hashicorp/terraform-aws-vault). 
-------------------------------------------------------------------------------- /examples/example-with-encryption/packer/ca.crt.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDHzCCAgegAwIBAgIRAMNGzaIEwbEdKld/bqfQmLowDQYJKoZIhvcNAQELBQAw 3 | KTEQMA4GA1UEChMHQWNtZSBDbzEVMBMGA1UEAxMMYWNtZS5jbyBjZXJ0MB4XDTE4 4 | MDQwNDIwMDU0OFoXDTM3MTIyMDIwMDU0OFowKTEQMA4GA1UEChMHQWNtZSBDbzEV 5 | MBMGA1UEAxMMYWNtZS5jbyBjZXJ0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB 6 | CgKCAQEAmWLnVAQdpBncYUoJbVwIabUrNpgumyRv1daRgk00r/dCAVxzx2FIBlsQ 7 | Vsmqu/SavxU5xyum+Ovwm4wm2bpthU3JD8FQdTkfc1OTRVMFkM3S7lLXxpZ9gyno 8 | BTQz3Md6+fWiJW75EMN0krXPgvJySpcafeYJlOnjDWT/uzD6PZ4VGg5ymsKdtXkE 9 | jvTXlQm+QNvGJbKekSvXWy+/FHKVDA8bdD7xkCtnJsVTeWsNj9ZHQ1aalkfhAM+a 10 | esJ7PgrYSwPm0Kj43hSh4D8YwFBFemmKb9PF0XPBxoWEVjHBi1buH2+nLDW6Op9P 11 | jXyK2om2RJUFR9kX7q3RNg4WJVSN0wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAqQw 12 | DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUL1wcYO+z0ItRQIMgkIvUBl+ku/cw 13 | DQYJKoZIhvcNAQELBQADggEBAGlDFGI7HptpifpMbvHRInV1dXwIVIY7V8KoYD8n 14 | CpIutAZwNs7hDJNkPLgVKkOozN0n4mupxiF6JWzWdRGQn9lNIoi6qJNKgfPgX6Y9 15 | Z/DoLup45y0ScxH56LGuAo3ViahTj7Lglcf7A1ov48XPpq1kUzopVBvOqaWbe4xm 16 | 61RQF8OjzHtJfvFrmjVkwgTbtGQErqBxMZ+vDkH1covSx2wAta9NhI/O4t3sMsCk 17 | wFb2+V1ekrDFA5rn8N+/LzDWQgR645KgqTC/pEQXppKZ2SWUktIoW6ytAMm+1gut 18 | o3q7RvidQZjNS5O9FAIuOio0zSej+1lFlVGqj+ugiUKlZno= 19 | -----END CERTIFICATE----- 20 | 21 | -------------------------------------------------------------------------------- /examples/example-with-encryption/packer/consul-with-certs.json: -------------------------------------------------------------------------------- 1 | { 2 | "min_packer_version": "0.12.0", 3 | "variables": { 4 | "aws_region": "us-east-1", 5 | "consul_version": "1.0.5", 6 | "ca_public_key_path": "{{template_dir}}/ca.crt.pem", 7 | "tls_public_key_path": "{{template_dir}}/consul.crt.pem", 8 | "tls_private_key_path": "{{template_dir}}/consul.key.pem" 9 | }, 10 | 
"builders": [{ 11 | "ami_name": "consul-with-encryption-ubuntu-{{isotime | clean_resource_name}}-{{uuid}}", 12 | "ami_description": "An Ubuntu 16.04 AMI that has Consul installed and TLS certificates.", 13 | "instance_type": "t2.micro", 14 | "name": "ubuntu16-ami", 15 | "region": "{{user `aws_region`}}", 16 | "type": "amazon-ebs", 17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | "architecture": "x86_64", 21 | "name": "*ubuntu-xenial-16.04-amd64-server-*", 22 | "block-device-mapping.volume-type": "gp2", 23 | "root-device-type": "ebs" 24 | }, 25 | "owners": ["099720109477"], 26 | "most_recent": true 27 | }, 28 | "ssh_username": "ubuntu" 29 | },{ 30 | "name": "ubuntu18-ami", 31 | "ami_name": "consul-with-encryption-ubuntu-18.04-{{isotime | clean_resource_name}}-{{uuid}}", 32 | "ami_description": "An Ubuntu 18.04 AMI that has Consul installed and TLS certificates.", 33 | "instance_type": "t2.micro", 34 | "region": "{{user `aws_region`}}", 35 | "type": "amazon-ebs", 36 | "source_ami_filter": { 37 | "filters": { 38 | "virtualization-type": "hvm", 39 | "architecture": "x86_64", 40 | "name": "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*", 41 | "block-device-mapping.volume-type": "gp2", 42 | "root-device-type": "ebs" 43 | }, 44 | "owners": ["099720109477"], 45 | "most_recent": true 46 | }, 47 | "ssh_username": "ubuntu" 48 | },{ 49 | "ami_name": "consul-with-encryption-amazon-linux-2-{{isotime | clean_resource_name}}-{{uuid}}", 50 | "ami_description": "An Amazon Linux 2 AMI that has Consul installed and TLS certificates.", 51 | "instance_type": "t2.micro", 52 | "name": "amazon-linux-2-ami", 53 | "region": "{{user `aws_region`}}", 54 | "type": "amazon-ebs", 55 | "source_ami_filter": { 56 | "filters": { 57 | "virtualization-type": "hvm", 58 | "architecture": "x86_64", 59 | "name": "*amzn2-ami-hvm-*-x86_64-gp2", 60 | "block-device-mapping.volume-type": "gp2", 61 | "root-device-type": "ebs" 62 | }, 63 | "owners": ["amazon"], 64 | 
"most_recent": true 65 | }, 66 | "ssh_username": "ec2-user" 67 | }], 68 | "provisioners": [{ 69 | "type": "file", 70 | "source": "{{user `ca_public_key_path`}}", 71 | "destination": "/tmp/ca.crt.pem" 72 | },{ 73 | "type": "file", 74 | "source": "{{user `tls_public_key_path`}}", 75 | "destination": "/tmp/consul.crt.pem" 76 | },{ 77 | "type": "file", 78 | "source": "{{user `tls_private_key_path`}}", 79 | "destination": "/tmp/consul.key.pem" 80 | },{ 81 | "type": "shell", 82 | "inline": ["mkdir -p /tmp/terraform-aws-consul"] 83 | },{ 84 | "type": "file", 85 | "source": "{{template_dir}}/../../../", 86 | "destination": "/tmp/terraform-aws-consul" 87 | },{ 88 | "type": "shell", 89 | "inline": [ 90 | "/tmp/terraform-aws-consul/modules/install-consul/install-consul --version {{user `consul_version`}} --ca-file-path /tmp/ca.crt.pem --cert-file-path /tmp/consul.crt.pem --key-file-path /tmp/consul.key.pem" 91 | ], 92 | "pause_before": "30s" 93 | },{ 94 | "type": "shell", 95 | "only": ["ubuntu16-ami", "amazon-linux-2-ami"], 96 | "inline": [ 97 | "/tmp/terraform-aws-consul/modules/install-dnsmasq/install-dnsmasq" 98 | ], 99 | "pause_before": "30s" 100 | },{ 101 | "type": "shell", 102 | "only": ["ubuntu18-ami"], 103 | "inline": [ 104 | "/tmp/terraform-aws-consul/modules/setup-systemd-resolved/setup-systemd-resolved" 105 | ], 106 | "pause_before": "30s" 107 | }] 108 | } 109 | -------------------------------------------------------------------------------- /examples/example-with-encryption/packer/consul.crt.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDDTCCAfWgAwIBAgIQGnw43ksJJiSs///euD6+VzANBgkqhkiG9w0BAQsFADAp 3 | MRAwDgYDVQQKEwdBY21lIENvMRUwEwYDVQQDEwxhY21lLmNvIGNlcnQwHhcNMTgw 4 | NDA0MjAwNzQyWhcNMzcxMjIwMjAwNzQyWjApMRAwDgYDVQQKEwdBY21lIENvMRUw 5 | EwYDVQQDEwxhY21lLmNvIGNlcnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK 6 | AoIBAQDIur5F2BtEENCQgc4eUoje8Uo5S1wmC2qJxxNLZzKT4Z6h8VnV0j0JnHME 7 | 
HCtVEkdgpeknGDBl2Wbqzc9LTIHw5/uYXD/2Nw2kY/StRE+rY7B1k80X9o/TO3Gz 8 | SPjIkPGVX3gSbpkqZ5Q29YqhnP84Ji/XIx/2p4cVNXCpIzYWWjLg7cGRjH8VlBEE 9 | J3t7XvjTd8V/Z8xyUGmXm0pXABKZW1SpwhEEZa0WnwHWUZaILJ8p9V0BfCT83DD0 10 | 9Y7WaS1wBW2rEMvWDngHVFdJrlD2aqxUEwc7z56vr+zDuN8enjsyhwqOMMNB6juh 11 | eC0+cPr30d+vU3xwLmNnZfiPmhbTAgMBAAGjMTAvMA4GA1UdDwEB/wQEAwIFoDAM 12 | BgNVHRMBAf8EAjAAMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcNAQELBQADggEB 13 | AFt+j4Yqs+2MeUJ0hmj3TsnpCs3lNXjH71I2xALLzss/2Y+8v9YyXf8jtEG5mp56 14 | 6OZh/9a8o/4Q7gLTNWaCdrjSLfPeELHqVPNFWqPcAkaErGQ/YBm8Li9oJGQSGN+/ 15 | 9ocSH8gOSOfvMR8OsInqIJkN2EhIq990XBLYX2K7LwIUCRcvLp2W16yo3kfbriL/ 16 | GIoB3HWkf9IpSct8pnQLBsySSEftxHzCKX26vjlua8E/oxClgr2o2dKAZxRGbo0j 17 | Ev3B2TS1AP0WSOKLKfDjXfKUFiXHYkVu7ieVQRRwp1PcMurjZLeOhnSEmXPLz5x4 18 | WIplX8bABL2Jc7W1DWgIl4c= 19 | -----END CERTIFICATE----- 20 | 21 | -------------------------------------------------------------------------------- /examples/example-with-encryption/packer/consul.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAyLq+RdgbRBDQkIHOHlKI3vFKOUtcJgtqiccTS2cyk+GeofFZ 3 | 1dI9CZxzBBwrVRJHYKXpJxgwZdlm6s3PS0yB8Of7mFw/9jcNpGP0rURPq2OwdZPN 4 | F/aP0ztxs0j4yJDxlV94Em6ZKmeUNvWKoZz/OCYv1yMf9qeHFTVwqSM2Floy4O3B 5 | kYx/FZQRBCd7e17403fFf2fMclBpl5tKVwASmVtUqcIRBGWtFp8B1lGWiCyfKfVd 6 | AXwk/Nww9PWO1mktcAVtqxDL1g54B1RXSa5Q9mqsVBMHO8+er6/sw7jfHp47MocK 7 | jjDDQeo7oXgtPnD699Hfr1N8cC5jZ2X4j5oW0wIDAQABAoIBABAUdZ2evEBtoGeT 8 | HnHrZq+FYM7KLK2XiIdU/USzFagnENAkR3ALkXwtnkEcCJSeDD2/zLenBmmCSu5Q 9 | YmTD0jz8OHlyGmWc72jV4QnoSQGP0Nj6aBmdVNQ1FNyp4dQfWFsA9Nt1JKvy9Dqa 10 | 3Oopa4CR/o8bbzoRHY8jYs0J1xFfMBEDDpL3JZVhHaJFsN+NNmDDCXcouEG5wPP+ 11 | G5fBrj2CNze9lOclNrcIr0U7CpvDpgVfBC77o8//lt/SVeY07Wkv/q+XY1BNNUZA 12 | JWSlHDMcmU5mUKBKbndaOChUpG9TlbIuX+fsIZGrimREu8DXAooCuva43f7i6Q7f 13 | PQvWObECgYEA2wGTuvrrZyaB2lK77266dTilIHZloegYg8+P8HB81UiAcyspp/Gm 14 | 9E6a5ut0O4CxbODkjAkEoVSvTC+RQmx0+VJ5CF1F8nfKRO0x94dJZm4rV8kBtIpQ 15 | 
xIqLkfwZQS83+bcIkQb2x51qaCW1WocFdRZBXcMeD8NkJrmZSwEaSHcCgYEA6qLV 16 | BgCNwctw3B+OsoK9YtaJMrpMNxlJB1eMM0mh3LWTKljQjYL9PcqtryhjEY30O+P6 17 | /h/HDg+Ua97JWMXg6ZoEjR0f2IFyclLr4Cjd/+ENeaavdOUItsEljH13ZlkdQhQ2 18 | f3ZzWg4pONHqzos0WtLGx9+73/XKiEU9Umq+V4UCgYAk1D4QqpOdtBcQLOuX1yoZ 19 | /l2eeOqccJh2yhftmi0T/O5E79RAft3Rpcpc3Omd/AVaRvOILRgPPVuBV1p5Lh35 20 | s4nRA0Z5APYN40mM075fMFWvnIjh4IS2mIniEd/5W0RxhPs20FH9vZYgT38Q6zXa 21 | u9RgPSi4A+a6sGl7+IJ//wKBgEZRoJgY6GMRDnTZ/FEhf5K0iw+p55eHu2w436qr 22 | QZg8zqsdTPtsK2suZ9kbuH0VVwHv9FPewDBFmQyvCxbQzvQTlZOZC+c9/Eeo9EoM 23 | 1WOPVhOEUgctcOwcfJ1bKMKCqJMaLa8Ll8yAe/gsaTuXWoxxqONT+SFIr3chkRw3 24 | I25lAoGAfbGx12bI2Dt/klcBjEkDRMmd/bPE88MhSRd8kiBY6tLFgCl3Rrsj42YJ 25 | etL2HVPQ/pP6k7B1NJwTgGTDZvT2CTADBIxAMM03jsBDG5rfk0UM1Wb7HhvWVk3X 26 | 8PqpMj3VjGQYGqhtVNYwqBmAPqz+Z+IDy9SWbMXjeeYlkEVUCWk= 27 | -----END RSA PRIVATE KEY----- 28 | 29 | -------------------------------------------------------------------------------- /examples/example-with-encryption/user-data-client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in client mode. Note that this script assumes it's running in an AMI 4 | # built from the Packer template in examples/consul-ami/consul.json. 5 | 6 | set -e 7 | 8 | # Send the log output from this script to user-data.log, syslog, and the console 9 | # From: https://alestic.com/2010/12/ec2-user-data-output/ 10 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 11 | 12 | # These variables are passed in via Terraform template interplation 13 | if [[ "${enable_gossip_encryption}" == "true" && ! -z "${gossip_encryption_key}" ]]; then 14 | # Note that setting the encryption key in plain text here means that it will be readable from the Terraform state file 15 | # and/or the EC2 API/console. 
We're doing this for simplicity, but in a real production environment you should pass an 16 | # encrypted key to Terraform and decrypt it before passing it to run-consul with something like KMS. 17 | gossip_encryption_configuration="--enable-gossip-encryption --gossip-encryption-key ${gossip_encryption_key}" 18 | fi 19 | 20 | if [[ "${enable_rpc_encryption}" == "true" && ! -z "${ca_path}" && ! -z "${cert_file_path}" && ! -z "${key_file_path}" ]]; then 21 | rpc_encryption_configuration="--enable-rpc-encryption --ca-path ${ca_path} --cert-file-path ${cert_file_path} --key-file-path ${key_file_path}" 22 | fi 23 | 24 | /opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" $gossip_encryption_configuration $rpc_encryption_configuration 25 | 26 | # You could add commands to boot your other apps here -------------------------------------------------------------------------------- /examples/example-with-encryption/user-data-server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in server mode. Note that this script assumes it's running in an AMI 4 | # built from the Packer template in examples/consul-ami/consul.json. 5 | 6 | set -e 7 | 8 | # Send the log output from this script to user-data.log, syslog, and the console 9 | # From: https://alestic.com/2010/12/ec2-user-data-output/ 10 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 11 | 12 | # These variables are passed in via Terraform template interpolation 13 | if [[ "${enable_gossip_encryption}" == "true" && ! -z "${gossip_encryption_key}" ]]; then 14 | # Note that setting the encryption key in plain text here means that it will be readable from the Terraform state file 15 | # and/or the EC2 API/console. 
We're doing this for simplicity, but in a real production environment you should pass an 16 | # encrypted key to Terraform and decrypt it before passing it to run-consul with something like KMS. 17 | gossip_encryption_configuration="--enable-gossip-encryption --gossip-encryption-key ${gossip_encryption_key}" 18 | fi 19 | 20 | if [[ "${enable_rpc_encryption}" == "true" && ! -z "${ca_path}" && ! -z "${cert_file_path}" && ! -z "${key_file_path}" ]]; then 21 | rpc_encryption_configuration="--enable-rpc-encryption --ca-path ${ca_path} --cert-file-path ${cert_file_path} --key-file-path ${key_file_path}" 22 | fi 23 | 24 | /opt/consul/bin/run-consul --server --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" $gossip_encryption_configuration $rpc_encryption_configuration -------------------------------------------------------------------------------- /examples/example-with-encryption/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # ENVIRONMENT VARIABLES 3 | # Define these secrets as environment variables 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | # AWS_ACCESS_KEY_ID 7 | # AWS_SECRET_ACCESS_KEY 8 | # AWS_DEFAULT_REGION 9 | 10 | # --------------------------------------------------------------------------------------------------------------------- 11 | # OPTIONAL PARAMETERS 12 | # These parameters have reasonable defaults. 13 | # --------------------------------------------------------------------------------------------------------------------- 14 | 15 | variable "ami_id" { 16 | description = "The ID of the AMI to run in the cluster. This should be an AMI built from the Packer template under examples/example-with-encryption/packer/consul-with-certs.json. 
To keep this example simple, we run the same AMI on both server and client nodes, but in real-world usage, your client nodes would also run your apps. If the default value is used, Terraform will look up the latest AMI build automatically." 17 | type = string 18 | default = null 19 | } 20 | 21 | variable "cluster_name" { 22 | description = "What to name the Consul cluster and all of its associated resources" 23 | type = string 24 | default = "consul-example" 25 | } 26 | 27 | variable "num_servers" { 28 | description = "The number of Consul server nodes to deploy. We strongly recommend using 3 or 5." 29 | type = number 30 | default = 3 31 | } 32 | 33 | variable "num_clients" { 34 | description = "The number of Consul client nodes to deploy. You typically run the Consul client alongside your apps, so set this value to however many Instances make sense for your app code." 35 | type = number 36 | default = 3 37 | } 38 | 39 | variable "cluster_tag_key" { 40 | description = "The tag the EC2 Instances will look for to automatically discover each other and form a cluster." 41 | type = string 42 | default = "consul-servers" 43 | } 44 | 45 | variable "ssh_key_name" { 46 | description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair." 47 | type = string 48 | default = null 49 | } 50 | 51 | variable "vpc_id" { 52 | description = "The ID of the VPC in which the nodes will be deployed. Uses default VPC if not supplied." 53 | type = string 54 | default = null 55 | } 56 | 57 | variable "spot_price" { 58 | description = "The maximum hourly price to pay for EC2 Spot Instances." 59 | type = string 60 | default = null 61 | } 62 | 63 | variable "enable_gossip_encryption" { 64 | description = "Encrypt gossip traffic between nodes. Must also specify encryption key." 
65 | type = bool 66 | default = true 67 | } 68 | 69 | variable "enable_rpc_encryption" { 70 | description = "Encrypt RPC traffic between nodes. Must also specify TLS certificates and keys." 71 | type = bool 72 | default = true 73 | } 74 | 75 | variable "gossip_encryption_key" { 76 | description = "16 byte cryptographic key to encrypt gossip traffic between nodes. Must set 'enable_gossip_encryption' to true for this to take effect. WARNING: Setting the encryption key here means it will be stored in plain text. We're doing this here to keep the example simple, but in production you should inject it more securely, e.g. retrieving it from KMS." 77 | type = string 78 | default = "" 79 | } 80 | 81 | variable "ca_path" { 82 | description = "Path to the directory of CA files used to verify outgoing connections." 83 | type = string 84 | default = "/opt/consul/tls/ca" 85 | } 86 | 87 | variable "cert_file_path" { 88 | description = "Path to the certificate file used to verify incoming connections." 89 | type = string 90 | default = "/opt/consul/tls/consul.crt.pem" 91 | } 92 | 93 | variable "key_file_path" { 94 | description = "Path to the certificate key used to verify incoming connections." 95 | type = string 96 | default = "/opt/consul/tls/consul.key.pem" 97 | } 98 | 99 | -------------------------------------------------------------------------------- /examples/root-example/README.md: -------------------------------------------------------------------------------- 1 | # Consul Cluster Example 2 | 3 | This folder shows an example of Terraform code that uses the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) to deploy 4 | a [Consul](https://www.consul.io/) cluster in [AWS](https://aws.amazon.com/). 
The cluster consists of two Auto Scaling 5 | Groups (ASGs): one with a small number of Consul server nodes, which are responsible for being part of the [consensus 6 | quorum](https://www.consul.io/docs/internals/consensus.html), and one with a larger number of client nodes, which 7 | would typically run alongside your apps: 8 | 9 | ![Consul architecture](https://github.com/hashicorp/terraform-aws-consul/blob/master/_docs/architecture.png?raw=true) 10 | 11 | You will need to create an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) 12 | that has Consul installed, which you can do using the [consul-ami example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami). Note that to keep 13 | this example simple, both the server ASG and client ASG are running the exact same AMI. In real-world usage, you'd 14 | probably have multiple client ASGs, and each of those ASGs would run a different AMI that has the Consul agent 15 | installed alongside your apps. 16 | 17 | For more info on how the Consul cluster works, check out the [consul-cluster](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) documentation. 18 | 19 | 20 | 21 | ## Quick start 22 | 23 | To deploy a Consul Cluster: 24 | 25 | 1. `git clone` this repo to your computer. 26 | 1. Optional: build a Consul AMI. See the [consul-ami example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami) documentation for instructions. Make sure to 27 | note down the ID of the AMI. 28 | 1. Install [Terraform](https://www.terraform.io/). 29 | 1. Open `variables.tf`, set the environment variables specified at the top of the file, and fill in any other variables that 30 | don't have a default. If you built a custom AMI, put the AMI ID into the `ami_id` variable. Otherwise, one of our 31 | public example AMIs will be used by default. 
These AMIs are great for learning/experimenting, but are NOT 32 | recommended for production use. 33 | 1. Run `terraform init`. 34 | 1. Run `terraform apply`. 35 | 1. Run the [consul-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-examples-helper/consul-examples-helper.sh) to 36 | print out the IP addresses of the Consul servers and some example commands you can run to interact with the cluster: 37 | `../consul-examples-helper/consul-examples-helper.sh`. 38 | 39 | -------------------------------------------------------------------------------- /examples/root-example/user-data-client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in client mode. Note that this script assumes it's running in an AMI 4 | # built from the Packer template in examples/consul-ami/consul.json. 5 | 6 | set -e 7 | 8 | # Send the log output from this script to user-data.log, syslog, and the console 9 | # From: https://alestic.com/2010/12/ec2-user-data-output/ 10 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 11 | 12 | # These variables are passed in via Terraform template interpolation 13 | /opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" 14 | 15 | # You could add commands to boot your other apps here -------------------------------------------------------------------------------- /examples/root-example/user-data-server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the 3 | # run-consul script to configure and start Consul in server mode. 
Note that this script assumes it's running in an AMI 4 | # built from the Packer template in examples/consul-ami/consul.json. 5 | 6 | set -e 7 | 8 | # Send the log output from this script to user-data.log, syslog, and the console 9 | # From: https://alestic.com/2010/12/ec2-user-data-output/ 10 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 11 | 12 | # These variables are passed in via Terraform template interpolation 13 | /opt/consul/bin/run-consul --server --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # DEPLOY A CONSUL CLUSTER IN AWS 3 | # These templates show an example of how to use the consul-cluster module to deploy Consul in AWS. We deploy two Auto 4 | # Scaling Groups (ASGs): one with a small number of Consul server nodes and one with a larger number of Consul client 5 | # nodes. Note that these templates assume that the AMI you provide via the ami_id input variable is built from 6 | # the examples/consul-ami/consul.json Packer template. 
7 | # --------------------------------------------------------------------------------------------------------------------- 8 | 9 | # ---------------------------------------------------------------------------------------------------------------------- 10 | # PROVIDER AND BACKEND 11 | # --------------------------------------------------------------------------------------------------------------------- 12 | provider "aws" { 13 | region = "us-east-1" 14 | version = "~> 2.0" 15 | } 16 | 17 | terraform { 18 | required_version = ">= 0.12.26" 19 | backend "s3" { 20 | # Lembre de trocar o bucket para o seu, não pode ser o mesmo nome 21 | bucket = "descomplicando-terraform-gomex-tfstates" 22 | key = "terraform-test.tfstate" 23 | region = "us-east-1" 24 | } 25 | } 26 | 27 | # --------------------------------------------------------------------------------------------------------------------- 28 | # AUTOMATICALLY LOOK UP THE LATEST PRE-BUILT AMI 29 | # This repo contains a CircleCI job that automatically builds and publishes the latest AMI by building the Packer 30 | # template at /examples/consul-ami upon every new release. The Terraform data source below automatically looks up the 31 | # latest AMI so that a simple "terraform apply" will just work without the user needing to manually build an AMI and 32 | # fill in the right value. 33 | # 34 | # !! WARNING !! These example AMIs are meant only for convenience when initially testing this repo. Do NOT use these example 35 | # AMIs in a production setting because it is important that you consciously think through the configuration you want 36 | # in your own production AMI. 37 | # 38 | # NOTE: This Terraform data source must return at least one AMI result or the entire template will fail. See 39 | # /_ci/publish-amis-in-new-account.md for more information. 
40 | # --------------------------------------------------------------------------------------------------------------------- 41 | data "aws_ami" "consul" { 42 | most_recent = true 43 | 44 | # If we change the AWS Account in which test are run, update this value. 45 | owners = var.owners 46 | 47 | filter { 48 | name = "virtualization-type" 49 | values = ["hvm"] 50 | } 51 | 52 | filter { 53 | name = "is-public" 54 | values = ["false"] 55 | } 56 | 57 | filter { 58 | name = "name" 59 | values = ["consul-ubuntu-*"] 60 | } 61 | } 62 | 63 | # --------------------------------------------------------------------------------------------------------------------- 64 | # DEPLOY THE CONSUL SERVER NODES 65 | # --------------------------------------------------------------------------------------------------------------------- 66 | 67 | module "consul_servers" { 68 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 69 | # to a specific version of the modules, such as the following example: 70 | # source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.0.1" 71 | source = "./modules/consul-cluster" 72 | 73 | cluster_name = "${var.cluster_name}-server" 74 | cluster_size = var.num_servers 75 | instance_type = "t2.micro" 76 | spot_price = var.spot_price 77 | 78 | # The EC2 Instances will use these tags to automatically discover each other and form a cluster 79 | cluster_tag_key = var.cluster_tag_key 80 | cluster_tag_value = var.cluster_name 81 | 82 | ami_id = "${var.ami_id == null ? 
data.aws_ami.consul.image_id : var.ami_id}" 83 | user_data = "${data.template_file.user_data_server.rendered}" 84 | 85 | vpc_id = data.aws_vpc.default.id 86 | subnet_ids = data.aws_subnet_ids.default.ids 87 | 88 | # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production 89 | # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. 90 | allowed_ssh_cidr_blocks = ["0.0.0.0/0"] 91 | 92 | allowed_inbound_cidr_blocks = ["0.0.0.0/0"] 93 | ssh_key_name = "hashiweek" 94 | 95 | tags = [ 96 | { 97 | key = "Environment" 98 | value = "development" 99 | propagate_at_launch = true 100 | } 101 | ] 102 | } 103 | 104 | # --------------------------------------------------------------------------------------------------------------------- 105 | # THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL SERVER EC2 INSTANCE WHEN IT'S BOOTING 106 | # This script will configure and start Consul 107 | # --------------------------------------------------------------------------------------------------------------------- 108 | 109 | data "template_file" "user_data_server" { 110 | template = file("${path.module}/examples/root-example/user-data-server.sh") 111 | 112 | vars = { 113 | cluster_tag_key = var.cluster_tag_key 114 | cluster_tag_value = var.cluster_name 115 | } 116 | } 117 | 118 | # --------------------------------------------------------------------------------------------------------------------- 119 | # DEPLOY THE CONSUL CLIENT NODES 120 | # Note that you do not have to use the consul-cluster module to deploy your clients. We do so simply because it 121 | # provides a convenient way to deploy an Auto Scaling Group with the necessary IAM and security group permissions for 122 | # Consul, but feel free to deploy those clients however you choose (e.g. a single EC2 Instance, a Docker cluster, etc). 
123 | # --------------------------------------------------------------------------------------------------------------------- 124 | 125 | module "consul_clients" { 126 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 127 | # to a specific version of the modules, such as the following example: 128 | # source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.0.1" 129 | source = "./modules/consul-cluster" 130 | 131 | cluster_name = "${var.cluster_name}-client" 132 | cluster_size = var.num_clients 133 | instance_type = "t2.micro" 134 | spot_price = var.spot_price 135 | 136 | cluster_tag_key = "consul-clients" 137 | cluster_tag_value = var.cluster_name 138 | 139 | ami_id = "${var.ami_id == null ? data.aws_ami.consul.image_id : var.ami_id}" 140 | user_data = "${data.template_file.user_data_client.rendered}" 141 | 142 | vpc_id = data.aws_vpc.default.id 143 | subnet_ids = data.aws_subnet_ids.default.ids 144 | 145 | # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production 146 | # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. 
147 | allowed_ssh_cidr_blocks = ["0.0.0.0/0"] 148 | 149 | allowed_inbound_cidr_blocks = ["0.0.0.0/0"] 150 | ssh_key_name = var.ssh_key_name 151 | } 152 | 153 | # --------------------------------------------------------------------------------------------------------------------- 154 | # THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL CLIENT EC2 INSTANCE WHEN IT'S BOOTING 155 | # This script will configure and start Consul 156 | # --------------------------------------------------------------------------------------------------------------------- 157 | 158 | data "template_file" "user_data_client" { 159 | template = file("${path.module}/examples/root-example/user-data-client.sh") 160 | 161 | vars = { 162 | cluster_tag_key = var.cluster_tag_key 163 | cluster_tag_value = var.cluster_name 164 | } 165 | } 166 | 167 | # --------------------------------------------------------------------------------------------------------------------- 168 | # DEPLOY CONSUL IN THE DEFAULT VPC AND SUBNETS 169 | # Using the default VPC and subnets makes this example easy to run and test, but it means Consul is accessible from the 170 | # public Internet. For a production deployment, we strongly recommend deploying into a custom VPC with private subnets. 171 | # --------------------------------------------------------------------------------------------------------------------- 172 | 173 | data "aws_vpc" "default" { 174 | default = var.vpc_id == null ? 
true : false 175 | id = "${var.vpc_id}" 176 | } 177 | 178 | data "aws_subnet_ids" "default" { 179 | vpc_id = data.aws_vpc.default.id 180 | } 181 | 182 | data "aws_region" "current" { 183 | } 184 | 185 | -------------------------------------------------------------------------------- /modules/README.md: -------------------------------------------------------------------------------- 1 | ## NOTE: About [/modules](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules) and [/examples](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples) 2 | 3 | HashiCorp's Terraform Registry requires every repo to have a `main.tf` in its root dir. The Consul code is broken down into multiple sub-modules, so they can't all be in the root dir [/](https://github.com/hashicorp/terraform-aws-consul/tree/master). Therefore, Consul's sub-modules are in the [/modules](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules) subdirectory, the example code is in the [/examples](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples) subdirectory, and the root dir [/](https://github.com/hashicorp/terraform-aws-consul/tree/master) _also_ has an example in it, as described in [root-example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example). 4 | 5 | More info: https://github.com/hashicorp/terraform-aws-consul/pull/79/files/079e75015a5d89e7ffc89997aa0904e9de4cdb97#r212763365 6 | -------------------------------------------------------------------------------- /modules/consul-client-security-group-rules/README.md: -------------------------------------------------------------------------------- 1 | # Consul Client Security Group Rules Module 2 | 3 | This folder contains a [Terraform](https://www.terraform.io/) module that defines the security group rules used by a 4 | [Consul](https://www.consul.io/) client to control the traffic that is allowed to go in and out. 
5 | 6 | Normally, you'd get these rules by default if you're using the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster), but if 7 | you're running Consul on top of a different cluster, then you can use this module to add the necessary security group 8 | rules to that cluster. For example, imagine you were using the [vault-cluster 9 | module](https://github.com/hashicorp/terraform-aws-vault/tree/master/modules/vault-cluster) to run a cluster of 10 | servers that have both Vault and Consul agent on each node: 11 | 12 | ```hcl 13 | module "vault_servers" { 14 | source = "git::git@github.com:hashicorp/terraform-aws-vault.git//modules/vault-cluster?ref=v0.0.1" 15 | 16 | # This AMI has both Vault and Consul installed 17 | ami_id = "ami-1234abcd" 18 | } 19 | ``` 20 | 21 | The `vault-cluster` module will provide the security group rules for Vault, but not for the Consul agent. To ensure those servers 22 | have the necessary ports open for using Consul, you can use this module as follows: 23 | 24 | ```hcl 25 | module "security_group_rules" { 26 | source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-client-security-group-rules?ref=v0.0.2" 27 | 28 | security_group_id = "${module.vault_servers.security_group_id}" 29 | 30 | # ... (other params omitted) ... 31 | } 32 | ``` 33 | 34 | Note the following parameters: 35 | 36 | * `source`: Use this parameter to specify the URL of this module. The double slash (`//`) is intentional 37 | and required. Terraform uses it to specify subfolders within a Git repo (see [module 38 | sources](https://www.terraform.io/docs/modules/sources.html)). The `ref` parameter specifies a specific Git tag in 39 | this repo. That way, instead of using the latest version of this module from the `master` branch, which 40 | will change every time you run Terraform, you're using a fixed version of the repo. 
41 | 42 | * `security_group_id`: Use this parameter to specify the ID of the security group to which the rules in this module 43 | should be added. 44 | 45 | You can find the other parameters in [variables.tf](variables.tf). 46 | 47 | Check out the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) for working sample code. 48 | -------------------------------------------------------------------------------- /modules/consul-client-security-group-rules/main.tf: -------------------------------------------------------------------------------- 1 | ## --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER 3 | # --------------------------------------------------------------------------------------------------------------------- 4 | 5 | terraform { 6 | # This module is now only being tested with Terraform 0.13.x. However, to make upgrading easier, we are setting 7 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 8 | # forwards compatible with 0.13.x code. 9 | required_version = ">= 0.12.26" 10 | } 11 | 12 | ## --------------------------------------------------------------------------------------------------------------------- 13 | # CREATE THE SECURITY GROUP RULES THAT CONTROL WHAT TRAFFIC CAN GO IN AND OUT OF A CONSUL AGENT CLUSTER 14 | # --------------------------------------------------------------------------------------------------------------------- 15 | 16 | resource "aws_security_group_rule" "allow_serf_lan_tcp_inbound" { 17 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 
1 : 0 18 | type = "ingress" 19 | from_port = var.serf_lan_port 20 | to_port = var.serf_lan_port 21 | protocol = "tcp" 22 | cidr_blocks = var.allowed_inbound_cidr_blocks 23 | 24 | security_group_id = var.security_group_id 25 | } 26 | 27 | resource "aws_security_group_rule" "allow_serf_lan_udp_inbound" { 28 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0 29 | type = "ingress" 30 | from_port = var.serf_lan_port 31 | to_port = var.serf_lan_port 32 | protocol = "udp" 33 | cidr_blocks = var.allowed_inbound_cidr_blocks 34 | 35 | security_group_id = var.security_group_id 36 | } 37 | 38 | resource "aws_security_group_rule" "allow_serf_lan_tcp_inbound_from_security_group_ids" { 39 | count = var.allowed_inbound_security_group_count 40 | type = "ingress" 41 | from_port = var.serf_lan_port 42 | to_port = var.serf_lan_port 43 | protocol = "tcp" 44 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 45 | 46 | security_group_id = var.security_group_id 47 | } 48 | 49 | resource "aws_security_group_rule" "allow_serf_lan_udp_inbound_from_security_group_ids" { 50 | count = var.allowed_inbound_security_group_count 51 | type = "ingress" 52 | from_port = var.serf_lan_port 53 | to_port = var.serf_lan_port 54 | protocol = "udp" 55 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 56 | 57 | security_group_id = var.security_group_id 58 | } 59 | 60 | # Similar to the *_inbound_from_security_group_ids rules, allow inbound from ourself 61 | 62 | resource "aws_security_group_rule" "allow_serf_lan_tcp_inbound_from_self" { 63 | type = "ingress" 64 | from_port = var.serf_lan_port 65 | to_port = var.serf_lan_port 66 | protocol = "tcp" 67 | self = true 68 | 69 | security_group_id = var.security_group_id 70 | } 71 | 72 | resource "aws_security_group_rule" "allow_serf_lan_udp_inbound_from_self" { 73 | type = "ingress" 74 | from_port = var.serf_lan_port 75 | to_port = var.serf_lan_port 76 | protocol = "udp" 77 | 
self = true 78 | 79 | security_group_id = var.security_group_id 80 | } 81 | 82 | -------------------------------------------------------------------------------- /modules/consul-client-security-group-rules/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # You must provide a value for each of these parameters. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "security_group_id" { 7 | description = "The ID of the security group to which we should add the Consul security group rules" 8 | } 9 | 10 | variable "allowed_inbound_cidr_blocks" { 11 | description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Consul" 12 | type = list(string) 13 | default = [] 14 | } 15 | 16 | # --------------------------------------------------------------------------------------------------------------------- 17 | # OPTIONAL PARAMETERS 18 | # These parameters have reasonable defaults. 19 | # --------------------------------------------------------------------------------------------------------------------- 20 | 21 | variable "allowed_inbound_security_group_ids" { 22 | description = "A list of security group IDs that will be allowed to connect to Consul" 23 | type = list(string) 24 | default = [] 25 | } 26 | 27 | variable "allowed_inbound_security_group_count" { 28 | description = "The number of entries in var.allowed_inbound_security_group_ids. Ideally, this value could be computed dynamically, but we pass this variable to a Terraform resource's 'count' property and Terraform requires that 'count' be computed with literals or data sources only." 
29 | default = 0 30 | } 31 | 32 | variable "serf_lan_port" { 33 | description = "The port used to handle gossip in the LAN. Required by all agents." 34 | default = 8301 35 | } 36 | 37 | -------------------------------------------------------------------------------- /modules/consul-cluster/main.tf: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER 3 | # ---------------------------------------------------------------------------------------------------------------------- 4 | 5 | terraform { 6 | # This module is now only being tested with Terraform 0.13.x. However, to make upgrading easier, we are setting 7 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 8 | # forwards compatible with 0.13.x code. 9 | required_version = ">= 0.12.26" 10 | } 11 | 12 | # --------------------------------------------------------------------------------------------------------------------- 13 | # CREATE AN AUTO SCALING GROUP (ASG) TO RUN CONSUL 14 | # --------------------------------------------------------------------------------------------------------------------- 15 | 16 | resource "aws_autoscaling_group" "autoscaling_group" { 17 | name_prefix = var.cluster_name 18 | 19 | launch_configuration = aws_launch_configuration.launch_configuration.name 20 | 21 | availability_zones = var.availability_zones 22 | vpc_zone_identifier = var.subnet_ids 23 | 24 | # Run a fixed number of instances in the ASG 25 | min_size = var.cluster_size 26 | max_size = var.cluster_size 27 | desired_capacity = var.cluster_size 28 | termination_policies = [var.termination_policies] 29 | 30 | health_check_type = var.health_check_type 31 | health_check_grace_period = var.health_check_grace_period 32 | wait_for_capacity_timeout = 
var.wait_for_capacity_timeout 33 | service_linked_role_arn = var.service_linked_role_arn 34 | 35 | enabled_metrics = var.enabled_metrics 36 | 37 | protect_from_scale_in = var.protect_from_scale_in 38 | 39 | tags = flatten( 40 | [ 41 | { 42 | key = "Name" 43 | value = var.cluster_name 44 | propagate_at_launch = true 45 | }, 46 | { 47 | key = var.cluster_tag_key 48 | value = var.cluster_tag_value 49 | propagate_at_launch = true 50 | }, 51 | var.tags, 52 | ] 53 | ) 54 | 55 | lifecycle { 56 | # As of AWS Provider 3.x, inline load_balancers and target_group_arns 57 | # in an aws_autoscaling_group take precedence over attachment resources. 58 | # Since the consul-cluster module does not define any Load Balancers, 59 | # it's safe to assume that we will always want to favor an attachment 60 | # over these inline properties. 61 | # 62 | # For further discussion and links to relevant documentation, see 63 | # https://github.com/hashicorp/terraform-aws-vault/issues/210 64 | ignore_changes = [load_balancers, target_group_arns] 65 | } 66 | } 67 | 68 | # --------------------------------------------------------------------------------------------------------------------- 69 | # CREATE LAUNCH CONFIGURATION TO DEFINE WHAT RUNS ON EACH INSTANCE IN THE ASG 70 | # --------------------------------------------------------------------------------------------------------------------- 71 | 72 | resource "aws_launch_configuration" "launch_configuration" { 73 | name_prefix = "${var.cluster_name}-" 74 | image_id = var.ami_id 75 | instance_type = var.instance_type 76 | user_data = var.user_data 77 | spot_price = var.spot_price 78 | 79 | iam_instance_profile = var.enable_iam_setup ? 
element( 80 | concat(aws_iam_instance_profile.instance_profile.*.name, [""]), 81 | 0, 82 | ) : var.iam_instance_profile_name 83 | key_name = var.ssh_key_name 84 | 85 | security_groups = concat( 86 | [aws_security_group.lc_security_group.id], 87 | var.additional_security_group_ids, 88 | ) 89 | placement_tenancy = var.tenancy 90 | associate_public_ip_address = var.associate_public_ip_address 91 | 92 | ebs_optimized = var.root_volume_ebs_optimized 93 | 94 | root_block_device { 95 | volume_type = var.root_volume_type 96 | volume_size = var.root_volume_size 97 | delete_on_termination = var.root_volume_delete_on_termination 98 | } 99 | 100 | # Important note: whenever using a launch configuration with an auto scaling group, you must set 101 | # create_before_destroy = true. However, as soon as you set create_before_destroy = true in one resource, you must 102 | # also set it in every resource that it depends on, or you'll get an error about cyclic dependencies (especially when 103 | # removing resources). 
For more info, see: 104 | # 105 | # https://www.terraform.io/docs/providers/aws/r/launch_configuration.html 106 | # https://terraform.io/docs/configuration/resources.html 107 | lifecycle { 108 | create_before_destroy = true 109 | } 110 | } 111 | 112 | # --------------------------------------------------------------------------------------------------------------------- 113 | # CREATE A SECURITY GROUP TO CONTROL WHAT REQUESTS CAN GO IN AND OUT OF EACH EC2 INSTANCE 114 | # --------------------------------------------------------------------------------------------------------------------- 115 | 116 | resource "aws_security_group" "lc_security_group" { 117 | name_prefix = var.cluster_name 118 | description = "Security group for the ${var.cluster_name} launch configuration" 119 | vpc_id = var.vpc_id 120 | 121 | # aws_launch_configuration.launch_configuration in this module sets create_before_destroy to true, which means 122 | # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors 123 | # when you try to do a terraform destroy. 124 | lifecycle { 125 | create_before_destroy = true 126 | } 127 | 128 | tags = merge( 129 | { 130 | "Name" = var.cluster_name 131 | }, 132 | var.security_group_tags, 133 | ) 134 | } 135 | 136 | resource "aws_security_group_rule" "allow_ssh_inbound" { 137 | count = length(var.allowed_ssh_cidr_blocks) >= 1 ? 
1 : 0 138 | type = "ingress" 139 | from_port = var.ssh_port 140 | to_port = var.ssh_port 141 | protocol = "tcp" 142 | cidr_blocks = var.allowed_ssh_cidr_blocks 143 | 144 | security_group_id = aws_security_group.lc_security_group.id 145 | } 146 | 147 | resource "aws_security_group_rule" "allow_ssh_inbound_from_security_group_ids" { 148 | count = var.allowed_ssh_security_group_count 149 | type = "ingress" 150 | from_port = var.ssh_port 151 | to_port = var.ssh_port 152 | protocol = "tcp" 153 | source_security_group_id = element(var.allowed_ssh_security_group_ids, count.index) 154 | 155 | security_group_id = aws_security_group.lc_security_group.id 156 | } 157 | 158 | resource "aws_security_group_rule" "allow_all_outbound" { 159 | type = "egress" 160 | from_port = 0 161 | to_port = 0 162 | protocol = "-1" 163 | cidr_blocks = ["0.0.0.0/0"] 164 | 165 | security_group_id = aws_security_group.lc_security_group.id 166 | } 167 | 168 | # --------------------------------------------------------------------------------------------------------------------- 169 | # THE CONSUL-SPECIFIC INBOUND/OUTBOUND RULES COME FROM THE CONSUL-SECURITY-GROUP-RULES MODULE 170 | # --------------------------------------------------------------------------------------------------------------------- 171 | 172 | module "security_group_rules" { 173 | source = "../consul-security-group-rules" 174 | 175 | security_group_id = aws_security_group.lc_security_group.id 176 | allowed_inbound_cidr_blocks = var.allowed_inbound_cidr_blocks 177 | allowed_inbound_security_group_ids = var.allowed_inbound_security_group_ids 178 | allowed_inbound_security_group_count = var.allowed_inbound_security_group_count 179 | 180 | server_rpc_port = var.server_rpc_port 181 | cli_rpc_port = var.cli_rpc_port 182 | serf_lan_port = var.serf_lan_port 183 | serf_wan_port = var.serf_wan_port 184 | http_api_port = var.http_api_port 185 | dns_port = var.dns_port 186 | } 187 | 188 | # 
--------------------------------------------------------------------------------------------------------------------- 189 | # ATTACH AN IAM ROLE TO EACH EC2 INSTANCE 190 | # We can use the IAM role to grant the instance IAM permissions so we can use the AWS CLI without having to figure out 191 | # how to get our secret AWS access keys onto the box. 192 | # --------------------------------------------------------------------------------------------------------------------- 193 | 194 | resource "aws_iam_instance_profile" "instance_profile" { 195 | count = var.enable_iam_setup ? 1 : 0 196 | 197 | name_prefix = var.cluster_name 198 | path = var.instance_profile_path 199 | role = element(concat(aws_iam_role.instance_role.*.name, [""]), 0) 200 | 201 | # aws_launch_configuration.launch_configuration in this module sets create_before_destroy to true, which means 202 | # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors 203 | # when you try to do a terraform destroy. 204 | lifecycle { 205 | create_before_destroy = true 206 | } 207 | } 208 | 209 | resource "aws_iam_role" "instance_role" { 210 | count = var.enable_iam_setup ? 1 : 0 211 | 212 | name_prefix = var.cluster_name 213 | assume_role_policy = data.aws_iam_policy_document.instance_role.json 214 | 215 | # aws_iam_instance_profile.instance_profile in this module sets create_before_destroy to true, which means 216 | # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors 217 | # when you try to do a terraform destroy. 
218 | lifecycle { 219 | create_before_destroy = true 220 | } 221 | } 222 | 223 | data "aws_iam_policy_document" "instance_role" { 224 | statement { 225 | effect = "Allow" 226 | actions = ["sts:AssumeRole"] 227 | 228 | principals { 229 | type = "Service" 230 | identifiers = ["ec2.amazonaws.com"] 231 | } 232 | } 233 | } 234 | 235 | # --------------------------------------------------------------------------------------------------------------------- 236 | # THE IAM POLICIES COME FROM THE CONSUL-IAM-POLICIES MODULE 237 | # --------------------------------------------------------------------------------------------------------------------- 238 | 239 | module "iam_policies" { 240 | source = "../consul-iam-policies" 241 | 242 | enabled = var.enable_iam_setup 243 | iam_role_id = element(concat(aws_iam_role.instance_role.*.id, [""]), 0) 244 | } 245 | 246 | -------------------------------------------------------------------------------- /modules/consul-cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "asg_name" { 2 | value = aws_autoscaling_group.autoscaling_group.name 3 | description = "This is the name for the autoscaling group generated by the module" 4 | } 5 | 6 | output "cluster_size" { 7 | value = aws_autoscaling_group.autoscaling_group.desired_capacity 8 | description = "This is the desired size of the consul cluster in the autoscaling group" 9 | } 10 | 11 | output "launch_config_name" { 12 | value = aws_launch_configuration.launch_configuration.name 13 | description = "This is the name of the launch_configuration used to bootstrap the cluster instances" 14 | } 15 | 16 | output "iam_role_arn" { 17 | value = element(concat(aws_iam_role.instance_role.*.arn, [""]), 0) 18 | description = "This is the arn of instance role if enable_iam_setup variable is set to true" 19 | } 20 | 21 | output "iam_role_id" { 22 | value = element(concat(aws_iam_role.instance_role.*.id, [""]), 0) 23 | description = "This is the id 
of instance role if enable_iam_setup variable is set to true" 24 | } 25 | 26 | output "security_group_id" { 27 | value = aws_security_group.lc_security_group.id 28 | description = "This is the id of security group that governs ingress and egress for the cluster instances" 29 | } 30 | 31 | output "cluster_tag_key" { 32 | value = var.cluster_tag_key 33 | description = "This is the tag key used to allow the consul servers to autojoin" 34 | } 35 | 36 | output "cluster_tag_value" { 37 | value = var.cluster_tag_value 38 | description = "This is the tag value used to allow the consul servers to autojoin" 39 | } 40 | 41 | -------------------------------------------------------------------------------- /modules/consul-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # You must provide a value for each of these parameters. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "cluster_name" { 7 | description = "The name of the Consul cluster (e.g. consul-stage). This variable is used to namespace all resources created by this module." 8 | type = string 9 | } 10 | 11 | variable "ami_id" { 12 | description = "The ID of the AMI to run in this cluster. Should be an AMI that had Consul installed and configured by the install-consul module." 13 | type = string 14 | } 15 | 16 | variable "instance_type" { 17 | description = "The type of EC2 Instances to run for each node in the cluster (e.g. t2.micro)." 
18 | type = string 19 | } 20 | 21 | variable "vpc_id" { 22 | description = "The ID of the VPC in which to deploy the Consul cluster" 23 | type = string 24 | } 25 | 26 | variable "allowed_inbound_cidr_blocks" { 27 | description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Consul" 28 | type = list(string) 29 | } 30 | 31 | variable "user_data" { 32 | description = "A User Data script to execute while the server is booting. We recommend passing in a bash script that executes the run-consul script, which should have been installed in the Consul AMI by the install-consul module." 33 | type = string 34 | } 35 | 36 | # --------------------------------------------------------------------------------------------------------------------- 37 | # OPTIONAL PARAMETERS 38 | # These parameters have reasonable defaults. 39 | # --------------------------------------------------------------------------------------------------------------------- 40 | 41 | variable "cluster_size" { 42 | description = "The number of nodes to have in the Consul cluster. We strongly recommended that you use either 3 or 5." 43 | type = number 44 | default = 3 45 | } 46 | 47 | variable "cluster_tag_key" { 48 | description = "Add a tag with this key and the value var.cluster_tag_value to each Instance in the ASG. This can be used to automatically find other Consul nodes and form a cluster." 49 | type = string 50 | default = "consul-servers" 51 | } 52 | 53 | variable "cluster_tag_value" { 54 | description = "Add a tag with key var.clsuter_tag_key and this value to each Instance in the ASG. This can be used to automatically find other Consul nodes and form a cluster." 55 | type = string 56 | default = "auto-join" 57 | } 58 | 59 | variable "subnet_ids" { 60 | description = "The subnet IDs into which the EC2 Instances should be deployed. We recommend one subnet ID per node in the cluster_size variable. 
At least one of var.subnet_ids or var.availability_zones must be non-empty." 61 | type = list(string) 62 | default = null 63 | } 64 | 65 | variable "availability_zones" { 66 | description = "The availability zones into which the EC2 Instances should be deployed. We recommend one availability zone per node in the cluster_size variable. At least one of var.subnet_ids or var.availability_zones must be non-empty." 67 | type = list(string) 68 | default = null 69 | } 70 | 71 | variable "ssh_key_name" { 72 | description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair." 73 | type = string 74 | default = null 75 | } 76 | 77 | variable "allowed_ssh_cidr_blocks" { 78 | description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow SSH connections" 79 | type = list(string) 80 | default = [] 81 | } 82 | 83 | variable "allowed_ssh_security_group_ids" { 84 | description = "A list of security group IDs from which the EC2 Instances will allow SSH connections" 85 | type = list(string) 86 | default = [] 87 | } 88 | 89 | variable "allowed_ssh_security_group_count" { 90 | description = "The number of entries in var.allowed_ssh_security_group_ids. Ideally, this value could be computed dynamically, but we pass this variable to a Terraform resource's 'count' property and Terraform requires that 'count' be computed with literals or data sources only." 91 | type = number 92 | default = 0 93 | } 94 | 95 | variable "allowed_inbound_security_group_ids" { 96 | description = "A list of security group IDs that will be allowed to connect to Consul" 97 | type = list(string) 98 | default = [] 99 | } 100 | 101 | variable "allowed_inbound_security_group_count" { 102 | description = "The number of entries in var.allowed_inbound_security_group_ids. 
Ideally, this value could be computed dynamically, but we pass this variable to a Terraform resource's 'count' property and Terraform requires that 'count' be computed with literals or data sources only."
  type    = number
  default = 0
}

variable "additional_security_group_ids" {
  description = "A list of additional security group IDs to add to Consul EC2 Instances"
  type        = list(string)
  default     = []
}

variable "security_group_tags" {
  description = "Tags to be applied to the LC security group"
  type        = map(string)
  default     = {}
}

variable "termination_policies" {
  # NOTE: despite the plural name, this is a single policy name (type = string); the consul-cluster module wraps it
  # in a list when setting termination_policies on the aws_autoscaling_group resource.
  description = "A single termination policy name to decide how the instances in the auto scaling group should be terminated. The allowed values are OldestInstance, NewestInstance, OldestLaunchConfiguration, ClosestToNextInstanceHour, Default."
  type        = string
  default     = "Default"
}

variable "associate_public_ip_address" {
  description = "If set to true, associate a public IP address with each EC2 Instance in the cluster."
  type        = bool
  default     = false
}

variable "spot_price" {
  description = "The maximum hourly price to pay for EC2 Spot Instances."
  type        = number
  default     = null
}

variable "tenancy" {
  description = "The tenancy of the instance. Must be one of: null, default or dedicated. For EC2 Spot Instances only null or dedicated can be used."
  type        = string
  default     = null
}

variable "root_volume_ebs_optimized" {
  description = "If true, the launched EC2 instance will be EBS-optimized."
  type        = bool
  default     = false
}

variable "root_volume_type" {
  description = "The type of volume. Must be one of: standard, gp2, or io1."
151 | type = string 152 | default = "standard" 153 | } 154 | 155 | variable "root_volume_size" { 156 | description = "The size, in GB, of the root EBS volume." 157 | type = number 158 | default = 50 159 | } 160 | 161 | variable "root_volume_delete_on_termination" { 162 | description = "Whether the volume should be destroyed on instance termination." 163 | type = bool 164 | default = true 165 | } 166 | 167 | variable "wait_for_capacity_timeout" { 168 | description = "A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. Setting this to '0' causes Terraform to skip all Capacity Waiting behavior." 169 | type = string 170 | default = "10m" 171 | } 172 | 173 | variable "service_linked_role_arn" { 174 | description = "The ARN of the service-linked role that the ASG will use to call other AWS services" 175 | type = string 176 | default = null 177 | } 178 | 179 | variable "health_check_type" { 180 | description = "Controls how health checking is done. Must be one of EC2 or ELB." 181 | type = string 182 | default = "EC2" 183 | } 184 | 185 | variable "health_check_grace_period" { 186 | description = "Time, in seconds, after instance comes into service before checking health." 187 | type = number 188 | default = 300 189 | } 190 | 191 | variable "instance_profile_path" { 192 | description = "Path in which to create the IAM instance profile." 193 | type = string 194 | default = "/" 195 | } 196 | 197 | variable "server_rpc_port" { 198 | description = "The port used by servers to handle incoming requests from other agents." 199 | type = number 200 | default = 8300 201 | } 202 | 203 | variable "cli_rpc_port" { 204 | description = "The port used by all agents to handle RPC from the CLI." 205 | type = number 206 | default = 8400 207 | } 208 | 209 | variable "serf_lan_port" { 210 | description = "The port used to handle gossip in the LAN. Required by all agents." 
211 | type = number 212 | default = 8301 213 | } 214 | 215 | variable "serf_wan_port" { 216 | description = "The port used by servers to gossip over the WAN to other servers." 217 | type = number 218 | default = 8302 219 | } 220 | 221 | variable "http_api_port" { 222 | description = "The port used by clients to talk to the HTTP API" 223 | type = number 224 | default = 8500 225 | } 226 | 227 | variable "dns_port" { 228 | description = "The port used to resolve DNS queries." 229 | type = number 230 | default = 8600 231 | } 232 | 233 | variable "ssh_port" { 234 | description = "The port used for SSH connections" 235 | type = number 236 | default = 22 237 | } 238 | 239 | variable "tags" { 240 | description = "List of extra tag blocks added to the autoscaling group configuration. Each element in the list is a map containing keys 'key', 'value', and 'propagate_at_launch' mapped to the respective values." 241 | type = list(object({ key : string, value : string, propagate_at_launch : bool })) 242 | default = [] 243 | } 244 | 245 | variable "enabled_metrics" { 246 | description = "List of autoscaling group metrics to enable." 247 | type = list(string) 248 | default = [] 249 | } 250 | 251 | variable "enable_iam_setup" { 252 | description = "If true, create the IAM Role, IAM Instance Profile, and IAM Policies. If false, these will not be created, and you can pass in your own IAM Instance Profile via var.iam_instance_profile_name." 253 | type = bool 254 | default = true 255 | } 256 | 257 | variable "iam_instance_profile_name" { 258 | description = "If enable_iam_setup is false then this will be the name of the IAM instance profile to attach" 259 | type = string 260 | default = null 261 | } 262 | 263 | variable "protect_from_scale_in" { 264 | description = "(Optional) Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events." 
  type    = bool
  default = false
}
--------------------------------------------------------------------------------
/modules/consul-iam-policies/README.md:
--------------------------------------------------------------------------------
# Consul IAM Policies

This folder contains a [Terraform](https://www.terraform.io/) module that defines the IAM Policies used by a
[Consul](https://www.consul.io/) cluster.

Normally, you'd get these policies by default if you're using the [consul-cluster submodule](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster),
but if you're running Consul on top of a different cluster (e.g. you're co-locating Consul with Nomad), then you can
use this module to add the necessary IAM policies to that cluster. For example, imagine you were using the
[nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) to run a
cluster of servers that have both Nomad and Consul on each node:

```hcl
module "nomad_servers" {
  source = "git::git@github.com:hashicorp/terraform-aws-nomad.git//modules/nomad-cluster?ref=v0.0.1"

  # This AMI has both Nomad and Consul installed
  ami_id = "ami-1234abcd"
}
```

The `nomad-cluster` module will provide the IAM policies for Nomad, but not for Consul. To ensure those servers
have the necessary IAM permissions to run Consul, you can use this module as follows:

```hcl
module "iam_policies" {
  source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-iam-policies?ref=v0.0.2"

  iam_role_id = "${module.nomad_servers.iam_role_id}"

  # ... (other params omitted) ...
}
```

Note the following parameters:

* `source`: Use this parameter to specify the URL of this module. The double slash (`//`) is intentional
and required.
Terraform uses it to specify subfolders within a Git repo (see [module 38 | sources](https://www.terraform.io/docs/modules/sources.html)). The `ref` parameter specifies a specific Git tag in 39 | this repo. That way, instead of using the latest version of this module from the `master` branch, which 40 | will change every time you run Terraform, you're using a fixed version of the repo. 41 | 42 | * `iam_role_id`: Use this parameter to specify the ID of the IAM Role to which the rules in this module 43 | should be added. 44 | 45 | You can find the other parameters in [variables.tf](variables.tf). 46 | 47 | Check out the [consul-cluster example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example) for working sample code. 48 | -------------------------------------------------------------------------------- /modules/consul-iam-policies/main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER 3 | # --------------------------------------------------------------------------------------------------------------------- 4 | 5 | terraform { 6 | # This module is now only being tested with Terraform 0.13.x. However, to make upgrading easier, we are setting 7 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 8 | # forwards compatible with 0.13.x code. 
9 | required_version = ">= 0.12.26" 10 | } 11 | 12 | # --------------------------------------------------------------------------------------------------------------------- 13 | # ATTACH AN IAM POLICY THAT ALLOWS THE CONSUL NODES TO AUTOMATICALLY DISCOVER EACH OTHER AND FORM A CLUSTER 14 | # --------------------------------------------------------------------------------------------------------------------- 15 | 16 | resource "aws_iam_role_policy" "auto_discover_cluster" { 17 | count = var.enabled ? 1 : 0 18 | name = "auto-discover-cluster" 19 | role = var.iam_role_id 20 | policy = data.aws_iam_policy_document.auto_discover_cluster.json 21 | } 22 | 23 | data "aws_iam_policy_document" "auto_discover_cluster" { 24 | statement { 25 | effect = "Allow" 26 | 27 | actions = [ 28 | "ec2:DescribeInstances", 29 | "ec2:DescribeTags", 30 | "autoscaling:DescribeAutoScalingGroups", 31 | ] 32 | 33 | resources = ["*"] 34 | } 35 | } 36 | 37 | -------------------------------------------------------------------------------- /modules/consul-iam-policies/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # You must provide a value for each of these parameters. 
4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "iam_role_id" { 7 | description = "The ID of the IAM Role to which these IAM policies should be attached" 8 | type = string 9 | } 10 | 11 | variable "enabled" { 12 | description = "Give the option to disable this module if required" 13 | type = bool 14 | default = true 15 | } 16 | 17 | -------------------------------------------------------------------------------- /modules/consul-security-group-rules/README.md: -------------------------------------------------------------------------------- 1 | # Consul Security Group Rules Module 2 | 3 | This folder contains a [Terraform](https://www.terraform.io/) module that defines the security group rules used by a 4 | [Consul](https://www.consul.io/) cluster to control the traffic that is allowed to go in and out of the cluster. 5 | 6 | Normally, you'd get these rules by default if you're using the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster), but if 7 | you're running Consul on top of a different cluster, then you can use this module to add the necessary security group 8 | rules to that cluster. For example, imagine you were using the [nomad-cluster 9 | module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) to run a cluster of 10 | servers that have both Nomad and Consul on each node: 11 | 12 | ```hcl 13 | module "nomad_servers" { 14 | source = "git::git@github.com:hashicorp/terraform-aws-nomad.git//modules/nomad-cluster?ref=v0.0.1" 15 | 16 | # This AMI has both Nomad and Consul installed 17 | ami_id = "ami-1234abcd" 18 | } 19 | ``` 20 | 21 | The `nomad-cluster` module will provide the security group rules for Nomad, but not for Consul. 
To ensure those servers 22 | have the necessary ports open for using Consul, you can use this module as follows: 23 | 24 | ```hcl 25 | module "security_group_rules" { 26 | source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-security-group-rules?ref=v0.0.2" 27 | 28 | security_group_id = "${module.nomad_servers.security_group_id}" 29 | 30 | # ... (other params omitted) ... 31 | } 32 | ``` 33 | 34 | Note the following parameters: 35 | 36 | * `source`: Use this parameter to specify the URL of this module. The double slash (`//`) is intentional 37 | and required. Terraform uses it to specify subfolders within a Git repo (see [module 38 | sources](https://www.terraform.io/docs/modules/sources.html)). The `ref` parameter specifies a specific Git tag in 39 | this repo. That way, instead of using the latest version of this module from the `master` branch, which 40 | will change every time you run Terraform, you're using a fixed version of the repo. 41 | 42 | * `security_group_id`: Use this parameter to specify the ID of the security group to which the rules in this module 43 | should be added. 44 | 45 | You can find the other parameters in [variables.tf](variables.tf). 46 | 47 | Check out the [consul-cluster example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example) for working sample code. 48 | -------------------------------------------------------------------------------- /modules/consul-security-group-rules/main.tf: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER 3 | # ---------------------------------------------------------------------------------------------------------------------- 4 | terraform { 5 | # This module is now only being tested with Terraform 0.13.x. 
However, to make upgrading easier, we are setting 6 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 7 | # forwards compatible with 0.13.x code. 8 | required_version = ">= 0.12.26" 9 | } 10 | 11 | # --------------------------------------------------------------------------------------------------------------------- 12 | # CREATE THE SECURITY GROUP RULES THAT CONTROL WHAT TRAFFIC CAN GO IN AND OUT OF A CONSUL CLUSTER 13 | # --------------------------------------------------------------------------------------------------------------------- 14 | 15 | resource "aws_security_group_rule" "allow_server_rpc_inbound" { 16 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0 17 | type = "ingress" 18 | from_port = var.server_rpc_port 19 | to_port = var.server_rpc_port 20 | protocol = "tcp" 21 | cidr_blocks = var.allowed_inbound_cidr_blocks 22 | 23 | security_group_id = var.security_group_id 24 | } 25 | 26 | resource "aws_security_group_rule" "allow_cli_rpc_inbound" { 27 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0 28 | type = "ingress" 29 | from_port = var.cli_rpc_port 30 | to_port = var.cli_rpc_port 31 | protocol = "tcp" 32 | cidr_blocks = var.allowed_inbound_cidr_blocks 33 | 34 | security_group_id = var.security_group_id 35 | } 36 | 37 | resource "aws_security_group_rule" "allow_serf_wan_tcp_inbound" { 38 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0 39 | type = "ingress" 40 | from_port = var.serf_wan_port 41 | to_port = var.serf_wan_port 42 | protocol = "tcp" 43 | cidr_blocks = var.allowed_inbound_cidr_blocks 44 | 45 | security_group_id = var.security_group_id 46 | } 47 | 48 | resource "aws_security_group_rule" "allow_serf_wan_udp_inbound" { 49 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 
1 : 0 50 | type = "ingress" 51 | from_port = var.serf_wan_port 52 | to_port = var.serf_wan_port 53 | protocol = "udp" 54 | cidr_blocks = var.allowed_inbound_cidr_blocks 55 | 56 | security_group_id = var.security_group_id 57 | } 58 | 59 | resource "aws_security_group_rule" "allow_http_api_inbound" { 60 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0 61 | type = "ingress" 62 | from_port = var.http_api_port 63 | to_port = var.http_api_port 64 | protocol = "tcp" 65 | cidr_blocks = var.allowed_inbound_cidr_blocks 66 | 67 | security_group_id = var.security_group_id 68 | } 69 | 70 | resource "aws_security_group_rule" "allow_dns_tcp_inbound" { 71 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0 72 | type = "ingress" 73 | from_port = var.dns_port 74 | to_port = var.dns_port 75 | protocol = "tcp" 76 | cidr_blocks = var.allowed_inbound_cidr_blocks 77 | 78 | security_group_id = var.security_group_id 79 | } 80 | 81 | resource "aws_security_group_rule" "allow_dns_udp_inbound" { 82 | count = length(var.allowed_inbound_cidr_blocks) >= 1 ? 
1 : 0 83 | type = "ingress" 84 | from_port = var.dns_port 85 | to_port = var.dns_port 86 | protocol = "udp" 87 | cidr_blocks = var.allowed_inbound_cidr_blocks 88 | 89 | security_group_id = var.security_group_id 90 | } 91 | 92 | resource "aws_security_group_rule" "allow_server_rpc_inbound_from_security_group_ids" { 93 | count = var.allowed_inbound_security_group_count 94 | type = "ingress" 95 | from_port = var.server_rpc_port 96 | to_port = var.server_rpc_port 97 | protocol = "tcp" 98 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 99 | 100 | security_group_id = var.security_group_id 101 | } 102 | 103 | resource "aws_security_group_rule" "allow_cli_rpc_inbound_from_security_group_ids" { 104 | count = var.allowed_inbound_security_group_count 105 | type = "ingress" 106 | from_port = var.cli_rpc_port 107 | to_port = var.cli_rpc_port 108 | protocol = "tcp" 109 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 110 | 111 | security_group_id = var.security_group_id 112 | } 113 | 114 | resource "aws_security_group_rule" "allow_serf_wan_tcp_inbound_from_security_group_ids" { 115 | count = var.allowed_inbound_security_group_count 116 | type = "ingress" 117 | from_port = var.serf_wan_port 118 | to_port = var.serf_wan_port 119 | protocol = "tcp" 120 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 121 | 122 | security_group_id = var.security_group_id 123 | } 124 | 125 | resource "aws_security_group_rule" "allow_serf_wan_udp_inbound_from_security_group_ids" { 126 | count = var.allowed_inbound_security_group_count 127 | type = "ingress" 128 | from_port = var.serf_wan_port 129 | to_port = var.serf_wan_port 130 | protocol = "udp" 131 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 132 | 133 | security_group_id = var.security_group_id 134 | } 135 | 136 | resource "aws_security_group_rule" 
"allow_http_api_inbound_from_security_group_ids" { 137 | count = var.allowed_inbound_security_group_count 138 | type = "ingress" 139 | from_port = var.http_api_port 140 | to_port = var.http_api_port 141 | protocol = "tcp" 142 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 143 | 144 | security_group_id = var.security_group_id 145 | } 146 | 147 | resource "aws_security_group_rule" "allow_dns_tcp_inbound_from_security_group_ids" { 148 | count = var.allowed_inbound_security_group_count 149 | type = "ingress" 150 | from_port = var.dns_port 151 | to_port = var.dns_port 152 | protocol = "tcp" 153 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 154 | 155 | security_group_id = var.security_group_id 156 | } 157 | 158 | resource "aws_security_group_rule" "allow_dns_udp_inbound_from_security_group_ids" { 159 | count = var.allowed_inbound_security_group_count 160 | type = "ingress" 161 | from_port = var.dns_port 162 | to_port = var.dns_port 163 | protocol = "udp" 164 | source_security_group_id = element(var.allowed_inbound_security_group_ids, count.index) 165 | 166 | security_group_id = var.security_group_id 167 | } 168 | 169 | # Similar to the *_inbound_from_security_group_ids rules, allow inbound from ourself 170 | 171 | resource "aws_security_group_rule" "allow_server_rpc_inbound_from_self" { 172 | type = "ingress" 173 | from_port = var.server_rpc_port 174 | to_port = var.server_rpc_port 175 | protocol = "tcp" 176 | self = true 177 | 178 | security_group_id = var.security_group_id 179 | } 180 | 181 | resource "aws_security_group_rule" "allow_cli_rpc_inbound_from_self" { 182 | type = "ingress" 183 | from_port = var.cli_rpc_port 184 | to_port = var.cli_rpc_port 185 | protocol = "tcp" 186 | self = true 187 | 188 | security_group_id = var.security_group_id 189 | } 190 | 191 | resource "aws_security_group_rule" "allow_serf_wan_tcp_inbound_from_self" { 192 | type = "ingress" 193 | from_port = 
var.serf_wan_port 194 | to_port = var.serf_wan_port 195 | protocol = "tcp" 196 | self = true 197 | 198 | security_group_id = var.security_group_id 199 | } 200 | 201 | resource "aws_security_group_rule" "allow_serf_wan_udp_inbound_from_self" { 202 | type = "ingress" 203 | from_port = var.serf_wan_port 204 | to_port = var.serf_wan_port 205 | protocol = "udp" 206 | self = true 207 | 208 | security_group_id = var.security_group_id 209 | } 210 | 211 | resource "aws_security_group_rule" "allow_http_api_inbound_from_self" { 212 | type = "ingress" 213 | from_port = var.http_api_port 214 | to_port = var.http_api_port 215 | protocol = "tcp" 216 | self = true 217 | 218 | security_group_id = var.security_group_id 219 | } 220 | 221 | resource "aws_security_group_rule" "allow_dns_tcp_inbound_from_self" { 222 | type = "ingress" 223 | from_port = var.dns_port 224 | to_port = var.dns_port 225 | protocol = "tcp" 226 | self = true 227 | 228 | security_group_id = var.security_group_id 229 | } 230 | 231 | resource "aws_security_group_rule" "allow_dns_udp_inbound_from_self" { 232 | type = "ingress" 233 | from_port = var.dns_port 234 | to_port = var.dns_port 235 | protocol = "udp" 236 | self = true 237 | 238 | security_group_id = var.security_group_id 239 | } 240 | 241 | # --------------------------------------------------------------------------------------------------------------------- 242 | # THE CONSUL-CLIENT SPECIFIC INBOUND/OUTBOUND RULES COME FROM THE CONSUL-CLIENT-SECURITY-GROUP-RULES MODULE 243 | # --------------------------------------------------------------------------------------------------------------------- 244 | 245 | module "client_security_group_rules" { 246 | source = "../consul-client-security-group-rules" 247 | 248 | security_group_id = var.security_group_id 249 | allowed_inbound_cidr_blocks = var.allowed_inbound_cidr_blocks 250 | allowed_inbound_security_group_ids = var.allowed_inbound_security_group_ids 251 | allowed_inbound_security_group_count = 
var.allowed_inbound_security_group_count 252 | 253 | serf_lan_port = var.serf_lan_port 254 | } 255 | 256 | -------------------------------------------------------------------------------- /modules/consul-security-group-rules/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # You must provide a value for each of these parameters. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "security_group_id" { 7 | description = "The ID of the security group to which we should add the Consul security group rules" 8 | type = string 9 | } 10 | 11 | variable "allowed_inbound_cidr_blocks" { 12 | description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Consul" 13 | type = list(string) 14 | default = [] 15 | } 16 | 17 | # --------------------------------------------------------------------------------------------------------------------- 18 | # OPTIONAL PARAMETERS 19 | # These parameters have reasonable defaults. 20 | # --------------------------------------------------------------------------------------------------------------------- 21 | 22 | variable "allowed_inbound_security_group_ids" { 23 | description = "A list of security group IDs that will be allowed to connect to Consul" 24 | type = list(string) 25 | default = [] 26 | } 27 | 28 | variable "allowed_inbound_security_group_count" { 29 | description = "The number of entries in var.allowed_inbound_security_group_ids. Ideally, this value could be computed dynamically, but we pass this variable to a Terraform resource's 'count' property and Terraform requires that 'count' be computed with literals or data sources only." 
30 | type = number 31 | default = 0 32 | } 33 | 34 | variable "server_rpc_port" { 35 | description = "The port used by servers to handle incoming requests from other agents." 36 | type = number 37 | default = 8300 38 | } 39 | 40 | variable "cli_rpc_port" { 41 | description = "The port used by all agents to handle RPC from the CLI." 42 | type = number 43 | default = 8400 44 | } 45 | 46 | variable "serf_lan_port" { 47 | description = "The port used to handle gossip in the LAN. Required by all agents." 48 | type = number 49 | default = 8301 50 | } 51 | 52 | variable "serf_wan_port" { 53 | description = "The port used by servers to gossip over the WAN to other servers." 54 | type = number 55 | default = 8302 56 | } 57 | 58 | variable "http_api_port" { 59 | description = "The port used by clients to talk to the HTTP API" 60 | type = number 61 | default = 8500 62 | } 63 | 64 | variable "dns_port" { 65 | description = "The port used to resolve DNS queries." 66 | type = number 67 | default = 8600 68 | } 69 | 70 | -------------------------------------------------------------------------------- /modules/install-consul/README.md: -------------------------------------------------------------------------------- 1 | # Consul Install Script 2 | 3 | This folder contains a script for installing Consul and its dependencies. Use this script along with the 4 | [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul) to create a Consul [Amazon Machine Image 5 | (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that can be deployed in 6 | [AWS](https://aws.amazon.com/) across an Auto Scaling Group using the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster). 
7 | 8 | This script has been tested on the following operating systems: 9 | 10 | * Ubuntu 16.04 11 | * Ubuntu 18.04 12 | * Amazon Linux 2 13 | 14 | There is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well. 15 | 16 | 17 | 18 | ## Quick start 19 | 20 | 21 | 22 | To install Consul, use `git` to clone this repository at a specific tag (see the [releases page](../../../../releases) 23 | for all available tags) and run the `install-consul` script: 24 | 25 | ``` 26 | git clone --branch <TAG> https://github.com/hashicorp/terraform-aws-consul.git 27 | terraform-aws-consul/modules/install-consul/install-consul --version 0.8.0 28 | ``` 29 | 30 | The `install-consul` script will install Consul, its dependencies, and the [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul). 31 | The `run-consul` script is also run when the server is booting to start Consul and configure it to automatically 32 | join other nodes to form a cluster. 33 | 34 | We recommend running the `install-consul` script as part of a [Packer](https://www.packer.io/) template to create a 35 | Consul [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) (see the 36 | [consul-ami example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami) for fully-working sample code). You can then deploy the AMI across an Auto 37 | Scaling Group using the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) (see the [consul-cluster 38 | example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/root-example) for fully-working sample code). 39 | 40 | 41 | 42 | 43 | ## Command line Arguments 44 | 45 | The `install-consul` script accepts the following arguments: 46 | 47 | * `version VERSION`: Install Consul version VERSION. Optional if download-url is provided. 
48 | * `download-url URL`: Install the Consul package hosted in this url. Optional if version is provided. 49 | * `path DIR`: Install Consul into folder DIR. Optional. 50 | * `user USER`: The install dirs will be owned by user USER. Optional. 51 | * `ca-file-path PATH`: Path to a PEM-encoded certificate authority used to encrypt and verify authenticity of client and server connections. Optional. 52 | * `cert-file-path PATH`: Path to a PEM-encoded certificate, which will be provided to clients or servers to verify the agent's authenticity. Optional. 53 | * `key-file-path PATH`: Path to a PEM-encoded private key, used with the certificate to verify the agent's authenticity. Optional. 54 | 55 | Example: 56 | 57 | ``` 58 | install-consul --version 1.2.2 59 | ``` 60 | 61 | 62 | 63 | ## How it works 64 | 65 | The `install-consul` script does the following: 66 | 67 | 1. [Creates a user and folders for Consul](#create-a-user-and-folders-for-consul) 68 | 1. [Installs Consul binaries and scripts](#install-consul-binaries-and-scripts) 69 | 1. [Installs provided TLS certificates](#install-tls-certificates) 70 | 1. [Follow-up tasks](#follow-up-tasks) 71 | 72 | 73 | ### Creates a user and folders for Consul 74 | 75 | Creates an OS user named `consul`. Creates the following folders, all owned by user `consul`: 76 | 77 | * `/opt/consul`: base directory for Consul data (configurable via the `--path` argument). 78 | * `/opt/consul/bin`: directory for Consul binaries. 79 | * `/opt/consul/data`: directory where the Consul agent can store state. 80 | * `/opt/consul/config`: directory where the Consul agent looks up configuration. 81 | * `/opt/consul/log`: directory where Consul will store log output. 82 | * `/opt/consul/tls`: directory where an optional server certificate and private key are copied if provided. 83 | * `/opt/consul/tls/ca`: directory where an optional CA certificate is copied if provided. 
84 | 85 | 86 | ### Installs Consul binaries and scripts 87 | 88 | Installs the following: 89 | 90 | * `consul`: Either downloads the Consul zip file from the [downloads page](https://www.consul.io/downloads.html) (the version 91 | number is configurable via the `--version` argument), or a package hosted on a precise url configurable with `--download-url` 92 | (useful for installing Consul Enterprise, for example) and extracts the `consul` binary into `/opt/consul/bin`. Adds a 93 | symlink to the `consul` binary in `/usr/local/bin`. 94 | * `run-consul`: Copies the [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul) into `/opt/consul/bin`. 95 | 96 | ### Installs TLS certificates 97 | 98 | Copies the certificates/key provided by the `--ca-file-path`, `--cert-file-path` and `--key-file-path` to the Consul 99 | configuration directory. If provided, the CA file is copied to `/opt/consul/tls/ca` and the server certificate/key 100 | are copied to `/opt/consul/tls` (assuming the default config path of `/opt/consul`). The script also sets the 101 | required permissions and file ownership. 102 | 103 | ### Follow-up tasks 104 | 105 | After the `install-consul` script finishes running, you may wish to do the following: 106 | 107 | 1. If you have custom Consul config (`.json`) files, you may want to copy them into the config directory (default: 108 | `/opt/consul/config`). 109 | 1. If `/usr/local/bin` isn't already part of `PATH`, you should add it so you can run the `consul` command without 110 | specifying the full path. 111 | 112 | 113 | 114 | ## Dependencies 115 | 116 | The install script assumes that `systemd` is already installed. We use it as a cross-platform supervisor to ensure Consul is started 117 | whenever the system boots and restarted if the Consul process crashes. Additionally, it is used to store all logs which can be accessed 118 | using `journalctl`. 119 | 120 | 121 | 122 | ## Why use Git to install this code? 
123 | 124 | We needed an easy way to install these scripts that satisfied a number of requirements, including working on a variety 125 | of operating systems and supported versioning. Our current solution is to use `git`, but this may change in the future. 126 | See [Package Managers](https://github.com/hashicorp/terraform-aws-consul/tree/master/_docs/package-managers.md) for a full discussion of the requirements, trade-offs, and why we 127 | picked `git`. 128 | -------------------------------------------------------------------------------- /modules/install-consul/install-consul: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script can be used to install Consul and its dependencies. This script has been tested with the following 3 | # operating systems: 4 | # 5 | # 1. Ubuntu 16.04 6 | # 1. Ubuntu 18.04 7 | # 1. Amazon Linux 2 8 | 9 | set -e 10 | 11 | readonly DEFAULT_INSTALL_PATH="/opt/consul" 12 | readonly DEFAULT_CONSUL_USER="consul" 13 | readonly DOWNLOAD_PACKAGE_PATH="/tmp/consul.zip" 14 | 15 | readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 16 | readonly SYSTEM_BIN_DIR="/usr/local/bin" 17 | 18 | readonly SCRIPT_NAME="$(basename "$0")" 19 | 20 | function print_usage { 21 | echo 22 | echo "Usage: install-consul [OPTIONS]" 23 | echo 24 | echo "This script can be used to install Consul and its dependencies. This script has been tested with Ubuntu 16.04 and Amazon Linux 2." 25 | echo 26 | echo "Options:" 27 | echo 28 | echo -e " --version\t\tThe version of Consul to install. Optional if download-url is provided." 29 | echo -e " --download-url\t\tUrl to exact Consul package to be installed. Optional if version is provided." 30 | echo -e " --path\t\tThe path where Consul should be installed. Optional. Default: $DEFAULT_INSTALL_PATH." 31 | echo -e " --user\t\tThe user who will own the Consul install directories. Optional. Default: $DEFAULT_CONSUL_USER." 
32 | echo -e " --ca-file-path\t\tPath to a PEM-encoded certificate authority used to encrypt and verify authenticity of client and server connections. Will be installed under /tls/ca." 33 | echo -e " --cert-file-path\t\tPath to a PEM-encoded certificate, which will be provided to clients or servers to verify the agent's authenticity. Will be installed under /tls. Must be provided along with --key-file-path." 34 | echo -e " --key-file-path\t\tPath to a PEM-encoded private key, used with the certificate to verify the agent's authenticity. Will be installed under /tls. Must be provided along with --cert-file-path" 35 | echo 36 | echo "Example:" 37 | echo 38 | echo " install-consul --version 1.2.2" 39 | } 40 | 41 | function log { 42 | local -r level="$1" 43 | local -r message="$2" 44 | local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S") 45 | >&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}" 46 | } 47 | 48 | function log_info { 49 | local -r message="$1" 50 | log "INFO" "$message" 51 | } 52 | 53 | function log_warn { 54 | local -r message="$1" 55 | log "WARN" "$message" 56 | } 57 | 58 | function log_error { 59 | local -r message="$1" 60 | log "ERROR" "$message" 61 | } 62 | 63 | function assert_not_empty { 64 | local -r arg_name="$1" 65 | local -r arg_value="$2" 66 | 67 | if [[ -z "$arg_value" ]]; then 68 | log_error "The value for '$arg_name' cannot be empty" 69 | print_usage 70 | exit 1 71 | fi 72 | } 73 | 74 | function assert_either_or { 75 | local -r arg1_name="$1" 76 | local -r arg1_value="$2" 77 | local -r arg2_name="$3" 78 | local -r arg2_value="$4" 79 | 80 | if [[ -z "$arg1_value" && -z "$arg2_value" ]]; then 81 | log_error "Either the value for '$arg1_name' or '$arg2_name' must be passed, both cannot be empty" 82 | print_usage 83 | exit 1 84 | fi 85 | } 86 | 87 | # A retry function that attempts to run a command a number of times and returns the output 88 | function retry { 89 | local -r cmd="$1" 90 | local -r description="$2" 91 | 92 | for i in $(seq 
1 5); do 93 | log_info "$description" 94 | 95 | # The boolean operations with the exit status are there to temporarily circumvent the "set -e" at the 96 | # beginning of this script which exits the script immediatelly for error status while not losing the exit status code 97 | output=$(eval "$cmd") && exit_status=0 || exit_status=$? 98 | log_info "$output" 99 | if [[ $exit_status -eq 0 ]]; then 100 | echo "$output" 101 | return 102 | fi 103 | log_warn "$description failed. Will sleep for 10 seconds and try again." 104 | sleep 10 105 | done; 106 | 107 | log_error "$description failed after 5 attempts." 108 | exit $exit_status 109 | } 110 | 111 | function has_yum { 112 | [ -n "$(command -v yum)" ] 113 | } 114 | 115 | function has_apt_get { 116 | [ -n "$(command -v apt-get)" ] 117 | } 118 | 119 | function install_dependencies { 120 | log_info "Installing dependencies" 121 | 122 | if has_apt_get; then 123 | sudo apt-get update -y 124 | sudo apt-get install -y awscli curl unzip jq 125 | elif has_yum; then 126 | sudo yum update -y 127 | sudo yum install -y aws curl unzip jq 128 | else 129 | log_error "Could not find apt-get or yum. Cannot install dependencies on this OS." 130 | exit 1 131 | fi 132 | } 133 | 134 | function user_exists { 135 | local -r username="$1" 136 | id "$username" >/dev/null 2>&1 137 | } 138 | 139 | function create_consul_user { 140 | local -r username="$1" 141 | 142 | if user_exists "$username"; then 143 | echo "User $username already exists. Will not create again." 
144 | else 145 | log_info "Creating user named $username" 146 | sudo useradd "$username" 147 | fi 148 | } 149 | 150 | function create_consul_install_paths { 151 | local -r path="$1" 152 | local -r username="$2" 153 | 154 | log_info "Creating install dirs for Consul at $path" 155 | sudo mkdir -p "$path" 156 | sudo mkdir -p "$path/bin" 157 | sudo mkdir -p "$path/config" 158 | sudo mkdir -p "$path/data" 159 | sudo mkdir -p "$path/tls/ca" 160 | 161 | log_info "Changing ownership of $path to $username" 162 | sudo chown -R "$username:$username" "$path" 163 | } 164 | 165 | function fetch_binary { 166 | local -r version="$1" 167 | local download_url="$2" 168 | 169 | if [[ -z "$download_url" && -n "$version" ]]; then 170 | download_url="https://releases.hashicorp.com/consul/${version}/consul_${version}_linux_amd64.zip" 171 | fi 172 | 173 | retry \ 174 | "curl -o '$DOWNLOAD_PACKAGE_PATH' '$download_url' --location --silent --fail --show-error" \ 175 | "Downloading Consul to $DOWNLOAD_PACKAGE_PATH" 176 | } 177 | 178 | function install_binary { 179 | local -r install_path="$1" 180 | local -r username="$2" 181 | 182 | local -r bin_dir="$install_path/bin" 183 | local -r consul_dest_path="$bin_dir/consul" 184 | local -r run_consul_dest_path="$bin_dir/run-consul" 185 | 186 | unzip -d /tmp "$DOWNLOAD_PACKAGE_PATH" 187 | 188 | log_info "Moving Consul binary to $consul_dest_path" 189 | sudo mv "/tmp/consul" "$consul_dest_path" 190 | sudo chown "$username:$username" "$consul_dest_path" 191 | sudo chmod a+x "$consul_dest_path" 192 | 193 | local -r symlink_path="$SYSTEM_BIN_DIR/consul" 194 | if [[ -f "$symlink_path" ]]; then 195 | log_info "Symlink $symlink_path already exists. Will not add again." 
196 | else 197 | log_info "Adding symlink to $consul_dest_path in $symlink_path" 198 | sudo ln -s "$consul_dest_path" "$symlink_path" 199 | fi 200 | 201 | log_info "Copying Consul run script to $run_consul_dest_path" 202 | sudo cp "$SCRIPT_DIR/../run-consul/run-consul" "$run_consul_dest_path" 203 | sudo chown "$username:$username" "$run_consul_dest_path" 204 | sudo chmod a+x "$run_consul_dest_path" 205 | } 206 | 207 | function install_tls_certificates { 208 | local -r path="$1" 209 | local -r user="$2" 210 | local -r ca_file_path="$3" 211 | local -r cert_file_path="$4" 212 | local -r key_file_path="$5" 213 | 214 | local -r consul_tls_certs_path="$path/tls" 215 | local -r ca_certs_path="$consul_tls_certs_path/ca" 216 | 217 | log_info "Moving TLS certs to $consul_tls_certs_path and $ca_certs_path" 218 | 219 | sudo mkdir -p "$ca_certs_path" 220 | sudo mv "$ca_file_path" "$ca_certs_path/" 221 | sudo mv "$cert_file_path" "$consul_tls_certs_path/" 222 | sudo mv "$key_file_path" "$consul_tls_certs_path/" 223 | 224 | sudo chown -R "$user:$user" "$consul_tls_certs_path/" 225 | sudo find "$consul_tls_certs_path/" -type f -exec chmod u=r,g=,o= {} \; 226 | } 227 | 228 | function install { 229 | local version="" 230 | local download_url="" 231 | local path="$DEFAULT_INSTALL_PATH" 232 | local user="$DEFAULT_CONSUL_USER" 233 | local ca_file_path="" 234 | local cert_file_path="" 235 | local key_file_path="" 236 | 237 | while [[ $# -gt 0 ]]; do 238 | local key="$1" 239 | 240 | case "$key" in 241 | --version) 242 | version="$2" 243 | shift 244 | ;; 245 | --download-url) 246 | download_url="$2" 247 | shift 248 | ;; 249 | --path) 250 | path="$2" 251 | shift 252 | ;; 253 | --user) 254 | user="$2" 255 | shift 256 | ;; 257 | --ca-file-path) 258 | assert_not_empty "$key" "$2" 259 | ca_file_path="$2" 260 | shift 261 | ;; 262 | --cert-file-path) 263 | assert_not_empty "$key" "$2" 264 | cert_file_path="$2" 265 | shift 266 | ;; 267 | --key-file-path) 268 | assert_not_empty "$key" "$2" 269 | 
key_file_path="$2" 270 | shift 271 | ;; 272 | --help) 273 | print_usage 274 | exit 275 | ;; 276 | *) 277 | log_error "Unrecognized argument: $key" 278 | print_usage 279 | exit 1 280 | ;; 281 | esac 282 | 283 | shift 284 | done 285 | 286 | assert_either_or "--version" "$version" "--download-url" "$download_url" 287 | assert_not_empty "--path" "$path" 288 | assert_not_empty "--user" "$user" 289 | 290 | log_info "Starting Consul install" 291 | 292 | install_dependencies 293 | create_consul_user "$user" 294 | create_consul_install_paths "$path" "$user" 295 | 296 | fetch_binary "$version" "$download_url" 297 | install_binary "$path" "$user" 298 | 299 | if [[ -n "$ca_file_path" || -n "$cert_file_path" || -n "$key_file_path" ]]; then 300 | install_tls_certificates "$path" "$user" "$ca_file_path" "$cert_file_path" "$key_file_path" 301 | fi 302 | 303 | if command -v consul; then 304 | log_info "Consul install complete!"; 305 | else 306 | log_info "Could not find consul command. Aborting."; 307 | exit 1; 308 | fi 309 | } 310 | 311 | install "$@" 312 | -------------------------------------------------------------------------------- /modules/install-dnsmasq/README.md: -------------------------------------------------------------------------------- 1 | # Dnsmasq Install Script 2 | 3 | This folder contains a script for installing [Dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) and configuring 4 | it to forward requests for a specific domain to Consul. This way, you can easily use Consul as your DNS server for 5 | domain names such as `foo.service.consul`, where `foo` is a service registered with Consul (see the [Registering 6 | Services docs](https://www.consul.io/intro/getting-started/services.html) for instructions on registering your services 7 | with Consul). All other domain names will continue to be resolved via the default resolver on your OS. 
See the [Consul 8 | DNS Forwarding Guide](https://www.consul.io/docs/guides/forwarding.html) for more info, including trade-offs between using this module and [systemd-resolved](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/setup-systemd-resolved) for DNS forwarding. 9 | 10 | 11 | This script has been tested on the following operating systems: 12 | 13 | * Ubuntu 16.04 14 | * Amazon Linux 2 15 | 16 | There is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well. 17 | 18 | 19 | 20 | ## Quick start 21 | 22 | To install Dnsmasq, use `git` to clone this repository at a specific tag (see the [releases page](../../../../releases) 23 | for all available tags) and run the `install-dnsmasq` script: 24 | 25 | ``` 26 | git clone --branch <TAG> https://github.com/hashicorp/terraform-aws-consul.git 27 | terraform-aws-consul/modules/install-dnsmasq/install-dnsmasq 28 | ``` 29 | 30 | Note: by default, the `install-dnsmasq` script assumes that a Consul agent is already running locally and connected to 31 | a Consul cluster. After the install completes, restart `dnsmasq` (e.g. `sudo /etc/init.d/dnsmasq restart`) and queries 32 | to the `.consul` domain will be resolved via Consul: 33 | 34 | ``` 35 | dig foo.service.consul 36 | ``` 37 | 38 | We recommend running the `install-dnsmasq` script as part of a [Packer](https://www.packer.io/) template to create an 39 | [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) (see the 40 | [consul-ami example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami) for sample code). 41 | 42 | 43 | 44 | 45 | ## Command line Arguments 46 | 47 | The `install-dnsmasq` script accepts the following arguments: 48 | 49 | * `consul-domain DOMAIN`: The domain name to point to Consul. Optional. Default: `consul`. 50 | * `consul-ip IP`: The IP address to use for Consul. Optional. Default: `127.0.0.1`. 
This assumes a Consul agent is 51 | running locally and connected to a Consul cluster. 52 | * `consul-dns-port PORT`: The port Consul uses for DNS requests. Optional. Default: `8600`. 53 | * `dnsmasq-listen-address IP`: The IP address for dnsmasq to listen on. Optional. Defaults to the value of `consul-ip`. 54 | Make sure that the network interface you provide for the IP has already been configured before you pass it to dnsmasq. 55 | 56 | Example: 57 | 58 | ``` 59 | install-dnsmasq 60 | ``` 61 | 62 | 63 | 64 | 65 | ## Troubleshooting 66 | 67 | Add the `+trace` argument to `dig` commands to more clearly see what's going on: 68 | 69 | ``` 70 | dig vault.service.consul +trace 71 | ``` 72 | -------------------------------------------------------------------------------- /modules/install-dnsmasq/install-dnsmasq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Install Dnsmasq and configure it to forward requests for a specific domain to Consul. This script has been tested 3 | # with the following operating systems: 4 | # 5 | # 1. Ubuntu 16.04 6 | # 2. Amazon Linux 2 7 | 8 | set -e 9 | 10 | readonly DEFAULT_CONSUL_DOMAIN="consul" 11 | readonly DEFAULT_CONSUL_IP="127.0.0.1" 12 | readonly DEFAULT_CONSUL_DNS_PORT=8600 13 | readonly DEFAULT_DNSMASQ_LISTEN_ADDRESS="" 14 | 15 | readonly DNS_MASQ_CONFIG_DIR="/etc/dnsmasq.d" 16 | readonly CONSUL_DNS_MASQ_CONFIG_FILE="$DNS_MASQ_CONFIG_DIR/10-consul" 17 | 18 | readonly SCRIPT_NAME="$(basename "$0")" 19 | 20 | function print_usage { 21 | echo 22 | echo "Usage: install-dnsmasq [OPTIONS]" 23 | echo 24 | echo "Install Dnsmasq and configure it to forward requests for a specific domain to Consul. This script has been tested with Ubuntu 16.04 and Amazon Linux 2." 25 | echo 26 | echo "Options:" 27 | echo 28 | echo -e " --consul-domain\tThe domain name to point to Consul. Optional. Default: $DEFAULT_CONSUL_DOMAIN." 29 | echo -e " --consul-ip\t\tThe IP address to use for Consul. 
Optional. Default: $DEFAULT_CONSUL_IP." 30 | echo -e " --consul-dns-port\tThe port Consul uses for DNS. Optional. Default: $DEFAULT_CONSUL_DNS_PORT." 31 | echo -e " --dnsmasq-listen-address\t\tThe IP address for dnsmasq to listen on. Optional. Defaults to the value of --consul-ip." 32 | echo 33 | echo "Example:" 34 | echo 35 | echo " install-dnsmasq" 36 | } 37 | 38 | function log { 39 | local -r level="$1" 40 | local -r message="$2" 41 | local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S") 42 | >&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}" 43 | } 44 | 45 | function log_info { 46 | local -r message="$1" 47 | log "INFO" "$message" 48 | } 49 | 50 | function log_warn { 51 | local -r message="$1" 52 | log "WARN" "$message" 53 | } 54 | 55 | function log_error { 56 | local -r message="$1" 57 | log "ERROR" "$message" 58 | } 59 | 60 | function assert_not_empty { 61 | local -r arg_name="$1" 62 | local -r arg_value="$2" 63 | 64 | if [[ -z "$arg_value" ]]; then 65 | log_error "The value for '$arg_name' cannot be empty" 66 | print_usage 67 | exit 1 68 | fi 69 | } 70 | 71 | function has_yum { 72 | [[ -n "$(command -v yum)" ]] 73 | } 74 | 75 | function has_apt_get { 76 | [[ -n "$(command -v apt-get)" ]] 77 | } 78 | 79 | function install_dnsmasq { 80 | local -r consul_ip="$1" 81 | 82 | log_info "Installing Dnsmasq" 83 | 84 | if has_apt_get; then 85 | sudo apt-get update -y 86 | sudo apt-get install -y dnsmasq 87 | elif has_yum; then 88 | sudo yum update -y 89 | sudo yum install -y dnsmasq 90 | echo "prepend domain-name-servers $consul_ip;" | sudo tee -a "/etc/dhcp/dhclient.conf" > /dev/null 91 | echo "conf-dir=$DNS_MASQ_CONFIG_DIR" | sudo tee -a "/etc/dnsmasq.conf" > /dev/null 92 | sudo chkconfig dnsmasq on 93 | else 94 | log_error "Could not find apt-get or yum. Cannot install on this OS." 
95 | exit 1 96 | fi 97 | } 98 | 99 | function write_consul_config { 100 | local -r consul_domain="$1" 101 | local -r consul_ip="$2" 102 | local -r consul_port="$3" 103 | local -r dnsmasq_listen_address="$4" 104 | 105 | log_info "Configuring Dnsmasq to forward lookups of the '$consul_domain' domain on '$dnsmasq_listen_address' to $consul_ip:$consul_port in $CONSUL_DNS_MASQ_CONFIG_FILE" 106 | mkdir -p "$DNS_MASQ_CONFIG_DIR" 107 | 108 | sudo tee "$CONSUL_DNS_MASQ_CONFIG_FILE" < https://github.com/hashicorp/terraform-aws-consul.git 22 | terraform-aws-consul/modules/setup-systemd-resolved/setup-systemd-resolved 23 | ``` 24 | 25 | Note: by default, the `setup-systemd-resolved` script assumes that a Consul agent is already running locally and connected to 26 | a Consul cluster. After the install completes, restart `systemd-resolved` (e.g. `sudo systemctl restart systemd-resolved.service`) and queries 27 | to the `.consul` domain will be resolved via Consul: 28 | 29 | ``` 30 | dig foo.service.consul 31 | ``` 32 | 33 | We recommend running the `setup-systemd-resolved` script as part of a [Packer](https://www.packer.io/) template to create an 34 | [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) (see the 35 | [consul-ami example](https://github.com/hashicorp/terraform-aws-consul/tree/master/examples/consul-ami) for sample code). 36 | 37 | 38 | 39 | 40 | ## Command line Arguments 41 | 42 | The `setup-systemd-resolved` script accepts the following arguments: 43 | 44 | * `consul-domain DOMAIN`: The domain name to point to Consul. Optional. Default: `consul`. 45 | * `consul-ip IP`: The IP address to use for Consul. Optional. Default: `127.0.0.1`. This assumes a Consul agent is 46 | running locally and connected to a Consul cluster. 47 | * `consul-dns-port PORT`: The port Consul uses for DNS requests. Optional. Default: `8600`. 
48 | 49 | Example: 50 | 51 | ``` 52 | setup-systemd-resolved 53 | ``` 54 | 55 | 56 | 57 | 58 | ## Troubleshooting 59 | 60 | Add the `+trace` argument to `dig` commands to more clearly see what's going on: 61 | 62 | ``` 63 | dig vault.service.consul +trace 64 | ``` 65 | -------------------------------------------------------------------------------- /modules/setup-systemd-resolved/setup-systemd-resolved: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Configure systemd-resolved it to forward requests for a specific domain to Consul. This script has been tested 3 | # with the following operating systems: 4 | # 5 | # 1. Ubuntu 18.04 6 | # See https://learn.hashicorp.com/consul/security-networking/forwarding#systemd-resolved-setup for more details 7 | # Github Issue: https://github.com/hashicorp/consul/issues/4155 8 | 9 | set -e 10 | 11 | readonly DEFAULT_CONSUL_DOMAIN="consul" 12 | readonly DEFAULT_CONSUL_IP="127.0.0.1" 13 | readonly DEFAULT_CONSUL_DNS_PORT=8600 14 | 15 | readonly SYSTEMD_RESVOLDED_CONFIG_FILE="/etc/systemd/resolved.conf" 16 | 17 | readonly SCRIPT_NAME="$(basename "$0")" 18 | 19 | function print_usage { 20 | echo 21 | echo "Usage: setup-systemd-resolved [OPTIONS]" 22 | echo 23 | echo "Configure systemd-resolved to forward requests for a specific domain to Consul. This script has been tested with Ubuntu 18.04." 24 | echo 25 | echo "Options:" 26 | echo 27 | echo -e " --consul-domain\tThe domain name to point to Consul. Optional. Default: $DEFAULT_CONSUL_DOMAIN." 28 | echo -e " --consul-ip\t\tThe IP address to use for Consul. Optional. Default: $DEFAULT_CONSUL_IP." 29 | echo -e " --consul-dns-port\tThe port Consul uses for DNS. Optional. Default: $DEFAULT_CONSUL_DNS_PORT." 
30 | echo 31 | echo "Example:" 32 | echo 33 | echo " setup-systemd-resolved" 34 | } 35 | 36 | function log { 37 | local -r level="$1" 38 | local -r message="$2" 39 | local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S") 40 | >&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}" 41 | } 42 | 43 | function log_info { 44 | local -r message="$1" 45 | log "INFO" "$message" 46 | } 47 | 48 | function log_warn { 49 | local -r message="$1" 50 | log "WARN" "$message" 51 | } 52 | 53 | function log_error { 54 | local -r message="$1" 55 | log "ERROR" "$message" 56 | } 57 | 58 | function assert_not_empty { 59 | local -r arg_name="$1" 60 | local -r arg_value="$2" 61 | 62 | if [[ -z "$arg_value" ]]; then 63 | log_error "The value for '$arg_name' cannot be empty" 64 | print_usage 65 | exit 1 66 | fi 67 | } 68 | 69 | function install_dependencies { 70 | local -r consul_ip="$1" 71 | 72 | log_info "Installing dependencies" 73 | sudo apt-get update -y 74 | echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections 75 | echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections 76 | sudo apt-get install -y iptables-persistent 77 | } 78 | 79 | function configure_systemd_resolved { 80 | local -r consul_domain="$1" 81 | local -r consul_ip="$2" 82 | local -r consul_port="$3" 83 | 84 | UBUNTU_VERSION=`lsb_release -s -r` 85 | if [ "$UBUNTU_VERSION" == "18.04" ]; then 86 | log_info "Configuring systemd-resolved to forward lookups of the '$consul_domain' domain to $consul_ip:$consul_port in $CONSUL_DNS_MASQ_CONFIG_FILE" 87 | 88 | sudo iptables -t nat -A OUTPUT -d localhost -p udp -m udp --dport 53 -j REDIRECT --to-ports $consul_port 89 | sudo iptables -t nat -A OUTPUT -d localhost -p tcp -m tcp --dport 53 -j REDIRECT --to-ports $consul_port 90 | sudo iptables-save | sudo tee /etc/iptables/rules.v4 91 | sudo ip6tables-save | sudo tee /etc/iptables/rules.v6 92 | sudo sed -i "s/#DNS=/DNS=${consul_ip}/g" 
"$SYSTEMD_RESVOLDED_CONFIG_FILE" 93 | sudo sed -i "s/#Domains=/Domains=~${consul_domain}/g" "$SYSTEMD_RESVOLDED_CONFIG_FILE" 94 | else 95 | log_error "Cannot install on this OS." 96 | exit 1 97 | fi 98 | } 99 | 100 | function install { 101 | local consul_domain="$DEFAULT_CONSUL_DOMAIN" 102 | local consul_ip="$DEFAULT_CONSUL_IP" 103 | local consul_dns_port="$DEFAULT_CONSUL_DNS_PORT" 104 | 105 | while [[ $# > 0 ]]; do 106 | local key="$1" 107 | 108 | case "$key" in 109 | --consul-domain) 110 | assert_not_empty "$key" "$2" 111 | consul_domain="$2" 112 | shift 113 | ;; 114 | --consul-ip) 115 | assert_not_empty "$key" "$2" 116 | consul_ip="$2" 117 | shift 118 | ;; 119 | --consul-dns-port) 120 | assert_not_empty "$key" "$2" 121 | consul_dns_port="$2" 122 | shift 123 | ;; 124 | --help) 125 | print_usage 126 | exit 127 | ;; 128 | *) 129 | log_error "Unrecognized argument: $key" 130 | print_usage 131 | exit 1 132 | ;; 133 | esac 134 | 135 | shift 136 | done 137 | 138 | log_info "Configuring systemd-resolved" 139 | install_dependencies 140 | configure_systemd_resolved "$consul_domain" "$consul_ip" "$consul_dns_port" 141 | log_info "systemd-resolved configured!" 
142 | } 143 | 144 | install "$@" 145 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "num_servers" { 2 | value = module.consul_servers.cluster_size 3 | } 4 | 5 | output "asg_name_servers" { 6 | value = module.consul_servers.asg_name 7 | } 8 | 9 | output "launch_config_name_servers" { 10 | value = module.consul_servers.launch_config_name 11 | } 12 | 13 | output "iam_role_arn_servers" { 14 | value = module.consul_servers.iam_role_arn 15 | } 16 | 17 | output "iam_role_id_servers" { 18 | value = module.consul_servers.iam_role_id 19 | } 20 | 21 | output "security_group_id_servers" { 22 | value = module.consul_servers.security_group_id 23 | } 24 | 25 | output "num_clients" { 26 | value = module.consul_clients.cluster_size 27 | } 28 | 29 | output "asg_name_clients" { 30 | value = module.consul_clients.asg_name 31 | } 32 | 33 | output "launch_config_name_clients" { 34 | value = module.consul_clients.launch_config_name 35 | } 36 | 37 | output "iam_role_arn_clients" { 38 | value = module.consul_clients.iam_role_arn 39 | } 40 | 41 | output "iam_role_id_clients" { 42 | value = module.consul_clients.iam_role_id 43 | } 44 | 45 | output "security_group_id_clients" { 46 | value = module.consul_clients.security_group_id 47 | } 48 | 49 | output "aws_region" { 50 | value = data.aws_region.current.name 51 | } 52 | 53 | output "consul_servers_cluster_tag_key" { 54 | value = module.consul_servers.cluster_tag_key 55 | } 56 | 57 | output "consul_servers_cluster_tag_value" { 58 | value = module.consul_servers.cluster_tag_value 59 | } 60 | 61 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | This folder contains automated tests for this Module. All of the tests are written in [Go](https://golang.org/). 
4 | Most of these are "integration tests" that deploy real infrastructure using Terraform and verify that infrastructure 5 | works as expected using a helper library called [Terratest](https://github.com/gruntwork-io/terratest). 6 | 7 | 8 | 9 | ## WARNING WARNING WARNING 10 | 11 | **Note #1**: Many of these tests create real resources in an AWS account and then try to clean those resources up at 12 | the end of a test run. That means these tests may cost you money to run! When adding tests, please be considerate of 13 | the resources you create and take extra care to clean everything up when you're done! 14 | 15 | **Note #2**: Never forcefully shut the tests down (e.g. by hitting `CTRL + C`) or the cleanup tasks won't run! 16 | 17 | **Note #3**: We set `-timeout 60m` on all tests not because they necessarily take that long, but because Go has a 18 | default test timeout of 10 minutes, after which it forcefully kills the tests with a `SIGQUIT`, preventing the cleanup 19 | tasks from running. Therefore, we set an overlying long timeout to make sure all tests have enough time to finish and 20 | clean up. 21 | 22 | 23 | 24 | ## Running the tests 25 | 26 | ### Prerequisites 27 | 28 | - Install the latest version of [Go](https://golang.org/). 29 | - Install [dep](https://github.com/golang/dep) for Go dependency management. 30 | - Install [Terraform](https://www.terraform.io/downloads.html). 31 | - Configure your AWS credentials using one of the [options supported by the AWS 32 | SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to 33 | set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. 
34 | 35 | 36 | ### One-time setup 37 | 38 | Download Go dependencies using dep: 39 | 40 | ``` 41 | cd test 42 | dep ensure 43 | ``` 44 | 45 | 46 | ### Run all the tests 47 | 48 | ```bash 49 | cd test 50 | go test -v -timeout 60m 51 | ``` 52 | 53 | 54 | ### Run a specific test 55 | 56 | To run a specific test called `TestFoo`: 57 | 58 | ```bash 59 | cd test 60 | go test -v -timeout 60m -run TestFoo 61 | ``` 62 | 63 | 64 | -------------------------------------------------------------------------------- /test/aws_helpers.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "github.com/gruntwork-io/terratest/modules/aws" 5 | "testing" 6 | ) 7 | 8 | // Get the IP address from a randomly chosen EC2 Instance in an Auto Scaling Group of the given name in the given 9 | // region 10 | func getIpAddressOfAsgInstance(t *testing.T, asgName string, awsRegion string) string { 11 | instanceIds := aws.GetInstanceIdsForAsg(t, asgName, awsRegion) 12 | 13 | if len(instanceIds) == 0 { 14 | t.Fatalf("Could not find any instances in ASG %s in %s", asgName, awsRegion) 15 | } 16 | 17 | return aws.GetPublicIpOfEc2Instance(t, instanceIds[0], awsRegion) 18 | } 19 | -------------------------------------------------------------------------------- /test/consul_cluster_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | // Test the example in the root folder 8 | func TestConsulClusterWithUbuntu16Ami(t *testing.T) { 9 | t.Parallel() 10 | runConsulClusterTest(t, "ubuntu16-ami", ".", "../examples/consul-ami/consul.json", "ubuntu", "") 11 | } 12 | 13 | // Test the example in the root folder 14 | func TestConsulClusterWithUbuntu18Ami(t *testing.T) { 15 | t.Parallel() 16 | runConsulClusterTest(t, "ubuntu18-ami", ".", "../examples/consul-ami/consul.json", "ubuntu", "") 17 | } 18 | 19 | // Test the example in the root folder 20 | func 
TestConsulClusterWithAmazonLinuxAmi(t *testing.T) { 21 | t.Parallel() 22 | runConsulClusterTest(t, "amazon-linux-2-ami", ".", "../examples/consul-ami/consul.json", "ec2-user", "") 23 | } 24 | -------------------------------------------------------------------------------- /test/consul_cluster_with_custom_asg_role_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/gruntwork-io/terratest/modules/random" 7 | ) 8 | 9 | func TestConsulClusterWithCustomASGRoleUbuntu16Ami(t *testing.T) { 10 | t.Parallel() 11 | terraformVars := map[string]interface{}{ 12 | "consul_service_linked_role_suffix": random.UniqueId(), 13 | } 14 | runConsulClusterTestWithVars(t, "ubuntu16-ami", "examples/example-with-custom-asg-role", "../examples/consul-ami/consul.json", "ubuntu", terraformVars, "") 15 | } 16 | 17 | func TestConsulClusterWithCustomASGRoleUbuntu18Ami(t *testing.T) { 18 | t.Parallel() 19 | terraformVars := map[string]interface{}{ 20 | "consul_service_linked_role_suffix": random.UniqueId(), 21 | } 22 | runConsulClusterTestWithVars(t, "ubuntu18-ami", "examples/example-with-custom-asg-role", "../examples/consul-ami/consul.json", "ubuntu", terraformVars, "") 23 | } 24 | 25 | func TestConsulClusterWithCustomASGRoleAmazonLinuxAmi(t *testing.T) { 26 | t.Parallel() 27 | terraformVars := map[string]interface{}{ 28 | "consul_service_linked_role_suffix": random.UniqueId(), 29 | } 30 | runConsulClusterTestWithVars(t, "amazon-linux-2-ami", "examples/example-with-custom-asg-role", "../examples/consul-ami/consul.json", "ec2-user", terraformVars, "") 31 | } 32 | -------------------------------------------------------------------------------- /test/consul_cluster_with_encryption_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import "testing" 4 | 5 | func TestConsulClusterWithEncryptionUbuntu16Ami(t *testing.T) { 6 | 
t.Parallel() 7 | runConsulClusterTest(t, "ubuntu16-ami", "examples/example-with-encryption", "../examples/example-with-encryption/packer/consul-with-certs.json", "ubuntu", "") 8 | } 9 | 10 | func TestConsulClusterWithEncryptionUbuntu18Ami(t *testing.T) { 11 | t.Parallel() 12 | runConsulClusterTest(t, "ubuntu18-ami", "examples/example-with-encryption", "../examples/example-with-encryption/packer/consul-with-certs.json", "ubuntu", "") 13 | } 14 | 15 | func TestConsulClusterWithEncryptionAmazonLinuxAmi(t *testing.T) { 16 | t.Parallel() 17 | runConsulClusterTest(t, "amazon-linux-2-ami", "examples/example-with-encryption", "../examples/example-with-encryption/packer/consul-with-certs.json", "ec2-user", "") 18 | } 19 | -------------------------------------------------------------------------------- /test/consul_enterprise_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | // Test the example in the root folder 9 | func TestConsulInstallFromURLWithUbuntu16Ami(t *testing.T) { 10 | t.Parallel() 11 | runConsulClusterTest(t, "ubuntu16-ami", ".", "../examples/consul-ami/consul.json", "ubuntu", getUrlFromEnv(t)) 12 | } 13 | 14 | func TestConsulInstallFromURLWithUbuntu18Ami(t *testing.T) { 15 | t.Parallel() 16 | runConsulClusterTest(t, "ubuntu18-ami", ".", "../examples/consul-ami/consul.json", "ubuntu", getUrlFromEnv(t)) 17 | } 18 | 19 | func TestConsulInstallFromURLWithAmazonLinuxAmi(t *testing.T) { 20 | t.Parallel() 21 | runConsulClusterTest(t, "amazon-linux-2-ami", ".", "../examples/consul-ami/consul.json", "ec2-user", getUrlFromEnv(t)) 22 | } 23 | 24 | // To test this on circle ci you need a url set as an environment variable, CONSUL_AMI_TEMPLATE_VAR_DOWNLOAD_URL 25 | // which you would also have to set locally if you want to run this test locally. 
26 | func getUrlFromEnv(t *testing.T) string { 27 | url := os.Getenv("CONSUL_AMI_TEMPLATE_VAR_DOWNLOAD_URL") 28 | if url == "" { 29 | t.Fatalf("Please set the environment variable CONSUL_AMI_TEMPLATE_VAR_DOWNLOAD_URL.\n") 30 | } 31 | return url 32 | } 33 | -------------------------------------------------------------------------------- /test/consul_helpers.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strings" 7 | "testing" 8 | "time" 9 | 10 | "github.com/gruntwork-io/terratest/modules/aws" 11 | "github.com/gruntwork-io/terratest/modules/logger" 12 | "github.com/gruntwork-io/terratest/modules/random" 13 | "github.com/gruntwork-io/terratest/modules/retry" 14 | "github.com/gruntwork-io/terratest/modules/ssh" 15 | "github.com/gruntwork-io/terratest/modules/terraform" 16 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 17 | "github.com/hashicorp/consul/api" 18 | ) 19 | 20 | const REPO_ROOT = "../" 21 | const CONSUL_CLUSTER_EXAMPLE_VAR_AMI_ID = "ami_id" 22 | const CONSUL_CLUSTER_EXAMPLE_VAR_SSH_KEY_NAME = "ssh_key_name" 23 | const CONSUL_CLUSTER_EXAMPLE_VAR_CLUSTER_NAME = "cluster_name" 24 | const CONSUL_CLUSTER_EXAMPLE_VAR_NUM_SERVERS = "num_servers" 25 | const CONSUL_CLUSTER_EXAMPLE_VAR_NUM_CLIENTS = "num_clients" 26 | 27 | const CONSUL_CLUSTER_EXAMPLE_DEFAULT_NUM_SERVERS = 3 28 | const CONSUL_CLUSTER_EXAMPLE_DEFAULT_NUM_CLIENTS = 6 29 | 30 | const CONSUL_CLUSTER_EXAMPLE_OUTPUT_SERVER_ASG_NAME = "asg_name_servers" 31 | const CONSUL_CLUSTER_EXAMPLE_OUTPUT_CLIENT_ASG_NAME = "asg_name_clients" 32 | 33 | const SAVED_AWS_REGION = "AwsRegion" 34 | 35 | const AWS_DEFAULT_REGION_ENV_VAR = "AWS_DEFAULT_REGION" 36 | 37 | // Test the consul-cluster example by: 38 | // 39 | // 1. Copying the code in this repo to a temp folder so tests on the Terraform code can run in parallel without the 40 | // state files overwriting each other. 41 | // 2. 
Building the AMI in the consul-ami example with the given build name 42 | // 3. Deploying that AMI using the consul-cluster Terraform code 43 | // 4. Checking that the Consul cluster comes up within a reasonable time period and can respond to requests 44 | func runConsulClusterTest(t *testing.T, packerBuildName string, examplesFolder string, packerTemplatePath string, sshUser string, enterpriseUrl string) { 45 | runConsulClusterTestWithVars(t, 46 | packerBuildName, 47 | examplesFolder, 48 | packerTemplatePath, 49 | sshUser, 50 | map[string]interface{}{}, 51 | enterpriseUrl) 52 | } 53 | 54 | func runConsulClusterTestWithVars(t *testing.T, packerBuildName string, examplesFolder string, packerTemplatePath string, sshUser string, terraformVarsMerge map[string]interface{}, enterpriseUrl string) { 55 | // Uncomment any of the following to skip that section during the test 56 | //os.Setenv("SKIP_setup_ami", "true") 57 | //os.Setenv("SKIP_deploy", "true") 58 | //os.Setenv("SKIP_validate", "true") 59 | //os.Setenv("SKIP_teardown", "true") 60 | 61 | exampleFolder := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, examplesFolder) 62 | 63 | test_structure.RunTestStage(t, "setup_ami", func() { 64 | awsRegion := aws.GetRandomRegion(t, nil, []string{"eu-north-1"}) 65 | test_structure.SaveString(t, exampleFolder, SAVED_AWS_REGION, awsRegion) 66 | 67 | amiId := buildAmi(t, packerTemplatePath, packerBuildName, awsRegion, enterpriseUrl) 68 | test_structure.SaveAmiId(t, exampleFolder, amiId) 69 | }) 70 | 71 | defer test_structure.RunTestStage(t, "teardown", func() { 72 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleFolder) 73 | terraform.Destroy(t, terraformOptions) 74 | 75 | keyPair := test_structure.LoadEc2KeyPair(t, exampleFolder) 76 | aws.DeleteEC2KeyPair(t, keyPair) 77 | 78 | amiId := test_structure.LoadAmiId(t, exampleFolder) 79 | awsRegion := test_structure.LoadString(t, exampleFolder, SAVED_AWS_REGION) 80 | aws.DeleteAmi(t, awsRegion, amiId) 81 | }) 
82 | 83 | test_structure.RunTestStage(t, "deploy", func() { 84 | uniqueId := random.UniqueId() 85 | awsRegion := test_structure.LoadString(t, exampleFolder, SAVED_AWS_REGION) 86 | amiId := test_structure.LoadAmiId(t, exampleFolder) 87 | 88 | keyPair := aws.CreateAndImportEC2KeyPair(t, awsRegion, uniqueId) 89 | test_structure.SaveEc2KeyPair(t, exampleFolder, keyPair) 90 | 91 | terraformVars := map[string]interface{}{ 92 | CONSUL_CLUSTER_EXAMPLE_VAR_CLUSTER_NAME: uniqueId, 93 | CONSUL_CLUSTER_EXAMPLE_VAR_NUM_SERVERS: CONSUL_CLUSTER_EXAMPLE_DEFAULT_NUM_SERVERS, 94 | CONSUL_CLUSTER_EXAMPLE_VAR_NUM_CLIENTS: CONSUL_CLUSTER_EXAMPLE_DEFAULT_NUM_CLIENTS, 95 | CONSUL_CLUSTER_EXAMPLE_VAR_AMI_ID: amiId, 96 | CONSUL_CLUSTER_EXAMPLE_VAR_SSH_KEY_NAME: keyPair.Name, 97 | } 98 | 99 | for k, v := range terraformVarsMerge { 100 | terraformVars[k] = v 101 | } 102 | 103 | terraformOptions := &terraform.Options{ 104 | TerraformDir: exampleFolder, 105 | Vars: terraformVars, 106 | EnvVars: map[string]string{ 107 | AWS_DEFAULT_REGION_ENV_VAR: awsRegion, 108 | }, 109 | } 110 | test_structure.SaveTerraformOptions(t, exampleFolder, terraformOptions) 111 | 112 | terraform.InitAndApply(t, terraformOptions) 113 | }) 114 | 115 | test_structure.RunTestStage(t, "validate", func() { 116 | awsRegion := test_structure.LoadString(t, exampleFolder, SAVED_AWS_REGION) 117 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleFolder) 118 | keyPair := test_structure.LoadEc2KeyPair(t, exampleFolder) 119 | 120 | if len(enterpriseUrl) > 0 { 121 | checkEnterpriseInstall(t, CONSUL_CLUSTER_EXAMPLE_OUTPUT_SERVER_ASG_NAME, terraformOptions, awsRegion, sshUser, keyPair) 122 | } 123 | 124 | // Check the Consul servers 125 | checkConsulClusterIsWorking(t, CONSUL_CLUSTER_EXAMPLE_OUTPUT_SERVER_ASG_NAME, terraformOptions, awsRegion) 126 | 127 | // Check the Consul clients 128 | checkConsulClusterIsWorking(t, CONSUL_CLUSTER_EXAMPLE_OUTPUT_CLIENT_ASG_NAME, terraformOptions, awsRegion) 129 | }) 130 | } 131 | 
132 | // Check that the Consul cluster comes up within a reasonable time period and can respond to requests 133 | func checkConsulClusterIsWorking(t *testing.T, asgNameOutputVar string, terratestOptions *terraform.Options, awsRegion string) { 134 | asgName := terraform.OutputRequired(t, terratestOptions, asgNameOutputVar) 135 | nodeIpAddress := getIpAddressOfAsgInstance(t, asgName, awsRegion) 136 | testConsulCluster(t, nodeIpAddress) 137 | } 138 | 139 | // Use a Consul client to connect to the given node and use it to verify that: 140 | // 141 | // 1. The Consul cluster has deployed 142 | // 2. The cluster has the expected number of members 143 | // 3. The cluster has elected a leader 144 | func testConsulCluster(t *testing.T, nodeIpAddress string) { 145 | consulClient := createConsulClient(t, nodeIpAddress) 146 | maxRetries := 60 147 | sleepBetweenRetries := 10 * time.Second 148 | expectedMembers := CONSUL_CLUSTER_EXAMPLE_DEFAULT_NUM_CLIENTS + CONSUL_CLUSTER_EXAMPLE_DEFAULT_NUM_SERVERS 149 | 150 | leader := retry.DoWithRetry(t, "Check Consul members", maxRetries, sleepBetweenRetries, func() (string, error) { 151 | members, err := consulClient.Agent().Members(false) 152 | if err != nil { 153 | return "", err 154 | } 155 | 156 | if len(members) != expectedMembers { 157 | return "", fmt.Errorf("Expected the cluster to have %d members, but found %d", expectedMembers, len(members)) 158 | } 159 | 160 | leader, err := consulClient.Status().Leader() 161 | if err != nil { 162 | return "", err 163 | } 164 | 165 | if leader == "" { 166 | return "", errors.New("Consul cluster returned an empty leader response, so a leader must not have been elected yet.") 167 | } 168 | 169 | return leader, nil 170 | }) 171 | 172 | logger.Logf(t, "Consul cluster is properly deployed and has elected leader %s", leader) 173 | } 174 | 175 | // Create a Consul client 176 | func createConsulClient(t *testing.T, ipAddress string) *api.Client { 177 | config := api.DefaultConfig() 178 | config.Address 
= fmt.Sprintf("%s:8500", ipAddress) 179 | 180 | client, err := api.NewClient(config) 181 | if err != nil { 182 | t.Fatalf("Failed to create Consul client due to error: %v", err) 183 | } 184 | 185 | config.HttpClient.Timeout = 5 * time.Second 186 | 187 | return client 188 | } 189 | 190 | func checkEnterpriseInstall(t *testing.T, asgNameOutputVar string, terratestOptions *terraform.Options, awsRegion string, sshUser string, keyPair *aws.Ec2Keypair) { 191 | asgName := terraform.OutputRequired(t, terratestOptions, asgNameOutputVar) 192 | nodeIpAddress := getIpAddressOfAsgInstance(t, asgName, awsRegion) 193 | 194 | host := ssh.Host{ 195 | Hostname: nodeIpAddress, 196 | SshUserName: sshUser, 197 | SshKeyPair: keyPair.KeyPair, 198 | } 199 | 200 | maxRetries := 10 201 | sleepBetweenRetries := 10 * time.Second 202 | 203 | output := retry.DoWithRetry(t, "Check Enterprise Install", maxRetries, sleepBetweenRetries, func() (string, error) { 204 | out, err := ssh.CheckSshCommandE(t, host, "consul --help") 205 | if err != nil { 206 | return "", fmt.Errorf("Error running consul command: %s\n", err) 207 | } 208 | 209 | return out, nil 210 | }) 211 | 212 | if !strings.Contains(output, "license") { 213 | t.Fatalf("This consul package is not the enterprise version.\n") 214 | } 215 | } 216 | -------------------------------------------------------------------------------- /test/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/hashicorp/terraform-aws-consul/test 2 | 3 | go 1.14 4 | 5 | require ( 6 | github.com/gruntwork-io/terratest v0.27.2 7 | github.com/hashicorp/consul/api v1.4.0 8 | ) 9 | -------------------------------------------------------------------------------- /test/terratest_helpers.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/gruntwork-io/terratest/modules/packer" 8 | ) 9 | 10 | const 
CONSUL_AMI_TEMPLATE_VAR_REGION = "aws_region" 11 | const CONSUL_AMI_TEMPLATE_VAR_DOWNLOAD_URL = "CONSUL_DOWNLOAD_URL" 12 | 13 | // Use Packer to build the AMI in the given packer template, with the given build name, and return the AMI's ID 14 | func buildAmi(t *testing.T, packerTemplatePath string, packerBuildName string, awsRegion string, downloadUrl string) string { 15 | options := &packer.Options{ 16 | Template: packerTemplatePath, 17 | Only: packerBuildName, 18 | Vars: map[string]string{ 19 | CONSUL_AMI_TEMPLATE_VAR_REGION: awsRegion, 20 | }, 21 | Env: map[string]string{ 22 | CONSUL_AMI_TEMPLATE_VAR_DOWNLOAD_URL: downloadUrl, 23 | }, 24 | RetryableErrors: map[string]string{ 25 | "Error waiting for AMI: Failed with ResourceNotReady error": "https://www.packer.io/docs/builders/amazon.html#resourcenotready-error", 26 | }, 27 | MaxRetries: 3, 28 | TimeBetweenRetries: 10 * time.Second, 29 | } 30 | 31 | return packer.BuildAmi(t, options) 32 | } 33 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # ENVIRONMENT VARIABLES 3 | # Define these secrets as environment variables 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | # AWS_ACCESS_KEY_ID 7 | # AWS_SECRET_ACCESS_KEY 8 | # AWS_DEFAULT_REGION 9 | 10 | # --------------------------------------------------------------------------------------------------------------------- 11 | # OPTIONAL PARAMETERS 12 | # These parameters have reasonable defaults. 13 | # --------------------------------------------------------------------------------------------------------------------- 14 | 15 | variable "ami_id" { 16 | description = "The ID of the AMI to run in the cluster. 
This should be an AMI built from the Packer template under examples/consul-ami/consul.json. To keep this example simple, we run the same AMI on both server and client nodes, but in real-world usage, your client nodes would also run your apps. If the default value is used, Terraform will look up the latest AMI build automatically." 17 | type = string 18 | default = null 19 | } 20 | 21 | variable "cluster_name" { 22 | description = "What to name the Consul cluster and all of its associated resources" 23 | type = string 24 | default = "consul-example" 25 | } 26 | 27 | variable "num_servers" { 28 | description = "The number of Consul server nodes to deploy. We strongly recommend using 3 or 5." 29 | type = number 30 | default = 3 31 | } 32 | 33 | variable "num_clients" { 34 | description = "The number of Consul client nodes to deploy. You typically run the Consul client alongside your apps, so set this value to however many Instances make sense for your app code." 35 | type = number 36 | default = 6 37 | } 38 | 39 | variable "cluster_tag_key" { 40 | description = "The tag the EC2 Instances will look for to automatically discover each other and form a cluster." 41 | type = string 42 | default = "consul-servers" 43 | } 44 | 45 | variable "ssh_key_name" { 46 | description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair." 47 | type = string 48 | default = null 49 | } 50 | 51 | variable "vpc_id" { 52 | description = "The ID of the VPC in which the nodes will be deployed. Uses default VPC if not supplied." 53 | type = string 54 | default = null 55 | } 56 | 57 | variable "spot_price" { 58 | description = "The maximum hourly price to pay for EC2 Spot Instances." 59 | type = number 60 | default = null 61 | } 62 | 63 | variable "owners" { 64 | description = "A list of AWS account IDs to filter by when looking up the AMI (the accounts that own the AMI). Only used when ami_id is not specified."
65 | type = list 66 | default = ["178520105998"] 67 | } 68 | 69 | --------------------------------------------------------------------------------