├── .circleci └── config.yml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── pull_request_template.md ├── .gitignore ├── .pre-commit-config.yaml ├── CODEOWNERS ├── CONTRIBUTING.md ├── GRUNTWORK_PHILOSOPHY.md ├── LICENSE.txt ├── NOTICE ├── README.md ├── _docs ├── cloud-sql-icon.png ├── cloud-sql.png ├── mysql.png └── postgresql.png ├── examples ├── client-certificate │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── mysql-private-ip │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── mysql-public-ip │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── mysql-replicas │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── postgres-private-ip │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── postgres-public-ip │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf └── postgres-replicas │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── main.tf ├── modules └── cloud-sql │ ├── README-MySQL.md │ ├── README-PostgreSQL.md │ ├── README.md │ ├── compute_outputs.tf │ ├── core-concepts.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── outputs.tf ├── test ├── README.md ├── example_mysql_private_ip_test.go ├── example_mysql_public_ip_test.go ├── example_mysql_replicas_test.go ├── example_postgres_private_ip_test.go ├── example_postgres_public_ip_test.go ├── example_postgres_replicas_test.go ├── go.mod ├── go.sum ├── test_util.go └── validation │ └── validate_all_modules_and_examples_test.go └── variables.tf /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | defaults: &defaults 4 | machine: 5 | image: ubuntu-2004:202104-01 6 | 7 | env: &env 8 | environment: 9 | GRUNTWORK_INSTALLER_VERSION: v0.0.30 10 | TERRATEST_LOG_PARSER_VERSION: v0.30.4 11 | MODULE_CI_VERSION: v0.38.4 12 | TERRAFORM_VERSION: 1.0.3 13 | TERRAGRUNT_VERSION: NONE 14 | PACKER_VERSION: NONE 15 | GOLANG_VERSION: 1.16 16 | GO111MODULE: auto 17 | 18 | jobs: 19 | precommit: 20 | <<: *env 21 | docker: 22 | - image: circleci/python:3.8.1 23 | steps: 24 | - checkout 25 | 26 | - run: 27 | name: install dependencies 28 | command: | 29 | curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/master/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version "${GRUNTWORK_INSTALLER_VERSION}" 30 | gruntwork-install --module-name "gruntwork-module-circleci-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 31 | configure-environment-for-gruntwork-module \ 32 | --terraform-version ${TERRAFORM_VERSION} \ 33 | --terragrunt-version NONE \ 34 | --packer-version NONE \ 35 | --go-version ${GOLANG_VERSION} 36 | # Fail the build if the pre-commit hooks don't pass. Note: if you run pre-commit install locally, these hooks will 37 | # execute automatically every time before you commit, ensuring the build never fails at this step! 
38 | - run: 39 | command: | 40 | pip install pre-commit==1.21.0 cfgv==2.0.1 zipp==1.1.0 yapf 41 | go get golang.org/x/tools/cmd/goimports 42 | export GOPATH=~/go/bin && export PATH=$PATH:$GOPATH 43 | pre-commit install 44 | pre-commit run --all-files 45 | 46 | test: 47 | <<: *defaults 48 | <<: *env 49 | steps: 50 | - checkout 51 | - run: &install_gruntwork_tooling 52 | name: install gruntwork tooling 53 | command: | 54 | sudo apt-get -y update 55 | curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/master/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version "${GRUNTWORK_INSTALLER_VERSION}" 56 | gruntwork-install --module-name "gruntwork-module-circleci-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 57 | gruntwork-install --module-name "git-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 58 | gruntwork-install --binary-name "terratest_log_parser" --repo "https://github.com/gruntwork-io/terratest" --tag "${TERRATEST_LOG_PARSER_VERSION}" 59 | configure-environment-for-gruntwork-module --go-src-path ./test --terraform-version ${TERRAFORM_VERSION} --terragrunt-version ${TERRAGRUNT_VERSION} --packer-version ${PACKER_VERSION} --go-version ${GOLANG_VERSION} 60 | 61 | - run: 62 | name: run tests 63 | command: | 64 | # required for gcloud to authenticate correctly 65 | echo $GCLOUD_SERVICE_KEY | gcloud auth activate-service-account --key-file=- 66 | gcloud --quiet config set project ${GOOGLE_PROJECT_ID} 67 | gcloud --quiet config set compute/zone ${GOOGLE_COMPUTE_ZONE} 68 | # required for terraform and terratest to authenticate correctly 69 | echo $GCLOUD_SERVICE_KEY > /tmp/gcloud.json 70 | export GOOGLE_APPLICATION_CREDENTIALS="/tmp/gcloud.json" 71 | # run the tests 72 | mkdir -p /tmp/logs 73 | run-go-tests --path test --timeout 2h | tee /tmp/logs/all.log 74 | no_output_timeout: 1h 75 | 76 | - run: 77 | name: parse test output 78 | command: terratest_log_parser --testlog /tmp/logs/all.log --outputdir /tmp/logs 79 | when: always 80 | 81 | - store_artifacts: 82 | path: /tmp/logs 83 | - store_test_results: 84 | path: /tmp/logs 85 | 86 | workflows: 87 | version: 2 88 | test: 89 | jobs: 90 | - precommit: 91 | context: 92 | - GCP__automated-tests 93 | - GITHUB__PAT__gruntwork-ci 94 | filters: 95 | tags: 96 | only: /^v.*/ 97 | - test: 98 | context: 99 | - GCP__automated-tests 100 | - GITHUB__PAT__gruntwork-ci 101 | requires: 102 | - precommit 103 | filters: 104 | tags: 105 | only: /^v.*/ 106 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a bug report to help us improve. 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 14 | 15 | **Describe the bug** 16 | A clear and concise description of what the bug is. 17 | 18 | **To Reproduce** 19 | Steps to reproduce the behavior including the relevant Terraform/Terragrunt/Packer version number and any code snippets and module inputs you used. 20 | 21 | ```hcl 22 | // paste code snippets here 23 | ``` 24 | 25 | **Expected behavior** 26 | A clear and concise description of what you expected to happen. 27 | 28 | **Nice to have** 29 | - [ ] Terminal output 30 | - [ ] Screenshots 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 
34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Submit a feature request for this repo. 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | 14 | 15 | **Describe the solution you'd like** 16 | A clear and concise description of what you want to happen. 17 | 18 | **Describe alternatives you've considered** 19 | A clear and concise description of any alternative solutions or features you've considered. 20 | 21 | **Additional context** 22 | Add any other context or screenshots about the feature request here. 23 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | ## Description 8 | 9 | 10 | 11 | ### Documentation 12 | 13 | 21 | 22 | 23 | 24 | ## TODOs 25 | 26 | Please ensure all of these TODOs are completed before asking for a review. 27 | 28 | - [ ] Ensure the branch is named correctly with the issue number. e.g: `feature/new-vpc-endpoints-955` or `bug/missing-count-param-434`. 29 | - [ ] Update the docs. 30 | - [ ] Keep the changes backward compatible where possible. 31 | - [ ] Run the pre-commit checks successfully. 32 | - [ ] Run the relevant tests successfully. 33 | - [ ] Ensure any 3rd party code adheres with our [license policy](https://www.notion.so/gruntwork/Gruntwork-licenses-and-open-source-usage-policy-f7dece1f780341c7b69c1763f22b1378) or delete this line if its not applicable. 34 | 35 | 36 | ## Related Issues 37 | 38 | 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform files 2 | .terraform 3 | terraform.tfstate 4 | terraform.tfvars 5 | *.tfstate* 6 | *.zip 7 | 8 | # OS X files 9 | .history 10 | .DS_Store 11 | 12 | # IntelliJ files 13 | .idea_modules 14 | *.iml 15 | *.iws 16 | *.ipr 17 | .idea/ 18 | build/ 19 | */build/ 20 | out/ 21 | 22 | # Go best practices dictate that libraries should not include the vendor directory 23 | vendor 24 | 25 | #VIM swap files 26 | *.swp 27 | 28 | .test-data 29 | # Ignore Terraform lock files, as we want to test the Terraform code in these repos with the latest provider 30 | # versions. 31 | .terraform.lock.hcl 32 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/gruntwork-io/pre-commit 3 | rev: v0.1.10 4 | hooks: 5 | - id: terraform-fmt 6 | - id: goimports 7 | 8 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @marinalimeira @robmorgan @ina-stoyanova @gruntwork-io/maintenance-tier-3-orion 2 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guidelines 2 | 3 | 6 | 7 | Contributions to this Module are very welcome! 
We follow a fairly standard [pull request process]( 8 | https://help.github.com/articles/about-pull-requests/) for contributions, subject to the following guidelines: 9 | 10 | 1. [File a GitHub issue](#file-a-github-issue) 11 | 1. [Update the documentation](#update-the-documentation) 12 | 1. [Update the tests](#update-the-tests) 13 | 1. [Update the code](#update-the-code) 14 | 1. [Create a pull request](#create-a-pull-request) 15 | 1. [Merge and release](#merge-and-release) 16 | 17 | ## File a GitHub issue 18 | 19 | Before starting any work, we recommend filing a GitHub issue in this repo. This is your chance to ask questions and 20 | get feedback from the maintainers and the community before you sink a lot of time into writing (possibly the wrong) 21 | code. If there is anything you're unsure about, just ask! 22 | 23 | ## Update the documentation 24 | 25 | We recommend updating the documentation *before* updating any code (see [Readme Driven 26 | Development](http://tom.preston-werner.com/2010/08/23/readme-driven-development.html)). This ensures the documentation 27 | stays up to date and allows you to think through the problem at a high level before you get lost in the weeds of 28 | coding. 29 | 30 | ## Update the tests 31 | 32 | We also recommend updating the automated tests *before* updating any code (see [Test Driven 33 | Development](https://en.wikipedia.org/wiki/Test-driven_development)). That means you add or update a test case, 34 | verify that it's failing with a clear error message, and *then* make the code changes to get that test to pass. This 35 | ensures the tests stay up to date and verify all the functionality in this Module, including whatever new 36 | functionality you're adding in your contribution. Check out the [tests](https://github.com/gruntwork-io/terraform-google-sql/tree/master/test) folder for instructions on running the 37 | automated tests. 38 | 39 | ## Update the code 40 | 41 | At this point, make your code changes and use your new test case to verify that everything is working. As you work, 42 | keep in mind two things: 43 | 44 | 1. Backwards compatibility 45 | 1. Downtime 46 | 47 | ### Backwards compatibility 48 | 49 | Please make every effort to avoid unnecessary backwards incompatible changes. With Terraform code, this means: 50 | 51 | 1. Do not delete, rename, or change the type of input variables. 52 | 1. If you add an input variable, it should have a `default`. 53 | 1. Do not delete, rename, or change the type of output variables. 54 | 1. Do not delete or rename a module in the `modules` folder. 55 | 56 | If a backwards incompatible change cannot be avoided, please make sure to call that out when you submit a pull request, 57 | explaining why the change is absolutely necessary. 58 | 59 | ### Downtime 60 | 61 | Bear in mind that the Terraform code in this Module is used by real companies to run real infrastructure in 62 | production, and certain types of changes could cause downtime. For example, consider the following: 63 | 64 | 1. If you rename a resource (e.g. `google_sql_database_instance "foo"` -> `google_sql_database_instance "bar"`), Terraform will see that as deleting 65 | the old resource and creating a new one. 66 | 1. If you change certain attributes of a resource (e.g. the `name` of a `google_compute_instance`), the cloud provider (e.g. Google) may 67 | treat that as an instruction to delete the old resource and create a new one. 68 | 69 | Deleting certain types of resources (e.g.
virtual servers, load balancers) can cause downtime, so when making code 70 | changes, think carefully about how to avoid that. For example, can you avoid downtime by using 71 | [create_before_destroy](https://www.terraform.io/docs/configuration/resources.html#create_before_destroy)? Or via 72 | the `terraform state` command? If so, make sure to note this in your pull request. If downtime cannot be avoided, 73 | please make sure to call that out when you submit a pull request. 74 | 75 | 76 | ### Formatting and pre-commit hooks 77 | 78 | You must run `terraform fmt` on the code before committing. You can configure your computer to do this automatically 79 | using pre-commit hooks managed using [pre-commit](http://pre-commit.com/): 80 | 81 | 1. [Install pre-commit](http://pre-commit.com/#install). E.g.: `brew install pre-commit`. 82 | 1. Install the hooks: `pre-commit install`. 83 | 84 | That's it! Now just write your code, and every time you commit, `terraform fmt` will be run on the files you're 85 | committing. 86 | 87 | 88 | ## Create a pull request 89 | 90 | [Create a pull request](https://help.github.com/articles/creating-a-pull-request/) with your changes. Please make sure 91 | to include the following: 92 | 93 | 1. A description of the change, including a link to your GitHub issue. 94 | 1. The output of your automated test run, preferably in a [GitHub Gist](https://gist.github.com/). We cannot run 95 | automated tests for pull requests automatically due to [security 96 | concerns](https://circleci.com/docs/fork-pr-builds/#security-implications), so we need you to manually provide this 97 | test output so we can verify that everything is working. 98 | 1. Any notes on backwards incompatibility or downtime. 99 | 100 | ## Merge and release 101 | 102 | The maintainers for this repo will review your code and provide feedback. If everything looks good, they will merge the 103 | code and release a new version, which you'll be able to find in the [releases page](../../releases). -------------------------------------------------------------------------------- /GRUNTWORK_PHILOSOPHY.md: -------------------------------------------------------------------------------- 1 | # Gruntwork Philosophy 2 | 3 | At Gruntwork, we strive to accelerate the deployment of production grade infrastructure by providing a library of 4 | stable, reusable, and battle-tested infrastructure as code organized into a series of [modules](#what-is-a-module) with 5 | [submodules](#what-is-a-submodule). Each module represents a particular set of infrastructure that is componentized into 6 | smaller pieces represented by the submodules within the module. By doing so, we have built a composable library that can 7 | be combined to build out everything from simple single service deployments to complicated microservice setups so 8 | that your infrastructure can grow with your business needs. Every module we provide is built with the [production grade 9 | infrastructure checklist](#production-grade-infrastructure-checklist) in mind, ensuring that the services you deploy are 10 | resilient, fault tolerant, and scalable. 11 | 12 | 13 | ## What is a Module? 14 | 15 | A Module is a reusable, tested, documented, configurable, best-practices definition of a single piece of Infrastructure 16 | (e.g., Docker cluster, VPC, Jenkins, Consul), written using a combination of [Terraform](https://www.terraform.io/), Go, 17 | and Bash.
A module contains a set of automated tests, documentation, and examples that have been proven in production, 18 | providing the underlying infrastructure for [Gruntwork's customers](https://www.gruntwork.io/customers). 19 | 20 | Instead of figuring out the details of how to run a piece of infrastructure from scratch, you can reuse existing code 21 | that has been proven in production. And instead of maintaining all that infrastructure code yourself, you can leverage 22 | the work of the community to pick up infrastructure improvements through a version number bump. 23 | 24 | 25 | ## What is a Submodule? 26 | 27 | Each Infrastructure Module consists of one or more orthogonal Submodules that handle some specific aspect of that 28 | Infrastructure Module's functionality. Breaking the code up into multiple submodules makes it easier to reuse and 29 | compose to handle many different use cases. Although Modules are designed to provide an end to end solution to manage 30 | the relevant infrastructure by combining the Submodules defined in the Module, Submodules can be used independently for 31 | specific functionality that you need in your infrastructure code. 32 | 33 | 34 | ## Production Grade Infrastructure Checklist 35 | 36 | At Gruntwork, we have learned over the years that it is not enough to just get the services up and running in a publicly 37 | accessible space to call your application "production-ready." There are many more things to consider, and oftentimes 38 | many of these considerations are missing in the deployment plan of applications. These topics come up as afterthoughts, 39 | and are learned the hard way after the fact. That is why we codified all of them into a checklist that can be used as a 40 | reference to help ensure that they are considered before your application goes to production, and conscious decisions 41 | are made to neglect particular components if needed, as opposed to accidentally omitting them from consideration. 42 | 43 | 47 | 48 | | Task | Description | Example tools | 49 | |--------------------|-------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| 50 | | Install | Install the software binaries and all dependencies. | Bash, Chef, Ansible, Puppet | 51 | | Configure | Configure the software at runtime. Includes port settings, TLS certs, service discovery, leaders, followers, replication, etc. | Bash, Chef, Ansible, Puppet | 52 | | Provision | Provision the infrastructure. Includes EC2 instances, load balancers, network topology, security gr oups, IAM permissions, etc. | Terraform, CloudFormation | 53 | | Deploy | Deploy the service on top of the infrastructure. Roll out updates with no downtime. Includes blue-green, rolling, and canary deployments. | Scripts, Orchestration tools (ECS, k8s, Nomad) | 54 | | High availability | Withstand outages of individual processes, EC2 instances, services, Availability Zones, and regions. | Multi AZ, multi-region, replication, ASGs, ELBs | 55 | | Scalability | Scale up and down in response to load. Scale horizontally (more servers) and/or vertically (bigger servers). | ASGs, replication, sharding, caching, divide and conquer | 56 | | Performance | Optimize CPU, memory, disk, network, GPU, and usage. Includes query tuning, benchmarking, load testing, and profiling. 
| Dynatrace, valgrind, VisualVM, ab, Jmeter | 57 | | Networking | Configure static and dynamic IPs, ports, service discovery, firewalls, DNS, SSH access, and VPN access. | EIPs, ENIs, VPCs, NACLs, SGs, Route 53, OpenVPN | 58 | | Security | Encryption in transit (TLS) and on disk, authentication, authorization, secrets management, server hardening. | ACM, EBS Volumes, Cognito, Vault, CIS | 59 | | Metrics | Availability metrics, business metrics, app metrics, server metrics, events, observability, tracing, and alerting. | CloudWatch, DataDog, New Relic, Honeycomb | 60 | | Logs | Rotate logs on disk. Aggregate log data to a central location. | CloudWatch logs, ELK, Sumo Logic, Papertrail | 61 | | Backup and Restore | Make backups of DBs, caches, and other data on a scheduled basis. Replicate to separate region/account. | RDS, ElastiCache, ec2-snapper, Lambda | 62 | | Cost optimization | Pick proper instance types, use spot and reserved instances, use auto scaling, and nuke unused resources. | ASGs, spot instances, reserved instances | 63 | | Documentation | Document your code, architecture, and practices. Create playbooks to respond to incidents. | READMEs, wikis, Slack | 64 | | Tests | Write automated tests for your infrastructure code. Run tests after every commit and nightly. | Terratest | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | terraform-google-sql 2 | Copyright 2019 Gruntwork, Inc. 3 | 4 | This product includes software developed at Gruntwork (https://www.gruntwork.io/). 
5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Sunset notice 2 | 3 | We believe there is an opportunity to create a truly outstanding developer experience for deploying to the cloud, however developing this vision requires that we temporarily limit our focus to just one cloud. Gruntwork has hundreds of customers currently using AWS, so we have temporarily suspended our maintenance efforts on this repo. Once we have implemented and validated our vision for the developer experience on the cloud, we look forward to picking this up. In the meantime, you are welcome to use this code in accordance with the open source license, however we will not be responding to GitHub Issues or Pull Requests. 4 | 5 | If you wish to be the maintainer for this project, we are open to considering that. Please contact us at support@gruntwork.io. 6 | 7 | --- 8 | 9 | 20 | 21 | # Cloud SQL Modules 22 | 23 | [![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gruntwork-io/terraform-google-sql.svg?label=latest)](http://github.com/gruntwork-io/terraform-google-sql/releases/latest) 24 | ![Terraform Version](https://img.shields.io/badge/tf-%3E%3D1.0.x-blue.svg) 25 | 26 | This repo contains modules for running relational databases such as MySQL and PostgreSQL on 27 | [Google Cloud Platform (GCP)](https://cloud.google.com/) using [Cloud SQL](https://cloud.google.com/sql/). 28 | 29 | ## Cloud SQL Architecture 30 | 31 | ![Cloud SQL Architecture](https://github.com/gruntwork-io/terraform-google-sql/blob/master/_docs/cloud-sql.png "Cloud SQL Architecture") 32 | 33 | ## Features 34 | 35 | - Deploy a fully-managed relational database 36 | - Supports MySQL and PostgreSQL 37 | - Optional failover instances 38 | - Optional read replicas 39 | 40 | ## Learn 41 | 42 | This repo is a part of [the Gruntwork Infrastructure as Code Library](https://gruntwork.io/infrastructure-as-code-library/), a collection of reusable, battle-tested, production ready infrastructure code. If you’ve never used the Infrastructure as Code Library before, make sure to read [How to use the Gruntwork Infrastructure as Code Library](https://gruntwork.io/guides/foundations/how-to-use-gruntwork-infrastructure-as-code-library/)! 43 | 44 | ### Core concepts 45 | 46 | - [What is Cloud SQL](https://github.com/gruntwork-io/terraform-google-sql/blob/master/modules/cloud-sql/core-concepts.md#what-is-cloud-sql) 47 | - [Cloud SQL documentation](https://cloud.google.com/sql/docs/) 48 | - **[Designing Data Intensive Applications](https://dataintensive.net/)**: the best book we’ve found for understanding data systems, including relational databases, NoSQL, replication, sharding, consistency, and so on. 49 | 50 | ### Repo organisation 51 | 52 | This repo has the following folder structure: 53 | 54 | - [root](https://github.com/gruntwork-io/terraform-google-sql/tree/master): The root folder contains an example of how 55 | to deploy a private PostgreSQL instance in Cloud SQL. See [postgres-private-ip](https://github.com/gruntwork-io/terraform-google-sql/blob/master/examples/postgres-private-ip) 56 | for the documentation. 57 | 58 | - [modules](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules): This folder contains the 59 | main implementation code for this Module, broken down into multiple standalone submodules. 
60 | 61 | The primary module is: 62 | 63 | - [cloud-sql](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql): Deploy a Cloud SQL [MySQL](https://cloud.google.com/sql/docs/mysql/) or [PostgreSQL](https://cloud.google.com/sql/docs/postgres/) database. 64 | 65 | - [examples](https://github.com/gruntwork-io/terraform-google-sql/tree/master/examples): This folder contains 66 | examples of how to use the submodules. 67 | 68 | - [test](https://github.com/gruntwork-io/terraform-google-sql/tree/master/test): Automated tests for the submodules 69 | and examples. 70 | 71 | ## Deploy 72 | 73 | ### Non-production deployment (quick start for learning) 74 | 75 | If you just want to try this repo out for experimenting and learning, check out the following resources: 76 | 77 | - [examples folder](https://github.com/gruntwork-io/terraform-google-sql/blob/master/examples): The `examples` folder contains sample code optimized for learning, experimenting, and testing (but not production usage). 78 | 79 | ### Production deployment 80 | 81 | If you want to deploy this repo in production, check out the following resources: 82 | 83 | - [cloud-sql module in the GCP Reference Architecture](https://github.com/gruntwork-io/infrastructure-modules-google/tree/master/data-stores/cloud-sql): 84 | Production-ready sample code from the GCP Reference Architecture. Note that the repository is private and accessible only with 85 | Gruntwork subscription. To get access, [subscribe now](https://www.gruntwork.io/pricing/) or contact us at [support@gruntwork.io](mailto:support@gruntwork.io) for more information. 86 | 87 | ## Manage 88 | 89 | ### Day-to-day operations 90 | 91 | - [How to connect to a Cloud SQL instance](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql/core-concepts.md#how-do-you-connect-to-the-database) 92 | - [How to configure high availability](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql/core-concepts.md#how-do-you-configure-high-availability) 93 | - [How to secure the database instance](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql/core-concepts.md#how-do-you-secure-the-database) 94 | - [How to scale the database](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql/core-concepts.md#how-do-you-secure-the-database) 95 | 96 | ## Support 97 | 98 | If you need help with this repo or anything else related to infrastructure or DevOps, Gruntwork offers [Commercial Support](https://gruntwork.io/support/) via Slack, email, and phone/video. If you’re already a Gruntwork customer, hop on Slack and ask away! If not, [subscribe now](https://www.gruntwork.io/pricing/). If you’re not sure, feel free to email us at [support@gruntwork.io](mailto:support@gruntwork.io). 99 | 100 | ## Contributions 101 | 102 | Contributions to this repo are very welcome and appreciated! If you find a bug or want to add a new feature or even contribute an entirely new module, we are very happy to accept pull requests, provide feedback, and run your changes through our automated test suite. 103 | 104 | Please see [Contributing to the Gruntwork Infrastructure as Code Library](https://gruntwork.io/guides/foundations/how-to-use-gruntwork-infrastructure-as-code-library/#contributing-to-the-gruntwork-infrastructure-as-code-library) for instructions. 
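As a concrete companion to the Learn and Deploy sections above, here is a minimal sketch of how the `cloud-sql` module is typically wired into your own Terraform code. It is only an illustration: the `ref` tag and all values below are placeholders, and the complete, tested configurations live in the [examples folder](https://github.com/gruntwork-io/terraform-google-sql/blob/master/examples).

```hcl
# Illustrative sketch only -- see examples/mysql-public-ip and examples/mysql-private-ip for tested code.
module "mysql" {
  # Pin to a released version of this repo; the ref below is a placeholder.
  source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0"

  project = var.project
  region  = var.region

  name    = "example-mysql"
  db_name = "default"

  engine       = "MYSQL_5_7"
  machine_type = "db-f1-micro"

  # Set the credentials via TF_VAR_* environment variables so they stay out of source control.
  master_user_name     = var.master_user_name
  master_user_password = var.master_user_password
  master_user_host     = "%"
}
```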
105 | 106 | ## License 107 | 108 | Please see [LICENSE](https://github.com/gruntwork-io/terraform-google-sql/blob/master/LICENSE.txt) for details on how the code in this repo is licensed. 109 | 110 | Copyright © 2019 Gruntwork, Inc. 111 | -------------------------------------------------------------------------------- /_docs/cloud-sql-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruntwork-io/terraform-google-sql/42aeb4ade70e8a5171fed62cefef0e377014eb8c/_docs/cloud-sql-icon.png -------------------------------------------------------------------------------- /_docs/cloud-sql.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruntwork-io/terraform-google-sql/42aeb4ade70e8a5171fed62cefef0e377014eb8c/_docs/cloud-sql.png -------------------------------------------------------------------------------- /_docs/mysql.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruntwork-io/terraform-google-sql/42aeb4ade70e8a5171fed62cefef0e377014eb8c/_docs/mysql.png -------------------------------------------------------------------------------- /_docs/postgresql.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gruntwork-io/terraform-google-sql/42aeb4ade70e8a5171fed62cefef0e377014eb8c/_docs/postgresql.png -------------------------------------------------------------------------------- /examples/client-certificate/README.md: -------------------------------------------------------------------------------- 1 | # Client Certificate Example 2 | 3 | This folder contains an example of how to create client certificates for a [Cloud SQL](https://cloud.google.com/sql/) database instance. 4 | Due to the inherent Cloud SQL system architecture, there can be only one pending operation on an instance at any given time, 5 | which limits concurrent writes to a Cloud SQL database. To avoid conflicting with other ongoing operations, 6 | we create the certificate in a separate module. 7 | 8 | Creating the certificate while other operations are in progress will result in `googleapi: Error 409: Operation failed because another operation was already in progress.` 9 | 10 | 11 | ## How do you run this example? 12 | 13 | To run this example, you need to: 14 | 15 | 1. Install [Terraform](https://www.terraform.io/). 16 | 1. Open up `variables.tf` and set secrets at the top of the file as environment variables and fill in any other variables in 17 | the file that don't have defaults. 18 | 1. `terraform init`. 19 | 1. `terraform plan`. 20 | 1. If the plan looks good, run `terraform apply`. 21 | 22 | When the templates are applied, Terraform will output the IP address of the instance and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy).
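If you prefer not to be prompted for input values, the required variables for this example can also be supplied in a `terraform.tfvars` file. The values below are purely illustrative; `database_instance_name` must point at an existing Cloud SQL instance (for example, one created by the other examples in this repo):

```hcl
# terraform.tfvars -- illustrative values only
project                = "my-gcp-project-id"
region                 = "europe-west1"
common_name            = "example-client"
database_instance_name = "my-existing-cloud-sql-instance"
```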
23 | -------------------------------------------------------------------------------- /examples/client-certificate/main.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # CREATE A CLIENT CERTIFICATE FOR CLOUD SQL DATABASE 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # CONFIGURE OUR GCP CONNECTION 7 | # ------------------------------------------------------------------------------ 8 | 9 | provider "google-beta" { 10 | project = var.project 11 | region = var.region 12 | } 13 | 14 | terraform { 15 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 16 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 17 | # forwards compatible with 1.0.x code. 18 | required_version = ">= 0.12.26" 19 | 20 | required_providers { 21 | google-beta = { 22 | source = "hashicorp/google-beta" 23 | version = "~> 3.57.0" 24 | } 25 | } 26 | } 27 | 28 | # ------------------------------------------------------------------------------ 29 | # CREATE CLIENT CERTIFICATE 30 | # ------------------------------------------------------------------------------ 31 | 32 | resource "google_sql_ssl_cert" "client_cert" { 33 | provider = google-beta 34 | common_name = var.common_name 35 | instance = var.database_instance_name 36 | } 37 | -------------------------------------------------------------------------------- /examples/client-certificate/outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # CLIENT CERTIFICATE OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "client_ca_cert" { 6 | description = "Certificate data for the client certificate." 7 | value = google_sql_ssl_cert.client_cert.cert 8 | } 9 | 10 | # In real-world cases, the output for the private key should always be encrypted 11 | output "client_private_key" { 12 | description = "Private key associated with the client certificate." 13 | value = google_sql_ssl_cert.client_cert.private_key 14 | sensitive = true 15 | } 16 | -------------------------------------------------------------------------------- /examples/client-certificate/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | description = "The region to host the database in." 13 | type = string 14 | } 15 | 16 | # Note, after a name db instance is used, it cannot be reused for up to one week. 17 | variable "common_name" { 18 | description = "The common name to be used in the certificate to identify the client. Constrained to [a-zA-Z.-_ ]+. Changing this forces a new resource to be created." 
19 | type = string 20 | } 21 | 22 | variable "database_instance_name" { 23 | description = "The name of the Cloud SQL instance. Changing this forces a new resource to be created." 24 | type = string 25 | } 26 | -------------------------------------------------------------------------------- /examples/mysql-private-ip/README.md: -------------------------------------------------------------------------------- 1 | # MySQL Cloud SQL Private IP Example 2 | 3 | This folder contains an example of how to use the [Cloud SQL module](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql) to create a [Google Cloud SQL](https://cloud.google.com/sql/) 4 | [MySQL](https://cloud.google.com/sql/docs/mysql/) database instance with a [private IP address](https://cloud.google.com/sql/docs/mysql/private-ip). 5 | 6 | ## How do you run this example? 7 | 8 | To run this example, you need to: 9 | 10 | 1. Install [Terraform](https://www.terraform.io/). 11 | 1. Open up `variables.tf` and set secrets at the top of the file as environment variables and fill in any other variables in 12 | the file that don't have defaults. 13 | 1. `terraform init`. 14 | 1. `terraform plan`. 15 | 1. If the plan looks good, run `terraform apply`. 16 | 17 | When the templates are applied, Terraform will output the IP address of the instance 18 | and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy). 19 | 20 | Note that you cannot connect to the private IP instance from outside Google Cloud Platform. 21 | If you want to experiment with connecting from your own workstation, see the [public IP example](https://github.com/gruntwork-io/terraform-google-sql/tree/master/examples/mysql-public-ip) 22 | -------------------------------------------------------------------------------- /examples/mysql-private-ip/main.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # LAUNCH A MYSQL CLOUD SQL PRIVATE IP INSTANCE 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # CONFIGURE OUR GCP CONNECTION 7 | # ------------------------------------------------------------------------------ 8 | 9 | provider "google-beta" { 10 | project = var.project 11 | region = var.region 12 | } 13 | 14 | terraform { 15 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 16 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 17 | # forwards compatible with 1.0.x code. 18 | required_version = ">= 0.12.26" 19 | 20 | required_providers { 21 | google-beta = { 22 | source = "hashicorp/google-beta" 23 | version = "~> 3.57.0" 24 | } 25 | } 26 | } 27 | 28 | # ------------------------------------------------------------------------------ 29 | # CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES 30 | # ------------------------------------------------------------------------------ 31 | 32 | resource "random_id" "name" { 33 | byte_length = 2 34 | } 35 | 36 | locals { 37 | # If name_override is specified, use that - otherwise use the name_prefix with a random string 38 | instance_name = var.name_override == null ? 
format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override 39 | private_network_name = "private-network-${random_id.name.hex}" 40 | private_ip_name = "private-ip-${random_id.name.hex}" 41 | } 42 | 43 | # ------------------------------------------------------------------------------ 44 | # CREATE COMPUTE NETWORKS 45 | # ------------------------------------------------------------------------------ 46 | 47 | # Simple network, auto-creates subnetworks 48 | resource "google_compute_network" "private_network" { 49 | provider = google-beta 50 | name = local.private_network_name 51 | } 52 | 53 | # Reserve global internal address range for the peering 54 | resource "google_compute_global_address" "private_ip_address" { 55 | provider = google-beta 56 | name = local.private_ip_name 57 | purpose = "VPC_PEERING" 58 | address_type = "INTERNAL" 59 | prefix_length = 16 60 | network = google_compute_network.private_network.self_link 61 | } 62 | 63 | # Establish VPC network peering connection using the reserved address range 64 | resource "google_service_networking_connection" "private_vpc_connection" { 65 | provider = google-beta 66 | network = google_compute_network.private_network.self_link 67 | service = "servicenetworking.googleapis.com" 68 | reserved_peering_ranges = [google_compute_global_address.private_ip_address.name] 69 | } 70 | 71 | # ------------------------------------------------------------------------------ 72 | # CREATE DATABASE INSTANCE WITH PRIVATE IP 73 | # ------------------------------------------------------------------------------ 74 | 75 | module "mysql" { 76 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 77 | # to a specific version of the modules, such as the following example: 78 | # source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0" 79 | source = "../../modules/cloud-sql" 80 | 81 | project = var.project 82 | region = var.region 83 | name = local.instance_name 84 | db_name = var.db_name 85 | 86 | engine = var.mysql_version 87 | machine_type = var.machine_type 88 | 89 | # To make it easier to test this example, we are disabling deletion protection so we can destroy the databases 90 | # during the tests. By default, we recommend setting deletion_protection to true, to ensure database instances are 91 | # not inadvertently destroyed. 92 | deletion_protection = false 93 | 94 | # These together will construct the master_user privileges, i.e. 95 | # 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. 96 | # These should typically be set as the environment variable TF_VAR_master_user_password, etc. 97 | # so you don't check these into source control." 
98 | master_user_password = var.master_user_password 99 | 100 | master_user_name = var.master_user_name 101 | master_user_host = "%" 102 | 103 | # Pass the private network link to the module 104 | private_network = google_compute_network.private_network.self_link 105 | 106 | # Wait for the vpc connection to complete 107 | dependencies = [google_service_networking_connection.private_vpc_connection.network] 108 | 109 | # Set auto-increment flags to test the 110 | # feature during automated testing 111 | database_flags = [ 112 | { 113 | name = "auto_increment_increment" 114 | value = "6" 115 | }, 116 | { 117 | name = "auto_increment_offset" 118 | value = "6" 119 | }, 120 | ] 121 | 122 | custom_labels = { 123 | test-id = "mysql-private-ip-example" 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /examples/mysql-private-ip/outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # MASTER OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "master_instance_name" { 6 | description = "The name of the database instance" 7 | value = module.mysql.master_instance_name 8 | } 9 | 10 | output "master_ip_addresses" { 11 | description = "All IP addresses of the instance as list of maps, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address" 12 | value = module.mysql.master_ip_addresses 13 | } 14 | 15 | output "master_private_ip" { 16 | description = "The private IPv4 address of the master instance." 17 | value = module.mysql.master_private_ip_address 18 | } 19 | 20 | output "master_instance" { 21 | description = "Self link to the master instance" 22 | value = module.mysql.master_instance 23 | } 24 | 25 | output "master_proxy_connection" { 26 | description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 27 | value = module.mysql.master_proxy_connection 28 | } 29 | 30 | # ------------------------------------------------------------------------------ 31 | # DB OUTPUTS 32 | # ------------------------------------------------------------------------------ 33 | 34 | output "db_name" { 35 | description = "Name of the default database" 36 | value = module.mysql.db_name 37 | } 38 | 39 | output "db" { 40 | description = "Self link to the default database" 41 | value = module.mysql.db 42 | } 43 | -------------------------------------------------------------------------------- /examples/mysql-private-ip/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | description = "The region to host the database in." 13 | type = string 14 | } 15 | 16 | # Note, after a name db instance is used, it cannot be reused for up to one week. 17 | variable "name_prefix" { 18 | description = "The name prefix for the database instance. 
Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter." 19 | type = string 20 | } 21 | 22 | variable "master_user_name" { 23 | description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control." 24 | type = string 25 | } 26 | 27 | variable "master_user_password" { 28 | description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control." 29 | type = string 30 | } 31 | 32 | # --------------------------------------------------------------------------------------------------------------------- 33 | # OPTIONAL PARAMETERS 34 | # Generally, these values won't need to be changed. 35 | # --------------------------------------------------------------------------------------------------------------------- 36 | 37 | variable "mysql_version" { 38 | description = "The engine version of the database, e.g. `MYSQL_5_6` or `MYSQL_5_7`. See https://cloud.google.com/sql/docs/features for supported versions." 39 | type = string 40 | default = "MYSQL_5_7" 41 | } 42 | 43 | variable "machine_type" { 44 | description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details" 45 | type = string 46 | default = "db-f1-micro" 47 | } 48 | 49 | variable "db_name" { 50 | description = "Name for the db" 51 | type = string 52 | default = "default" 53 | } 54 | 55 | variable "name_override" { 56 | description = "You may optionally override the name_prefix + random string by specifying an override" 57 | type = string 58 | default = null 59 | } 60 | -------------------------------------------------------------------------------- /examples/mysql-public-ip/README.md: -------------------------------------------------------------------------------- 1 | # MySQL Cloud SQL Public IP Example 2 | 3 | This folder contains an example of how to use the [Cloud SQL module](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql) to create a [Google Cloud SQL](https://cloud.google.com/sql/) 4 | [MySQL](https://cloud.google.com/sql/docs/mysql/) database instance with a [public IP address](https://cloud.google.com/sql/docs/mysql/connect-external-app#appaccessIP). 5 | 6 | ## How do you run this example? 7 | 8 | To run this example, you need to: 9 | 10 | 1. Install [Terraform](https://www.terraform.io/). 11 | 1. Open up `variables.tf` and set secrets at the top of the file as environment variables and fill in any other variables in 12 | the file that don't have defaults. 13 | 1. `terraform init`. 14 | 1. `terraform plan`. 15 | 1. If the plan looks good, run `terraform apply`. 16 | 17 | When the templates are applied, Terraform will output the IP address of the instance and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy). 
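As with the other examples, the non-secret inputs can be supplied in a `terraform.tfvars` file, while the master user credentials are best passed via `TF_VAR_*` environment variables so they never end up in source control. The values below are illustrative only; note that a random suffix is appended to `name_prefix` unless `name_override` is set:

```hcl
# terraform.tfvars -- illustrative values only
project     = "my-gcp-project-id"
region      = "europe-west1"
name_prefix = "mysql-public-ip"

# Keep the credentials out of this file; export them instead, e.g.:
#   export TF_VAR_master_user_name="admin"
#   export TF_VAR_master_user_password="..."
```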
-------------------------------------------------------------------------------- /examples/mysql-public-ip/main.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # LAUNCH A MYSQL CLOUD SQL PUBLIC IP INSTANCE 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # CONFIGURE OUR GCP CONNECTION 7 | # ------------------------------------------------------------------------------ 8 | 9 | provider "google-beta" { 10 | project = var.project 11 | region = var.region 12 | } 13 | 14 | terraform { 15 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 16 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 17 | # forwards compatible with 1.0.x code. 18 | required_version = ">= 0.12.26" 19 | 20 | required_providers { 21 | google-beta = { 22 | source = "hashicorp/google-beta" 23 | version = "~> 3.57.0" 24 | } 25 | } 26 | } 27 | 28 | # ------------------------------------------------------------------------------ 29 | # CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES 30 | # ------------------------------------------------------------------------------ 31 | 32 | resource "random_id" "name" { 33 | byte_length = 2 34 | } 35 | 36 | locals { 37 | # If name_override is specified, use that - otherwise use the name_prefix with a random string 38 | instance_name = var.name_override == null ? format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override 39 | } 40 | 41 | # ------------------------------------------------------------------------------ 42 | # CREATE DATABASE INSTANCE WITH PUBLIC IP 43 | # ------------------------------------------------------------------------------ 44 | 45 | module "mysql" { 46 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 47 | # to a specific version of the modules, such as the following example: 48 | # source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0" 49 | source = "../../modules/cloud-sql" 50 | 51 | project = var.project 52 | region = var.region 53 | name = local.instance_name 54 | db_name = var.db_name 55 | 56 | engine = var.mysql_version 57 | machine_type = var.machine_type 58 | 59 | # These together will construct the master_user privileges, i.e. 60 | # 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. 61 | # These should typically be set as the environment variable TF_VAR_master_user_password, etc. 62 | # so you don't check these into source control." 63 | master_user_password = var.master_user_password 64 | 65 | master_user_name = var.master_user_name 66 | master_user_host = "%" 67 | 68 | # To make it easier to test this example, we are giving the instances public IP addresses and allowing inbound 69 | # connections from anywhere. We also disable deletion protection so we can destroy the databases during the tests. 70 | # In real-world usage, your instances should live in private subnets, only have private IP addresses, and only allow 71 | # access from specific trusted networks, servers or applications in your VPC. By default, we recommend setting 72 | # deletion_protection to true, to ensure database instances are not inadvertently destroyed. 
73 | enable_public_internet_access = true 74 | deletion_protection = false 75 | 76 | # Default setting for this is 'false' in 'variables.tf' 77 | # In the test cases, we're setting this to true, to test forced SSL. 78 | require_ssl = var.require_ssl 79 | 80 | authorized_networks = [ 81 | { 82 | name = "allow-all-inbound" 83 | value = "0.0.0.0/0" 84 | }, 85 | ] 86 | 87 | # Set auto-increment flags to test the 88 | # feature during automated testing 89 | database_flags = [ 90 | { 91 | name = "auto_increment_increment" 92 | value = "5" 93 | }, 94 | { 95 | name = "auto_increment_offset" 96 | value = "5" 97 | }, 98 | ] 99 | 100 | custom_labels = { 101 | test-id = "mysql-public-ip-example" 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /examples/mysql-public-ip/outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # MASTER OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "master_instance_name" { 6 | description = "The name of the database instance" 7 | value = module.mysql.master_instance_name 8 | } 9 | 10 | output "master_public_ip" { 11 | description = "The public IPv4 address of the master instance." 12 | value = module.mysql.master_public_ip_address 13 | } 14 | 15 | output "master_ca_cert" { 16 | value = module.mysql.master_ca_cert 17 | description = "The CA Certificate used to connect to the SQL Instance via SSL" 18 | } 19 | 20 | output "master_instance" { 21 | description = "Self link to the master instance" 22 | value = module.mysql.master_instance 23 | } 24 | 25 | output "master_proxy_connection" { 26 | description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 27 | value = module.mysql.master_proxy_connection 28 | } 29 | 30 | # ------------------------------------------------------------------------------ 31 | # DB OUTPUTS 32 | # ------------------------------------------------------------------------------ 33 | 34 | output "db_name" { 35 | description = "Name of the default database" 36 | value = module.mysql.db_name 37 | } 38 | 39 | output "db" { 40 | description = "Self link to the default database" 41 | value = module.mysql.db 42 | } 43 | -------------------------------------------------------------------------------- /examples/mysql-public-ip/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | description = "The region to host the database in." 13 | type = string 14 | } 15 | 16 | # Note, after a name db instance is used, it cannot be reused for up to one week. 17 | variable "name_prefix" { 18 | description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter." 
19 | type = string 20 | } 21 | 22 | variable "master_user_name" { 23 | description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control." 24 | type = string 25 | } 26 | 27 | variable "master_user_password" { 28 | description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control." 29 | type = string 30 | } 31 | 32 | # --------------------------------------------------------------------------------------------------------------------- 33 | # OPTIONAL PARAMETERS 34 | # Generally, these values won't need to be changed. 35 | # --------------------------------------------------------------------------------------------------------------------- 36 | 37 | variable "mysql_version" { 38 | description = "The engine version of the database, e.g. `MYSQL_5_6` or `MYSQL_5_7`. See https://cloud.google.com/sql/docs/features for supported versions." 39 | type = string 40 | default = "MYSQL_5_7" 41 | } 42 | 43 | variable "machine_type" { 44 | description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details" 45 | type = string 46 | default = "db-f1-micro" 47 | } 48 | 49 | variable "db_name" { 50 | description = "Name for the db" 51 | type = string 52 | default = "default" 53 | } 54 | 55 | variable "name_override" { 56 | description = "You may optionally override the name_prefix + random string by specifying an override" 57 | type = string 58 | default = null 59 | } 60 | 61 | # When configuring a public IP instance, you should only allow secure connections 62 | # For testing purposes, we're initially allowing unsecured connections. 63 | variable "require_ssl" { 64 | description = "True if the instance should require SSL/TLS for users connecting over IP. Note: SSL/TLS is needed to provide security when you connect to Cloud SQL using IP addresses. If you are connecting to your instance only by using the Cloud SQL Proxy or the Java Socket Library, you do not need to configure your instance to use SSL/TLS." 65 | type = bool 66 | default = false 67 | } 68 | -------------------------------------------------------------------------------- /examples/mysql-replicas/README.md: -------------------------------------------------------------------------------- 1 | # MySQL Cloud SQL HA Example 2 | 3 | 6 | 7 | This folder contains an example of how to use the [Cloud SQL module](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql) to create a [High Availability](https://cloud.google.com/sql/docs/mysql/configure-ha) [Google Cloud SQL](https://cloud.google.com/sql/) 8 | [MySQL](https://cloud.google.com/sql/docs/mysql/) database cluster with a [public IP](https://cloud.google.com/sql/docs/mysql/connect-external-app#appaccessIP) and failover and [read replicas](https://cloud.google.com/sql/docs/mysql/replication/). 9 | 10 | ## How do you run this example? 11 | 12 | To run this example, you need to: 13 | 14 | 1. Install [Terraform](https://www.terraform.io/). 15 | 1. Open up `variables.tf` and set secrets at the top of the file as environment variables and fill in any other variables in 16 | the file that don't have defaults. 17 | 1. `terraform init`. 18 | 1. 
`terraform plan`. 19 | 1. If the plan looks good, run `terraform apply`. 20 | 21 | When the templates are applied, Terraform will output the IP address of the instance 22 | and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy). 23 | -------------------------------------------------------------------------------- /examples/mysql-replicas/main.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # LAUNCH A MYSQL CLUSTER WITH FAILOVER AND READ REPLICAS 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # CONFIGURE OUR GCP CONNECTION 7 | # ------------------------------------------------------------------------------ 8 | 9 | provider "google-beta" { 10 | project = var.project 11 | region = var.region 12 | } 13 | 14 | terraform { 15 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 16 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 17 | # forwards compatible with 1.0.x code. 18 | required_version = ">= 0.12.26" 19 | 20 | required_providers { 21 | google-beta = { 22 | source = "hashicorp/google-beta" 23 | version = "~> 3.57.0" 24 | } 25 | } 26 | } 27 | 28 | # ------------------------------------------------------------------------------ 29 | # CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES 30 | # ------------------------------------------------------------------------------ 31 | 32 | resource "random_id" "name" { 33 | byte_length = 2 34 | } 35 | 36 | locals { 37 | # If name_override is specified, use that - otherwise use the name_prefix with a random string 38 | instance_name = var.name_override == null ? format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override 39 | private_network_name = "private-network-${random_id.name.hex}" 40 | private_ip_name = "private-ip-${random_id.name.hex}" 41 | } 42 | 43 | # ------------------------------------------------------------------------------ 44 | # CREATE DATABASE CLUSTER WITH PUBLIC IP 45 | # ------------------------------------------------------------------------------ 46 | 47 | module "mysql" { 48 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 49 | # to a specific version of the modules, such as the following example: 50 | # source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0" 51 | source = "../../modules/cloud-sql" 52 | 53 | project = var.project 54 | region = var.region 55 | name = local.instance_name 56 | db_name = var.db_name 57 | 58 | engine = var.mysql_version 59 | machine_type = var.machine_type 60 | 61 | master_zone = var.master_zone 62 | 63 | # To make it easier to test this example, we are giving the instances public IP addresses and allowing inbound 64 | # connections from anywhere. We also disable deletion protection so we can destroy the databases during the tests. 65 | # In real-world usage, your instances should live in private subnets, only have private IP addresses, and only allow 66 | # access from specific trusted networks, servers or applications in your VPC. 
By default, we recommend setting 67 | # deletion_protection to true, to ensure database instances are not inadvertently destroyed. 68 | enable_public_internet_access = true 69 | deletion_protection = false 70 | 71 | authorized_networks = [ 72 | { 73 | name = "allow-all-inbound" 74 | value = "0.0.0.0/0" 75 | }, 76 | ] 77 | 78 | # Indicate that we want to create a failover replica 79 | enable_failover_replica = true 80 | mysql_failover_replica_zone = var.failover_replica_zone 81 | 82 | # Indicate we want read replicas to be created 83 | num_read_replicas = var.num_read_replicas 84 | read_replica_zones = var.read_replica_zones 85 | 86 | # These together will construct the master_user privileges, i.e. 87 | # 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. 88 | # These should typically be set as the environment variable TF_VAR_master_user_password, etc. 89 | # so you don't check these into source control." 90 | master_user_password = var.master_user_password 91 | 92 | master_user_name = var.master_user_name 93 | master_user_host = "%" 94 | 95 | # Set auto-increment flags to test the 96 | # feature during automated testing 97 | database_flags = [ 98 | { 99 | name = "auto_increment_increment" 100 | value = "7" 101 | }, 102 | { 103 | name = "auto_increment_offset" 104 | value = "7" 105 | }, 106 | ] 107 | 108 | custom_labels = { 109 | test-id = "mysql-replicas-example" 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /examples/mysql-replicas/outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # MASTER OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "master_instance_name" { 6 | description = "The name of the database instance" 7 | value = module.mysql.master_instance_name 8 | } 9 | 10 | output "master_public_ip" { 11 | description = "The public IPv4 address of the master instance." 12 | value = module.mysql.master_public_ip_address 13 | } 14 | 15 | output "master_instance" { 16 | description = "Self link to the master instance" 17 | value = module.mysql.master_instance 18 | } 19 | 20 | output "master_proxy_connection" { 21 | description = "Instance path for connecting with Cloud SQL Proxy. 
Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 22 | value = module.mysql.master_proxy_connection 23 | } 24 | 25 | # ------------------------------------------------------------------------------ 26 | # DB OUTPUTS 27 | # ------------------------------------------------------------------------------ 28 | 29 | output "db_name" { 30 | description = "Name of the default database" 31 | value = module.mysql.db_name 32 | } 33 | 34 | output "db" { 35 | description = "Self link to the default database" 36 | value = module.mysql.db 37 | } 38 | 39 | # ------------------------------------------------------------------------------ 40 | # FAILOVER REPLICA OUTPUTS 41 | # ------------------------------------------------------------------------------ 42 | 43 | output "failover_instance" { 44 | description = "Self link to the failover instance" 45 | value = module.mysql.failover_instance 46 | } 47 | 48 | output "failover_instance_name" { 49 | description = "The name of the failover database instance" 50 | value = module.mysql.failover_instance_name 51 | } 52 | 53 | output "failover_public_ip" { 54 | description = "The public IPv4 address of the failover instance" 55 | value = module.mysql.failover_public_ip_address 56 | } 57 | 58 | output "failover_proxy_connection" { 59 | description = "Failover instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 60 | value = module.mysql.failover_proxy_connection 61 | } 62 | 63 | # ------------------------------------------------------------------------------ 64 | # READ REPLICA OUTPUTS 65 | # ------------------------------------------------------------------------------ 66 | 67 | output "read_replica_instance_names" { 68 | description = "List of names for the read replica instances" 69 | value = module.mysql.read_replica_instance_names 70 | } 71 | 72 | output "read_replica_public_ips" { 73 | description = "List of public IPv4 addresses of the read replica instances" 74 | value = module.mysql.read_replica_public_ip_addresses 75 | } 76 | 77 | output "read_replica_instances" { 78 | description = "List of self links to the read replica instances" 79 | value = module.mysql.read_replica_instances 80 | } 81 | 82 | output "read_replica_proxy_connections" { 83 | description = "List of read replica instance paths for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 84 | value = module.mysql.read_replica_proxy_connections 85 | } 86 | 87 | # Although we don't use the values, this output highlights the JSON encoded output we use in certain 88 | # cases where the resource output cannot properly be computed. 
89 | # See https://github.com/hashicorp/terraform/issues/17048 90 | output "read_replica_server_ca_certs" { 91 | description = "JSON encoded list of CA Certificates used to connect to the read replica instances via SSL" 92 | value = module.mysql.read_replica_server_ca_certs 93 | } 94 | -------------------------------------------------------------------------------- /examples/mysql-replicas/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | description = "The region to host the database in (e.g. 'us-central1')." 13 | type = string 14 | } 15 | 16 | variable "master_zone" { 17 | description = "The preferred zone for the master instance (e.g. 'us-central1-a'). Must be different than 'failover_replica_zone'." 18 | type = string 19 | } 20 | 21 | variable "failover_replica_zone" { 22 | description = "The preferred zone for the failover instance (e.g. 'us-central1-b'). Must be different than 'master_zone'." 23 | type = string 24 | } 25 | 26 | variable "num_read_replicas" { 27 | description = "The number of read replicas to create. Cloud SQL will replicate all data from the master to these replicas, which you can use to horizontally scale read traffic." 28 | type = number 29 | } 30 | 31 | variable "read_replica_zones" { 32 | description = "A list of compute zones where read replicas should be created. List size should match 'num_read_replicas'" 33 | type = list(string) 34 | 35 | # Example: 36 | # default = ["us-central1-b", "us-central1-c"] 37 | } 38 | 39 | # Note, after a name db instance is used, it cannot be reused for up to one week. 40 | variable "name_prefix" { 41 | description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter." 42 | type = string 43 | } 44 | 45 | variable "master_user_name" { 46 | description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control." 47 | type = string 48 | } 49 | 50 | variable "master_user_password" { 51 | description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control." 52 | type = string 53 | } 54 | 55 | # --------------------------------------------------------------------------------------------------------------------- 56 | # OPTIONAL PARAMETERS 57 | # Generally, these values won't need to be changed. 58 | # --------------------------------------------------------------------------------------------------------------------- 59 | 60 | variable "mysql_version" { 61 | description = "The engine version of the database, e.g. `MYSQL_5_6` or `MYSQL_5_7`. See https://cloud.google.com/sql/docs/features for supported versions." 
62 | type = string 63 | default = "MYSQL_5_7" 64 | } 65 | 66 | variable "machine_type" { 67 | description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details" 68 | type = string 69 | default = "db-f1-micro" 70 | } 71 | 72 | variable "db_name" { 73 | description = "Name for the db" 74 | type = string 75 | default = "default" 76 | } 77 | 78 | variable "name_override" { 79 | description = "You may optionally override the name_prefix + random string by specifying an override" 80 | type = string 81 | default = null 82 | } 83 | -------------------------------------------------------------------------------- /examples/postgres-private-ip/README.md: -------------------------------------------------------------------------------- 1 | # PostgreSQL Cloud SQL Private IP Example 2 | 3 | 6 | 7 | This folder contains an example of how to use the [Cloud SQL module](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql) to create a [Google Cloud SQL](https://cloud.google.com/sql/) 8 | [PostgreSQL](https://cloud.google.com/sql/docs/postgres/) database instance with a [private IP address](https://cloud.google.com/sql/docs/postgres/private-ip). 9 | 10 | ## How do you run this example? 11 | 12 | To run this example, you need to: 13 | 14 | 1. Install [Terraform](https://www.terraform.io/). 15 | 1. Open up `variables.tf` and set secrets at the top of the file as environment variables and fill in any other variables in 16 | the file that don't have defaults. 17 | 1. `terraform init`. 18 | 1. `terraform plan`. 19 | 1. If the plan looks good, run `terraform apply`. 20 | 21 | When the templates are applied, Terraform will output the IP address of the instance 22 | and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/postgres/sql-proxy). 23 | 24 | Note that you cannot connect to the private IP instance from outside Google Cloud Platform. 25 | If you want to experiment with connecting from your own workstation, see the [public IP example](https://github.com/gruntwork-io/terraform-google-sql/tree/master/examples/postgres-public-ip) 26 | -------------------------------------------------------------------------------- /examples/postgres-private-ip/main.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # LAUNCH A POSTGRES CLOUD SQL PRIVATE IP INSTANCE 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # CONFIGURE OUR GCP CONNECTION 7 | # ------------------------------------------------------------------------------ 8 | 9 | provider "google-beta" { 10 | project = var.project 11 | region = var.region 12 | } 13 | 14 | terraform { 15 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 16 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 17 | # forwards compatible with 1.0.x code. 
18 | required_version = ">= 0.12.26" 19 | 20 | required_providers { 21 | google-beta = { 22 | source = "hashicorp/google-beta" 23 | version = "~> 3.57.0" 24 | } 25 | } 26 | } 27 | 28 | # ------------------------------------------------------------------------------ 29 | # CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES 30 | # ------------------------------------------------------------------------------ 31 | 32 | resource "random_id" "name" { 33 | byte_length = 2 34 | } 35 | 36 | locals { 37 | # If name_override is specified, use that - otherwise use the name_prefix with a random string 38 | instance_name = var.name_override == null ? format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override 39 | private_network_name = "private-network-${random_id.name.hex}" 40 | private_ip_name = "private-ip-${random_id.name.hex}" 41 | } 42 | 43 | # ------------------------------------------------------------------------------ 44 | # CREATE COMPUTE NETWORKS 45 | # ------------------------------------------------------------------------------ 46 | 47 | # Simple network, auto-creates subnetworks 48 | resource "google_compute_network" "private_network" { 49 | provider = google-beta 50 | name = local.private_network_name 51 | } 52 | 53 | # Reserve global internal address range for the peering 54 | resource "google_compute_global_address" "private_ip_address" { 55 | provider = google-beta 56 | name = local.private_ip_name 57 | purpose = "VPC_PEERING" 58 | address_type = "INTERNAL" 59 | prefix_length = 16 60 | network = google_compute_network.private_network.self_link 61 | } 62 | 63 | # Establish VPC network peering connection using the reserved address range 64 | resource "google_service_networking_connection" "private_vpc_connection" { 65 | provider = google-beta 66 | network = google_compute_network.private_network.self_link 67 | service = "servicenetworking.googleapis.com" 68 | reserved_peering_ranges = [google_compute_global_address.private_ip_address.name] 69 | } 70 | 71 | # ------------------------------------------------------------------------------ 72 | # CREATE DATABASE INSTANCE WITH PRIVATE IP 73 | # ------------------------------------------------------------------------------ 74 | 75 | module "postgres" { 76 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 77 | # to a specific version of the modules, such as the following example: 78 | # source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0" 79 | source = "../../modules/cloud-sql" 80 | 81 | project = var.project 82 | region = var.region 83 | name = local.instance_name 84 | db_name = var.db_name 85 | 86 | engine = var.postgres_version 87 | machine_type = var.machine_type 88 | 89 | # To make it easier to test this example, we are disabling deletion protection so we can destroy the databases 90 | # during the tests. By default, we recommend setting deletion_protection to true, to ensure database instances are 91 | # not inadvertently destroyed. 92 | deletion_protection = false 93 | 94 | # These together will construct the master_user privileges, i.e. 95 | # 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. 96 | # These should typically be set as the environment variable TF_VAR_master_user_password, etc. 97 | # so you don't check these into source control." 
98 | master_user_password = var.master_user_password 99 | 100 | master_user_name = var.master_user_name 101 | master_user_host = "%" 102 | 103 | # Pass the private network link to the module 104 | private_network = google_compute_network.private_network.self_link 105 | 106 | # Wait for the vpc connection to complete 107 | dependencies = [google_service_networking_connection.private_vpc_connection.network] 108 | 109 | custom_labels = { 110 | test-id = "postgres-private-ip-example" 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /examples/postgres-private-ip/outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # MASTER OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "master_instance_name" { 6 | description = "The name of the database instance" 7 | value = module.postgres.master_instance_name 8 | } 9 | 10 | output "master_ip_addresses" { 11 | description = "All IP addresses of the instance as list of maps, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address" 12 | value = module.postgres.master_ip_addresses 13 | } 14 | 15 | output "master_private_ip" { 16 | description = "The private IPv4 address of the master instance" 17 | value = module.postgres.master_private_ip_address 18 | } 19 | 20 | output "master_instance" { 21 | description = "Self link to the master instance" 22 | value = module.postgres.master_instance 23 | } 24 | 25 | output "master_proxy_connection" { 26 | description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 27 | value = module.postgres.master_proxy_connection 28 | } 29 | 30 | # ------------------------------------------------------------------------------ 31 | # DB OUTPUTS 32 | # ------------------------------------------------------------------------------ 33 | 34 | output "db_name" { 35 | description = "Name of the default database" 36 | value = module.postgres.db_name 37 | } 38 | 39 | output "db" { 40 | description = "Self link to the default database" 41 | value = module.postgres.db 42 | } 43 | -------------------------------------------------------------------------------- /examples/postgres-private-ip/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | description = "The region to host the database in." 13 | type = string 14 | } 15 | 16 | # Note, after a name db instance is used, it cannot be reused for up to one week. 17 | variable "name_prefix" { 18 | description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter." 19 | type = string 20 | } 21 | 22 | variable "master_user_name" { 23 | description = "The username part for the default user credentials, i.e. 
'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control." 24 | type = string 25 | } 26 | 27 | variable "master_user_password" { 28 | description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control." 29 | type = string 30 | } 31 | 32 | # --------------------------------------------------------------------------------------------------------------------- 33 | # OPTIONAL PARAMETERS 34 | # Generally, these values won't need to be changed. 35 | # --------------------------------------------------------------------------------------------------------------------- 36 | 37 | variable "postgres_version" { 38 | description = "The engine version of the database, e.g. `POSTGRES_9_6`. See https://cloud.google.com/sql/docs/db-versions for supported versions." 39 | type = string 40 | default = "POSTGRES_9_6" 41 | } 42 | 43 | variable "machine_type" { 44 | description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details" 45 | type = string 46 | default = "db-f1-micro" 47 | } 48 | 49 | variable "db_name" { 50 | description = "Name for the db" 51 | type = string 52 | default = "default" 53 | } 54 | 55 | variable "name_override" { 56 | description = "You may optionally override the name_prefix + random string by specifying an override" 57 | type = string 58 | default = null 59 | } 60 | -------------------------------------------------------------------------------- /examples/postgres-public-ip/README.md: -------------------------------------------------------------------------------- 1 | # PostgreSQL Cloud SQL Public IP Example 2 | 3 | 6 | 7 | This folder contains an example of how to use the [Cloud SQL module](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql) to create a [Google Cloud SQL](https://cloud.google.com/sql/) 8 | [PostgreSQL](https://cloud.google.com/sql/docs/postgres/) database instance with a [public IP address](https://cloud.google.com/sql/docs/postgres/connect-external-app#appaccessIP). 9 | 10 | ## How do you run this example? 11 | 12 | To run this example, you need to: 13 | 14 | 1. Install [Terraform](https://www.terraform.io/). 15 | 1. Open up `variables.tf` and set secrets at the top of the file as environment variables and fill in any other variables in 16 | the file that don't have defaults. 17 | 1. `terraform init`. 18 | 1. `terraform plan`. 19 | 1. If the plan looks good, run `terraform apply`. 20 | 21 | When the templates are applied, Terraform will output the IP address of the instance and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy). 
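Because `master_user_password` should never be checked into source control, you would typically provide it via the `TF_VAR_master_user_password` environment variable, as noted in `variables.tf`. If you are on Terraform 0.14 or later, you can additionally mark the variable as sensitive so it is redacted from plan and apply output — an optional sketch, not part of this example as written:

```hcl
variable "master_user_password" {
  description = "The password part for the default user credentials. Set via the TF_VAR_master_user_password environment variable so it is not checked into source control."
  type        = string
  sensitive   = true # Requires Terraform >= 0.14; redacts the value from CLI output.
}
```

You would then `export TF_VAR_master_user_password=...` in your shell before running `terraform plan` and `terraform apply`.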
-------------------------------------------------------------------------------- /examples/postgres-public-ip/main.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # LAUNCH A POSTGRESQL CLOUD SQL PUBLIC IP INSTANCE 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # CONFIGURE OUR GCP CONNECTION 7 | # ------------------------------------------------------------------------------ 8 | 9 | provider "google-beta" { 10 | project = var.project 11 | region = var.region 12 | } 13 | 14 | terraform { 15 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 16 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 17 | # forwards compatible with 1.0.x code. 18 | required_version = ">= 0.12.26" 19 | 20 | required_providers { 21 | google-beta = { 22 | source = "hashicorp/google-beta" 23 | version = "~> 3.57.0" 24 | } 25 | } 26 | } 27 | 28 | # ------------------------------------------------------------------------------ 29 | # CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES 30 | # ------------------------------------------------------------------------------ 31 | 32 | resource "random_id" "name" { 33 | byte_length = 2 34 | } 35 | 36 | locals { 37 | # If name_override is specified, use that - otherwise use the name_prefix with a random string 38 | instance_name = var.name_override == null ? format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override 39 | } 40 | 41 | # ------------------------------------------------------------------------------ 42 | # CREATE DATABASE INSTANCE WITH PUBLIC IP 43 | # ------------------------------------------------------------------------------ 44 | 45 | module "postgres" { 46 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 47 | # to a specific version of the modules, such as the following example: 48 | # source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0" 49 | source = "../../modules/cloud-sql" 50 | 51 | project = var.project 52 | region = var.region 53 | name = local.instance_name 54 | db_name = var.db_name 55 | 56 | engine = var.postgres_version 57 | machine_type = var.machine_type 58 | 59 | # These together will construct the master_user privileges, i.e. 60 | # 'master_user_name' IDENTIFIED BY 'master_user_password'. 61 | # These should typically be set as the environment variable TF_VAR_master_user_password, etc. 62 | # so you don't check these into source control." 63 | master_user_password = var.master_user_password 64 | master_user_name = var.master_user_name 65 | 66 | # To make it easier to test this example, we are giving the instances public IP addresses and allowing inbound 67 | # connections from anywhere. We also disable deletion protection so we can destroy the databases during the tests. 68 | # In real-world usage, your instances should live in private subnets, only have private IP addresses, and only allow 69 | # access from specific trusted networks, servers or applications in your VPC. By default, we recommend setting 70 | # deletion_protection to true, to ensure database instances are not inadvertently destroyed. 
71 | enable_public_internet_access = true 72 | deletion_protection = false 73 | 74 | # Default setting for this is 'false' in 'variables.tf' 75 | # In the test cases, we're setting this to true, to test forced SSL. 76 | require_ssl = var.require_ssl 77 | 78 | authorized_networks = [ 79 | { 80 | name = "allow-all-inbound" 81 | value = "0.0.0.0/0" 82 | }, 83 | ] 84 | 85 | # Set test flags 86 | # Cloud SQL will complain if they're not applicable to the engine 87 | database_flags = [ 88 | { 89 | name = "autovacuum_naptime" 90 | value = "2" 91 | }, 92 | ] 93 | 94 | custom_labels = { 95 | test-id = "postgres-public-ip-example" 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /examples/postgres-public-ip/outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # MASTER OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "master_instance_name" { 6 | description = "The name of the database instance" 7 | value = module.postgres.master_instance_name 8 | } 9 | 10 | output "master_public_ip" { 11 | description = "The public IPv4 address of the master instance" 12 | value = module.postgres.master_public_ip_address 13 | } 14 | 15 | output "master_ca_cert" { 16 | description = "The CA Certificate used to connect to the SQL Instance via SSL" 17 | value = module.postgres.master_ca_cert 18 | } 19 | 20 | output "master_instance" { 21 | description = "Self link to the master instance" 22 | value = module.postgres.master_instance 23 | } 24 | 25 | output "master_proxy_connection" { 26 | description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 27 | value = module.postgres.master_proxy_connection 28 | } 29 | 30 | # ------------------------------------------------------------------------------ 31 | # DB OUTPUTS 32 | # ------------------------------------------------------------------------------ 33 | 34 | output "db_name" { 35 | description = "Name of the default database" 36 | value = module.postgres.db_name 37 | } 38 | 39 | output "db" { 40 | description = "Self link to the default database" 41 | value = module.postgres.db 42 | } 43 | -------------------------------------------------------------------------------- /examples/postgres-public-ip/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | description = "The region to host the database in." 13 | type = string 14 | } 15 | 16 | # Note, after a name db instance is used, it cannot be reused for up to one week. 17 | variable "name_prefix" { 18 | description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter." 
19 | type = string 20 | } 21 | 22 | variable "master_user_name" { 23 | description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control." 24 | type = string 25 | } 26 | 27 | variable "master_user_password" { 28 | description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control." 29 | type = string 30 | } 31 | 32 | # --------------------------------------------------------------------------------------------------------------------- 33 | # OPTIONAL PARAMETERS 34 | # Generally, these values won't need to be changed. 35 | # --------------------------------------------------------------------------------------------------------------------- 36 | 37 | variable "postgres_version" { 38 | description = "The engine version of the database, e.g. `POSTGRES_9_6`. See https://cloud.google.com/sql/docs/features for supported versions." 39 | type = string 40 | default = "POSTGRES_9_6" 41 | } 42 | 43 | variable "machine_type" { 44 | description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details" 45 | type = string 46 | default = "db-f1-micro" 47 | } 48 | 49 | variable "db_name" { 50 | description = "Name for the db" 51 | type = string 52 | default = "default" 53 | } 54 | 55 | variable "name_override" { 56 | description = "You may optionally override the name_prefix + random string by specifying an override" 57 | type = string 58 | default = null 59 | } 60 | 61 | # When configuring a public IP instance, you should only allow secure connections 62 | # For testing purposes, we're initially allowing unsecured connections. 63 | variable "require_ssl" { 64 | description = "True if the instance should require SSL/TLS for users connecting over IP. Note: SSL/TLS is needed to provide security when you connect to Cloud SQL using IP addresses. If you are connecting to your instance only by using the Cloud SQL Proxy or the Java Socket Library, you do not need to configure your instance to use SSL/TLS." 65 | type = bool 66 | default = false 67 | } 68 | -------------------------------------------------------------------------------- /examples/postgres-replicas/README.md: -------------------------------------------------------------------------------- 1 | # PostgreSQL Cloud SQL HA Example 2 | 3 | 6 | 7 | This folder contains an example of how to use the [Cloud SQL module](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql) to create a [High Availability](https://cloud.google.com/sql/docs/postgres/high-availability) [Google Cloud SQL](https://cloud.google.com/sql/) 8 | [PostgreSQL](https://cloud.google.com/sql/docs/postgres/) database cluster with a [public IP address](https://cloud.google.com/sql/docs/postgres/connect-external-app#appaccessIP) and a [read replica](https://cloud.google.com/sql/docs/postgres/replication/). 9 | 10 | ## How do you run this example? 11 | 12 | To run this example, you need to: 13 | 14 | 1. Install [Terraform](https://www.terraform.io/). 15 | 1. Open up `variables.tf` and set secrets at the top of the file as environment variables and fill in any other variables in 16 | the file that don't have defaults. 17 | 1. 
`terraform init`. 18 | 1. `terraform plan`. 19 | 1. If the plan looks good, run `terraform apply`. 20 | 21 | When the templates are applied, Terraform will output the IP address of the instance 22 | and the instance path for [connecting using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy). 23 | -------------------------------------------------------------------------------- /examples/postgres-replicas/main.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # LAUNCH A POSTGRES CLUSTER WITH HA AND READ REPLICAS 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # CONFIGURE OUR GCP CONNECTION 7 | # ------------------------------------------------------------------------------ 8 | 9 | provider "google-beta" { 10 | project = var.project 11 | region = var.region 12 | } 13 | 14 | terraform { 15 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 16 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 17 | # forwards compatible with 1.0.x code. 18 | required_version = ">= 0.12.26" 19 | 20 | required_providers { 21 | google-beta = { 22 | source = "hashicorp/google-beta" 23 | version = "~> 3.57.0" 24 | } 25 | } 26 | } 27 | 28 | # ------------------------------------------------------------------------------ 29 | # CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES 30 | # ------------------------------------------------------------------------------ 31 | 32 | resource "random_id" "name" { 33 | byte_length = 2 34 | } 35 | 36 | locals { 37 | # If name_override is specified, use that - otherwise use the name_prefix with a random string 38 | instance_name = var.name_override == null ? format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override 39 | private_network_name = "private-network-${random_id.name.hex}" 40 | private_ip_name = "private-ip-${random_id.name.hex}" 41 | } 42 | 43 | # ------------------------------------------------------------------------------ 44 | # CREATE DATABASE CLUSTER WITH PUBLIC IP 45 | # ------------------------------------------------------------------------------ 46 | 47 | module "postgres" { 48 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 49 | # to a specific version of the modules, such as the following example: 50 | # source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0" 51 | source = "../../modules/cloud-sql" 52 | 53 | project = var.project 54 | region = var.region 55 | name = local.instance_name 56 | db_name = var.db_name 57 | 58 | engine = var.postgres_version 59 | machine_type = var.machine_type 60 | 61 | master_zone = var.master_zone 62 | 63 | # To make it easier to test this example, we are giving the instances public IP addresses and allowing inbound 64 | # connections from anywhere. We also disable deletion protection so we can destroy the databases during the tests. 65 | # In real-world usage, your instances should live in private subnets, only have private IP addresses, and only allow 66 | # access from specific trusted networks, servers or applications in your VPC. 
By default, we recommend setting 67 | # deletion_protection to true, to ensure database instances are not inadvertently destroyed. 68 | enable_public_internet_access = true 69 | deletion_protection = false 70 | 71 | authorized_networks = [ 72 | { 73 | name = "allow-all-inbound" 74 | value = "0.0.0.0/0" 75 | }, 76 | ] 77 | 78 | # Indicate that we want to create a failover replica 79 | enable_failover_replica = true 80 | 81 | # Indicate we want read replicas to be created 82 | num_read_replicas = var.num_read_replicas 83 | read_replica_zones = var.read_replica_zones 84 | 85 | # These together will construct the master_user privileges, i.e. 86 | # 'master_user_name' IDENTIFIED BY 'master_user_password'. 87 | # These should typically be set as the environment variable TF_VAR_master_user_password, etc. 88 | # so you don't check these into source control." 89 | master_user_password = var.master_user_password 90 | master_user_name = var.master_user_name 91 | 92 | custom_labels = { 93 | test-id = "postgres-replicas-example" 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /examples/postgres-replicas/outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # MASTER OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "master_instance_name" { 6 | description = "The name of the database instance" 7 | value = module.postgres.master_instance_name 8 | } 9 | 10 | output "master_public_ip" { 11 | description = "The public IPv4 address of the master instance" 12 | value = module.postgres.master_public_ip_address 13 | } 14 | 15 | output "master_instance" { 16 | description = "Self link to the master instance" 17 | value = module.postgres.master_instance 18 | } 19 | 20 | output "master_proxy_connection" { 21 | description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 22 | value = module.postgres.master_proxy_connection 23 | } 24 | 25 | # ------------------------------------------------------------------------------ 26 | # DB OUTPUTS 27 | # ------------------------------------------------------------------------------ 28 | 29 | output "db_name" { 30 | description = "Name of the default database" 31 | value = module.postgres.db_name 32 | } 33 | 34 | output "db" { 35 | description = "Self link to the default database" 36 | value = module.postgres.db 37 | } 38 | 39 | # ------------------------------------------------------------------------------ 40 | # READ REPLICA OUTPUTS 41 | # ------------------------------------------------------------------------------ 42 | 43 | output "read_replica_instance_names" { 44 | description = "List of names for the read replica instances" 45 | value = module.postgres.read_replica_instance_names 46 | } 47 | 48 | output "read_replica_public_ips" { 49 | description = "List of public IPv4 addresses of the read replica instances" 50 | value = module.postgres.read_replica_public_ip_addresses 51 | } 52 | 53 | output "read_replica_instances" { 54 | description = "List of self links to the read replica instances" 55 | value = module.postgres.read_replica_instances 56 | } 57 | 58 | output "read_replica_proxy_connections" { 59 | description = "List of read replica instance paths for connecting with Cloud SQL Proxy. 
Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 60 | value = module.postgres.read_replica_proxy_connections 61 | } 62 | 63 | # Although we don't use the values, this output highlights the JSON encoded output we use in certain 64 | # cases where the resource output cannot properly be computed. 65 | # See https://github.com/hashicorp/terraform/issues/17048 66 | output "read_replica_server_ca_certs" { 67 | description = "JSON encoded list of CA Certificates used to connect to the read replica instances via SSL" 68 | value = module.postgres.read_replica_server_ca_certs 69 | } 70 | -------------------------------------------------------------------------------- /examples/postgres-replicas/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | description = "The region to host the database in (e.g. 'us-central1')." 13 | type = string 14 | } 15 | 16 | variable "master_zone" { 17 | description = "The preferred zone for the master instance (e.g. 'us-central1-a'). Must be different than 'failover_replica_zone'." 18 | type = string 19 | } 20 | 21 | variable "failover_replica_zone" { 22 | description = "The preferred zone for the failover instance (e.g. 'us-central1-b'). Must be different than 'master_zone'." 23 | type = string 24 | } 25 | 26 | variable "num_read_replicas" { 27 | description = "The number of read replicas to create. Cloud SQL will replicate all data from the master to these replicas, which you can use to horizontally scale read traffic." 28 | type = number 29 | } 30 | 31 | variable "read_replica_zones" { 32 | description = "A list of compute zones where read replicas should be created. List size should match 'num_read_replicas'" 33 | type = list(string) 34 | 35 | # Example: 36 | # default = ["us-central1-b", "us-central1-c"] 37 | } 38 | 39 | # Note, after a name db instance is used, it cannot be reused for up to one week. 40 | variable "name_prefix" { 41 | description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter." 42 | type = string 43 | } 44 | 45 | variable "master_user_name" { 46 | description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control." 47 | type = string 48 | } 49 | 50 | variable "master_user_password" { 51 | description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control." 52 | type = string 53 | } 54 | 55 | # --------------------------------------------------------------------------------------------------------------------- 56 | # OPTIONAL PARAMETERS 57 | # Generally, these values won't need to be changed. 
58 | # --------------------------------------------------------------------------------------------------------------------- 59 | 60 | variable "postgres_version" { 61 | description = "The engine version of the database, e.g. `POSTGRES_9_6`. See https://cloud.google.com/sql/docs/features for supported versions." 62 | type = string 63 | default = "POSTGRES_9_6" 64 | } 65 | 66 | variable "machine_type" { 67 | description = "The machine type to use, see https://cloud.google.com/sql/pricing for more details" 68 | type = string 69 | default = "db-f1-micro" 70 | } 71 | 72 | variable "db_name" { 73 | description = "Name for the db" 74 | type = string 75 | default = "default" 76 | } 77 | 78 | variable "name_override" { 79 | description = "You may optionally override the name_prefix + random string by specifying an override" 80 | type = string 81 | default = null 82 | } 83 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # LAUNCH A POSTGRES CLOUD SQL PRIVATE IP INSTANCE 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # CONFIGURE OUR GCP CONNECTION 7 | # ------------------------------------------------------------------------------ 8 | 9 | provider "google-beta" { 10 | project = var.project 11 | region = var.region 12 | } 13 | 14 | terraform { 15 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 16 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 17 | # forwards compatible with 1.0.x code. 18 | required_version = ">= 0.12.26" 19 | 20 | required_providers { 21 | google-beta = { 22 | source = "hashicorp/google-beta" 23 | version = "~> 3.57.0" 24 | } 25 | } 26 | } 27 | 28 | # ------------------------------------------------------------------------------ 29 | # CREATE A RANDOM SUFFIX AND PREPARE RESOURCE NAMES 30 | # ------------------------------------------------------------------------------ 31 | 32 | resource "random_id" "name" { 33 | byte_length = 2 34 | } 35 | 36 | locals { 37 | # If name_override is specified, use that - otherwise use the name_prefix with a random string 38 | instance_name = length(var.name_override) == 0 ? 
format("%s-%s", var.name_prefix, random_id.name.hex) : var.name_override 39 | private_network_name = "private-network-${random_id.name.hex}" 40 | private_ip_name = "private-ip-${random_id.name.hex}" 41 | } 42 | 43 | # ------------------------------------------------------------------------------ 44 | # CREATE COMPUTE NETWORKS 45 | # ------------------------------------------------------------------------------ 46 | 47 | # Simple network, auto-creates subnetworks 48 | resource "google_compute_network" "private_network" { 49 | provider = google-beta 50 | name = local.private_network_name 51 | } 52 | 53 | # Reserve global internal address range for the peering 54 | resource "google_compute_global_address" "private_ip_address" { 55 | provider = google-beta 56 | name = local.private_ip_name 57 | purpose = "VPC_PEERING" 58 | address_type = "INTERNAL" 59 | prefix_length = 16 60 | network = google_compute_network.private_network.self_link 61 | } 62 | 63 | # Establish VPC network peering connection using the reserved address range 64 | resource "google_service_networking_connection" "private_vpc_connection" { 65 | provider = google-beta 66 | network = google_compute_network.private_network.self_link 67 | service = "servicenetworking.googleapis.com" 68 | reserved_peering_ranges = [google_compute_global_address.private_ip_address.name] 69 | } 70 | 71 | # ------------------------------------------------------------------------------ 72 | # CREATE DATABASE INSTANCE WITH PRIVATE IP 73 | # ------------------------------------------------------------------------------ 74 | 75 | module "postgres" { 76 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 77 | # to a specific version of the modules, such as the following example: 78 | # source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0" 79 | source = "./modules/cloud-sql" 80 | 81 | project = var.project 82 | region = var.region 83 | name = local.instance_name 84 | db_name = var.db_name 85 | 86 | engine = var.postgres_version 87 | machine_type = var.machine_type 88 | 89 | # These together will construct the master_user privileges, i.e. 90 | # 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. 91 | # These should typically be set as the environment variable TF_VAR_master_user_password, etc. 92 | # so you don't check these into source control." 
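  # For example, you could export placeholder values in your shell before running Terraform:
  #   export TF_VAR_master_user_name="admin"
  #   export TF_VAR_master_user_password="replace-with-a-strong-password"
  # (The values above are hypothetical; any mechanism that keeps credentials out of version control works.)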
93 | master_user_password = var.master_user_password 94 | 95 | master_user_name = var.master_user_name 96 | master_user_host = "%" 97 | 98 | # Pass the private network link to the module 99 | private_network = google_compute_network.private_network.self_link 100 | 101 | # Wait for the vpc connection to complete 102 | dependencies = [google_service_networking_connection.private_vpc_connection.network] 103 | 104 | custom_labels = { 105 | test-id = "postgres-private-ip-example" 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /modules/cloud-sql/README-MySQL.md: -------------------------------------------------------------------------------- 1 | 12 | # MySQL 13 | [![Maintained by Gruntwork.io](https://img.shields.io/badge/maintained%20by-gruntwork.io-%235849a6.svg)](https://gruntwork.io/?ref=repo_google_cloudsql) 14 | [![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gruntwork-io/terraform-google-sql.svg?label=latest)](http://github.com/gruntwork-io/terraform-google-sql/releases/latest) 15 | ![Terraform Version](https://img.shields.io/badge/tf-%3E%3D1.0.x-blue.svg) 16 | 17 | This module deploys MySQL on top of Google's Cloud SQL Service. The cluster is managed by GCP and automatically handles 18 | standby failover, read replicas, backups, patching, and encryption. 19 | 20 | [README.md](./README.md) 21 | -------------------------------------------------------------------------------- /modules/cloud-sql/README-PostgreSQL.md: -------------------------------------------------------------------------------- 1 | 12 | # PostgreSQL 13 | [![Maintained by Gruntwork.io](https://img.shields.io/badge/maintained%20by-gruntwork.io-%235849a6.svg)](https://gruntwork.io/?ref=repo_google_cloudsql) 14 | [![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gruntwork-io/terraform-google-sql.svg?label=latest)](http://github.com/gruntwork-io/terraform-google-sql/releases/latest) 15 | ![Terraform Version](https://img.shields.io/badge/tf-%3E%3D1.0.x-blue.svg) 16 | 17 | This module deploys PostgreSQL on top of Google's Cloud SQL Service. The cluster is managed by GCP and automatically handles 18 | standby failover, read replicas, backups, patching, and encryption. 19 | 20 | [README.md](./README.md) 21 | -------------------------------------------------------------------------------- /modules/cloud-sql/README.md: -------------------------------------------------------------------------------- 1 | # Cloud SQL Module 2 | 3 | [![Maintained by Gruntwork.io](https://img.shields.io/badge/maintained%20by-gruntwork.io-%235849a6.svg)](https://gruntwork.io/?ref=repo_google_cloudsql) 4 | [![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gruntwork-io/terraform-google-sql.svg?label=latest)](http://github.com/gruntwork-io/terraform-google-sql/releases/latest) 5 | ![Terraform Version](https://img.shields.io/badge/tf-%3E%3D1.0.x-blue.svg) 6 | 7 | 8 | 11 | 12 | This module creates a [Google Cloud SQL](https://cloud.google.com/sql/) cluster. 13 | The cluster is managed by Google, automating backups, replication, patches, and updates. 14 | 15 | This module helps you run [MySQL](https://cloud.google.com/sql/docs/mysql/) and [PostgreSQL](https://cloud.google.com/sql/docs/postgres/) databases in [Google Cloud](https://cloud.google.com/). 
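As a quick orientation, here is a minimal, hypothetical sketch of instantiating the module for a small MySQL instance. The `ref`, project ID, and sizing values below are placeholders, not recommendations; see the [examples](https://github.com/gruntwork-io/terraform-google-sql/tree/master/examples) for complete, tested configurations and `variables.tf` for the full list of inputs.

```hcl
# Minimal sketch with placeholder values; pin `ref` to a real release tag.
module "mysql" {
  source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0"

  project = "my-project-id" # placeholder
  region  = "us-central1"
  name    = "example-mysql"

  engine       = "MYSQL_5_7"
  machine_type = "db-f1-micro"
  db_name      = "example"

  # Typically supplied via TF_VAR_master_user_name / TF_VAR_master_user_password.
  master_user_name     = var.master_user_name
  master_user_password = var.master_user_password
}
```

High availability, read replicas, private networking, and the other features listed below are enabled through additional input variables documented in `variables.tf`.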
16 | 17 | ## Cloud SQL Architecture 18 | 19 | ![Cloud SQL Architecture](https://github.com/gruntwork-io/terraform-google-sql/blob/master/_docs/cloud-sql.png "Cloud SQL Architecture") 20 | 21 | ## Features 22 | 23 | - Deploy a fully-managed relational database 24 | - Supports MySQL and PostgreSQL 25 | - Optional failover instances 26 | - Optional read replicas 27 | 28 | ## Learn 29 | 30 | This repo is a part of [the Gruntwork Infrastructure as Code Library](https://gruntwork.io/infrastructure-as-code-library/), a collection of reusable, battle-tested, production ready infrastructure code. If you’ve never used the Infrastructure as Code Library before, make sure to read [How to use the Gruntwork Infrastructure as Code Library](https://gruntwork.io/guides/foundations/how-to-use-gruntwork-infrastructure-as-code-library/)! 31 | 32 | ### Core concepts 33 | 34 | - [What is Cloud SQL](https://github.com/gruntwork-io/terraform-google-sql/blob/master/modules/cloud-sql/core-concepts.md#what-is-cloud-sql) 35 | - [Cloud SQL documentation](https://cloud.google.com/sql/docs/) 36 | - **[Designing Data Intensive Applications](https://dataintensive.net/)**: the best book we’ve found for understanding data systems, including relational databases, NoSQL, replication, sharding, consistency, and so on. 37 | 38 | ### Repo organisation 39 | 40 | This repo has the following folder structure: 41 | 42 | - [root](https://github.com/gruntwork-io/terraform-google-sql/tree/master): The root folder contains an example of how 43 | to deploy a private PostgreSQL instance in Cloud SQL. See [postgres-private-ip](https://github.com/gruntwork-io/terraform-google-sql/blob/master/examples/postgres-private-ip) 44 | for the documentation. 45 | 46 | - [modules](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules): This folder contains the 47 | main implementation code for this Module, broken down into multiple standalone submodules. 48 | 49 | The primary module is: 50 | 51 | - [cloud-sql](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql): Deploy a Cloud SQL [MySQL](https://cloud.google.com/sql/docs/mysql/) or [PostgreSQL](https://cloud.google.com/sql/docs/postgres/) database. 52 | 53 | - [examples](https://github.com/gruntwork-io/terraform-google-sql/tree/master/examples): This folder contains 54 | examples of how to use the submodules. 55 | 56 | - [test](https://github.com/gruntwork-io/terraform-google-sql/tree/master/test): Automated tests for the submodules 57 | and examples. 58 | 59 | ## Deploy 60 | 61 | ### Non-production deployment (quick start for learning) 62 | 63 | If you just want to try this repo out for experimenting and learning, check out the following resources: 64 | 65 | - [examples folder](https://github.com/gruntwork-io/terraform-google-sql/blob/master/examples): The `examples` folder contains sample code optimized for learning, experimenting, and testing (but not production usage). 66 | 67 | ### Production deployment 68 | 69 | If you want to deploy this repo in production, check out the following resources: 70 | 71 | - [cloud-sql module in the GCP Reference Architecture](https://github.com/gruntwork-io/infrastructure-modules-google/tree/master/data-stores/cloud-sql): 72 | Production-ready sample code from the GCP Reference Architecture. Note that the repository is private and accessible only with 73 | Gruntwork subscription. 
To get access, [subscribe now](https://www.gruntwork.io/pricing/) or contact us at [support@gruntwork.io](mailto:support@gruntwork.io) for more information. 74 | 75 | ## Manage 76 | 77 | ### Day-to-day operations 78 | 79 | - [How to connect to a Cloud SQL instance](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql/core-concepts.md#how-do-you-connect-to-the-database) 80 | - [How to configure high availability](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql/core-concepts.md#how-do-you-configure-high-availability) 81 | - [How to secure the database instance](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql/core-concepts.md#how-do-you-secure-the-database) 82 | - [How to scale the database](https://github.com/gruntwork-io/terraform-google-sql/tree/master/modules/cloud-sql/core-concepts.md#how-do-you-scale-the-database) 83 | 84 | ## Known Issues 85 | 86 | ### Instance Recovery 87 | 88 | Due to limitations of the current `terraform` provider for Google, it is not possible to restore backups with `terraform`. 89 | 90 | See https://github.com/terraform-providers/terraform-provider-google/issues/2446 91 | 92 | ## Support 93 | 94 | If you need help with this repo or anything else related to infrastructure or DevOps, Gruntwork offers [Commercial Support](https://gruntwork.io/support/) via Slack, email, and phone/video. If you’re already a Gruntwork customer, hop on Slack and ask away! If not, [subscribe now](https://www.gruntwork.io/pricing/). If you’re not sure, feel free to email us at [support@gruntwork.io](mailto:support@gruntwork.io). 95 | 96 | ## Contributions 97 | 98 | Contributions to this repo are very welcome and appreciated! If you find a bug or want to add a new feature or even contribute an entirely new module, we are very happy to accept pull requests, provide feedback, and run your changes through our automated test suite. 99 | 100 | Please see [Contributing to the Gruntwork Infrastructure as Code Library](https://gruntwork.io/guides/foundations/how-to-use-gruntwork-infrastructure-as-code-library/#contributing-to-the-gruntwork-infrastructure-as-code-library) for instructions. 101 | 102 | ## License 103 | 104 | Please see [LICENSE](https://github.com/gruntwork-io/terraform-google-sql/blob/master/LICENSE.txt) for details on how the code in this repo is licensed. 105 | 106 | Copyright © 2019 Gruntwork, Inc.
107 | -------------------------------------------------------------------------------- /modules/cloud-sql/compute_outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # SEPARATE TERRAFORM FILE TO COMPUTE OUTPUT VALUES AND KEEP THE MAIN MODULE CLEAN 3 | # ------------------------------------------------------------------------------ 4 | 5 | # ------------------------------------------------------------------------------ 6 | # PREPARE LOCALS FOR THE OUTPUTS 7 | # ------------------------------------------------------------------------------ 8 | 9 | locals { 10 | # Replica proxy connection info 11 | failover_proxy_connection = join("", data.template_file.failover_proxy_connection.*.rendered) 12 | 13 | # Replica certificate info 14 | failover_certificate = join("", data.template_file.failover_certificate.*.rendered) 15 | failover_certificate_common_name = join("", data.template_file.failover_certificate_common_name.*.rendered) 16 | failover_certificate_create_time = join("", data.template_file.failover_certificate_create_time.*.rendered) 17 | failover_certificate_expiration_time = join("", data.template_file.failover_certificate_expiration_time.*.rendered) 18 | failover_certificate_sha1_fingerprint = join("", data.template_file.failover_certificate_sha1_fingerprint.*.rendered) 19 | } 20 | 21 | # ------------------------------------------------------------------------------ 22 | # FAILOVER REPLICA PROXY CONNECTION TEMPLATE 23 | # ------------------------------------------------------------------------------ 24 | 25 | data "template_file" "failover_proxy_connection" { 26 | count = local.actual_failover_replica_count 27 | template = "${var.project}:${var.region}:${google_sql_database_instance.failover_replica.0.name}" 28 | } 29 | 30 | # ------------------------------------------------------------------------------ 31 | # FAILOVER REPLICA CERTIFICATE TEMPLATES 32 | # 33 | # We have to produce the certificate outputs via template_file. 
Using splat syntax would yield: 34 | # Resource 'google_sql_database_instance.failover_replica' does not have attribute 'server_ca_cert.0.cert' 35 | # for variable 'google_sql_database_instance.failover_replica.*.server_ca_cert.0.cert' 36 | # ------------------------------------------------------------------------------ 37 | 38 | data "template_file" "failover_certificate" { 39 | count = local.actual_failover_replica_count 40 | template = google_sql_database_instance.failover_replica.0.server_ca_cert.0.cert 41 | } 42 | 43 | data "template_file" "failover_certificate_common_name" { 44 | count = local.actual_failover_replica_count 45 | template = google_sql_database_instance.failover_replica.0.server_ca_cert.0.common_name 46 | } 47 | 48 | data "template_file" "failover_certificate_create_time" { 49 | count = local.actual_failover_replica_count 50 | template = google_sql_database_instance.failover_replica.0.server_ca_cert.0.create_time 51 | } 52 | 53 | data "template_file" "failover_certificate_expiration_time" { 54 | count = local.actual_failover_replica_count 55 | template = google_sql_database_instance.failover_replica.0.server_ca_cert.0.expiration_time 56 | } 57 | 58 | data "template_file" "failover_certificate_sha1_fingerprint" { 59 | count = local.actual_failover_replica_count 60 | template = google_sql_database_instance.failover_replica.0.server_ca_cert.0.sha1_fingerprint 61 | } 62 | 63 | # ------------------------------------------------------------------------------ 64 | # READ REPLICA PROXY CONNECTION TEMPLATE 65 | # ------------------------------------------------------------------------------ 66 | 67 | data "template_file" "read_replica_proxy_connection" { 68 | count = var.num_read_replicas 69 | template = "${var.project}:${var.region}:${google_sql_database_instance.read_replica.*.name[count.index]}" 70 | } 71 | -------------------------------------------------------------------------------- /modules/cloud-sql/core-concepts.md: -------------------------------------------------------------------------------- 1 | # Core Cloud SQL Concepts 2 | 3 | ## What is Cloud SQL? 4 | 5 | Cloud SQL is Google's fully-managed database service that makes it easy to set up, maintain, manage, and administer 6 | your relational databases on Google Cloud Platform. Cloud SQL automatically includes: 7 | 8 | - Data replication between multiple zones with automatic failover. 9 | - Automated and on-demand backups, and point-in-time recovery. 10 | - Data encryption on networks, database tables, temporary files, and backups. 11 | - Secure external connections with the [Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/sql-proxy) or with the SSL/TLS protocol. 12 | 13 | You can learn more about Cloud SQL from [the official documentation](https://cloud.google.com/sql/docs/). 14 | 15 | ## How do you connect to the database? 16 | 17 | **Cloud SQL instances are created in a producer network (a VPC network internal to Google). They are not created in your VPC network. See https://cloud.google.com/sql/docs/mysql/private-ip** 18 | 19 | You can use both public IP and private IP to connect to a Cloud SQL instance. 20 | Neither connection method affects the other; you must protect the public IP connection whether the instance is configured to use private IP or not. 
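As a small, hypothetical illustration of consuming these connection details from Terraform itself, the snippet below composes a PostgreSQL connection string from two of the module outputs listed further down (`master_private_ip_address` and `db_name`). The module label `postgres`, the user variable, and the default port 5432 are assumptions for the example.

```hcl
# Hypothetical sketch: compose a connection string from the module's outputs.
# Assumes a module block labeled "postgres" and the default PostgreSQL port 5432.
locals {
  db_connection_uri = "postgresql://${var.master_user_name}@${module.postgres.master_private_ip_address}:5432/${module.postgres.db_name}"
}
```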
21 | 22 | You can also use the [Cloud SQL Proxy for MySQL](https://cloud.google.com/sql/docs/mysql/sql-proxy) and [Cloud SQL Proxy for PostgreSQL](https://cloud.google.com/sql/docs/postgres/sql-proxy) 23 | to connect to an instance that is also configured to use private IP. The proxy can connect using either the private IP address or a public IP address. 24 | 25 | This module provides the connection details as [Terraform output 26 | variables](https://www.terraform.io/intro/getting-started/outputs.html). Use the public / private addresses depending on your configuration: 27 | 28 | 29 | 1. **Master Public IP Address** `master_public_ip_address`: The public IPv4 address of the master instance. 30 | 1. **Master Private IP Address** `master_private_ip_address`: The private IPv4 address of the master instance. 31 | 1. **Master Proxy connection** `master_proxy_connection`: Instance path for connecting with Cloud SQL Proxy; see [Connecting mysql Client Using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy). 32 | 1. **Read Replica Public IP Addresses** `read_replica_public_ip_addresses`: A list of read replica public IP addresses in the cluster. Use these addresses for reads (see "How do you scale this database?" below). 33 | 1. **Read Replica Private IP Addresses** `read_replica_private_ip_addresses`: A list of read replica private IP addresses in the cluster. Use these addresses for reads (see "How do you scale this database?" below). 34 | 1. **Read Replica Proxy Connections** `read_replica_proxy_connections`: A list of instance paths for connecting with Cloud SQL Proxy; see [Connecting Using the Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy). 35 | 36 | 37 | You can programmatically extract these variables in your Terraform templates and pass them to other resources. 38 | You'll also see the variables at the end of each `terraform apply` call or if you run `terraform output`. 39 | 40 | For full connectivity options and detailed documentation, see [Connecting to Cloud SQL MySQL from External Applications](https://cloud.google.com/sql/docs/mysql/connect-external-app) and [Connecting to Cloud SQL PostgreSQL from External Applications](https://cloud.google.com/sql/docs/postgres/external-connection-methods). 41 | 42 | ## How do you configure High Availability? 43 | 44 | You can enable High Availability using the `enable_failover_replica` input variable. 45 | 46 | ### High Availability for MySQL 47 | 48 | The configuration is made up of a primary instance (master) in the primary zone (`master_zone` input variable) and a failover replica in the secondary zone (`failover_replica_zone` input variable). 49 | The failover replica is configured with the same database flags, users and passwords, authorized applications and networks, and databases as the primary instance. 50 | 51 | For full details about MySQL High Availability, see https://cloud.google.com/sql/docs/mysql/high-availability 52 | 53 | ### High Availability for PostgreSQL 54 | 55 | A Cloud SQL PostgreSQL instance configured for HA is also called a _regional instance_ and is located in a primary and secondary zone within the configured region. Within a regional instance, 56 | the configuration is made up of a primary instance (master) and a standby instance. You control the primary zone for the master instance 57 | with input variable `master_zone` and Google will automatically place the standby instance in another zone. 
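To make these knobs concrete, here is a hypothetical sketch of a MySQL instance with a failover replica; all values are placeholders. For PostgreSQL, only `enable_failover_replica` and `master_zone` are needed, as noted in the comment.

```hcl
# Hypothetical sketch with placeholder values; only the HA-related inputs differ from a basic deployment.
module "mysql_ha" {
  source = "github.com/gruntwork-io/terraform-google-sql.git//modules/cloud-sql?ref=v0.2.0"

  project      = "my-project-id" # placeholder
  region       = "us-central1"
  name         = "example-mysql-ha"
  engine       = "MYSQL_5_7"
  machine_type = "db-f1-micro"
  db_name      = "example"

  master_user_name     = var.master_user_name
  master_user_password = var.master_user_password

  # High availability
  enable_failover_replica     = true
  master_zone                 = "us-central1-a" # placeholder; primary zone
  mysql_failover_replica_zone = "us-central1-b" # MySQL only; must differ from master_zone

  # For PostgreSQL, set only enable_failover_replica and master_zone;
  # Google places the standby instance automatically.
}
```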
58 | 59 | For full details about PostgreSQL High Availability, see https://cloud.google.com/sql/docs/postgres/high-availability 60 | 61 | 62 | ## How do you secure the database? 63 | 64 | Cloud SQL customer data is encrypted when stored in database tables, temporary files, and backups. 65 | External connections can be encrypted by using SSL, or by using the Cloud SQL Proxy, which automatically encrypts traffic to and from the database. 66 | If you do not use the proxy, you can enforce SSL for external connections using the `require_ssl` input variable. 67 | 68 | For further information, see https://cloud.google.com/blog/products/gcp/best-practices-for-securing-your-google-cloud-databases and 69 | https://cloud.google.com/sql/faq#encryption 70 | 71 | ## How do you scale the database? 72 | 73 | * **Storage**: Cloud SQL manages storage for you, automatically growing cluster volume up to 10TB. You can set the 74 | initial disk size using the `disk_size` input variable. 75 | * **Vertical scaling**: To scale vertically (i.e. bigger DB instances with more CPU and RAM), use the `machine_type` 76 | input variable. For a list of Cloud SQL Machine Types, see [Cloud SQL Pricing](https://cloud.google.com/sql/pricing#2nd-gen-pricing). 77 | * **Horizontal scaling**: To scale horizontally, you can add more replicas using the `num_read_replicas` and `read_replica_zones` input variables, 78 | and the module will automatically deploy the new instances, sync them to the master, and make them available as read 79 | replicas. 80 | -------------------------------------------------------------------------------- /modules/cloud-sql/outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # MASTER INSTANCE OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "master_instance_name" { 6 | description = "The name of the master database instance" 7 | value = google_sql_database_instance.master.name 8 | } 9 | 10 | output "master_public_ip_address" { 11 | description = "The public IPv4 address of the master instance." 12 | value = google_sql_database_instance.master.public_ip_address 13 | } 14 | 15 | output "master_private_ip_address" { 16 | description = "The private IPv4 address of the master instance." 17 | value = google_sql_database_instance.master.private_ip_address 18 | } 19 | 20 | output "master_ip_addresses" { 21 | description = "All IP addresses of the master instance JSON encoded, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address" 22 | value = jsonencode(google_sql_database_instance.master.ip_address) 23 | } 24 | 25 | output "master_instance" { 26 | description = "Self link to the master instance" 27 | value = google_sql_database_instance.master.self_link 28 | } 29 | 30 | output "master_proxy_connection" { 31 | description = "Master instance path for connecting with Cloud SQL Proxy.
Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 32 | value = "${var.project}:${var.region}:${google_sql_database_instance.master.name}" 33 | } 34 | 35 | # ------------------------------------------------------------------------------ 36 | # MASTER CERT OUTPUTS 37 | # ------------------------------------------------------------------------------ 38 | 39 | output "master_ca_cert" { 40 | description = "The CA Certificate used to connect to the master instance via SSL" 41 | value = google_sql_database_instance.master.server_ca_cert.0.cert 42 | } 43 | 44 | output "master_ca_cert_common_name" { 45 | description = "The CN valid for the master instance CA Cert" 46 | value = google_sql_database_instance.master.server_ca_cert.0.common_name 47 | } 48 | 49 | output "master_ca_cert_create_time" { 50 | description = "Creation time of the master instance CA Cert" 51 | value = google_sql_database_instance.master.server_ca_cert.0.create_time 52 | } 53 | 54 | output "master_ca_cert_expiration_time" { 55 | description = "Expiration time of the master instance CA Cert" 56 | value = google_sql_database_instance.master.server_ca_cert.0.expiration_time 57 | } 58 | 59 | output "master_ca_cert_sha1_fingerprint" { 60 | description = "SHA Fingerprint of the master instance CA Cert" 61 | value = google_sql_database_instance.master.server_ca_cert.0.sha1_fingerprint 62 | } 63 | 64 | # ------------------------------------------------------------------------------ 65 | # DATABASE OUTPUTS 66 | # ------------------------------------------------------------------------------ 67 | 68 | output "db" { 69 | description = "Self link to the default database" 70 | value = google_sql_database.default.self_link 71 | } 72 | 73 | output "db_name" { 74 | description = "Name of the default database" 75 | value = google_sql_database.default.name 76 | } 77 | 78 | # ------------------------------------------------------------------------------ 79 | # FAILOVER REPLICA OUTPUTS - ONLY APPLICABLE TO MYSQL 80 | # ------------------------------------------------------------------------------ 81 | 82 | output "failover_instance_name" { 83 | description = "The name of the failover database instance" 84 | value = join("", google_sql_database_instance.failover_replica.*.name) 85 | } 86 | 87 | output "failover_public_ip_address" { 88 | description = "The public IPv4 address of the failover instance." 89 | value = join("", google_sql_database_instance.failover_replica.*.public_ip_address) 90 | } 91 | 92 | output "failover_private_ip_address" { 93 | description = "The private IPv4 address of the failover instance." 94 | value = join("", google_sql_database_instance.failover_replica.*.private_ip_address) 95 | } 96 | 97 | output "failover_ip_addresses" { 98 | description = "All IP addresses of the failover instance JSON encoded, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address" 99 | value = jsonencode(google_sql_database_instance.failover_replica.*.ip_address) 100 | } 101 | 102 | output "failover_instance" { 103 | description = "Self link to the failover instance" 104 | value = join("", google_sql_database_instance.failover_replica.*.self_link) 105 | } 106 | 107 | output "failover_proxy_connection" { 108 | description = "Failover instance path for connecting with Cloud SQL Proxy. 
Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 109 | value = local.failover_proxy_connection 110 | } 111 | 112 | # ------------------------------------------------------------------------------ 113 | # FAILOVER CERT OUTPUTS - ONLY APPLICABLE TO MYSQL 114 | # ------------------------------------------------------------------------------ 115 | 116 | output "failover_replica_ca_cert" { 117 | description = "The CA Certificate used to connect to the failover instance via SSL" 118 | value = local.failover_certificate 119 | } 120 | 121 | output "failover_replica_ca_cert_common_name" { 122 | description = "The CN valid for the failover instance CA Cert" 123 | value = local.failover_certificate_common_name 124 | } 125 | 126 | output "failover_replica_ca_cert_create_time" { 127 | description = "Creation time of the failover instance CA Cert" 128 | value = local.failover_certificate_create_time 129 | } 130 | 131 | output "failover_replica_ca_cert_expiration_time" { 132 | description = "Expiration time of the failover instance CA Cert" 133 | value = local.failover_certificate_expiration_time 134 | } 135 | 136 | output "failover_replica_ca_cert_sha1_fingerprint" { 137 | description = "SHA Fingerprint of the failover instance CA Cert" 138 | value = local.failover_certificate_sha1_fingerprint 139 | } 140 | 141 | # ------------------------------------------------------------------------------ 142 | # READ REPLICA OUTPUTS 143 | # ------------------------------------------------------------------------------ 144 | 145 | output "read_replica_instance_names" { 146 | description = "List of names for the read replica instances" 147 | value = google_sql_database_instance.read_replica.*.name 148 | } 149 | 150 | output "read_replica_ip_addresses" { 151 | description = "All IP addresses of the read replica instances JSON encoded, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address" 152 | value = jsonencode(google_sql_database_instance.read_replica.*.ip_address) 153 | } 154 | 155 | output "read_replica_public_ip_addresses" { 156 | description = "List of public IPv4 addresses of the read replica instances." 157 | value = google_sql_database_instance.read_replica.*.public_ip_address 158 | } 159 | 160 | output "read_replica_private_ip_addresses" { 161 | description = "List of private IPv4 addresses of the read replica instances." 162 | value = google_sql_database_instance.read_replica.*.private_ip_address 163 | } 164 | 165 | output "read_replica_instances" { 166 | description = "List of self links to the read replica instances" 167 | value = google_sql_database_instance.read_replica.*.self_link 168 | } 169 | 170 | output "read_replica_proxy_connections" { 171 | description = "List of read replica instance paths for connecting with Cloud SQL Proxy. 
Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 172 | value = data.template_file.read_replica_proxy_connection.*.rendered 173 | } 174 | 175 | output "read_replica_server_ca_certs" { 176 | description = "JSON encoded list of CA Certificates used to connect to the read replica instances via SSL" 177 | value = jsonencode(google_sql_database_instance.read_replica.*.server_ca_cert) 178 | } 179 | 180 | # ------------------------------------------------------------------------------ 181 | # MISC OUTPUTS 182 | # ------------------------------------------------------------------------------ 183 | 184 | output "complete" { 185 | description = "Output signaling that all resources have been created" 186 | value = data.template_file.complete.rendered 187 | } 188 | 189 | output "service_account_email_address" { 190 | description = "Service account email address associated with the CloudSQL instance" 191 | value = google_sql_database_instance.master.service_account_email_address 192 | } 193 | -------------------------------------------------------------------------------- /modules/cloud-sql/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | description = "The region to host the database in." 13 | type = string 14 | } 15 | 16 | variable "name" { 17 | description = "The name of the database instance. Note, after a name is used, it cannot be reused for up to one week. Use lowercase letters, numbers, and hyphens. Start with a letter." 18 | type = string 19 | } 20 | 21 | variable "engine" { 22 | description = "The engine version of the database, e.g. `MYSQL_5_6` or `MYSQL_5_7`. See https://cloud.google.com/sql/docs/features for supported versions." 23 | type = string 24 | } 25 | 26 | variable "machine_type" { 27 | description = "The machine type for the instances. See this page for supported tiers and pricing: https://cloud.google.com/sql/pricing" 28 | type = string 29 | } 30 | 31 | variable "db_name" { 32 | description = "Name of your database. Needs to follow MySQL identifier rules: https://dev.mysql.com/doc/refman/5.7/en/identifiers.html" 33 | type = string 34 | } 35 | 36 | variable "master_user_name" { 37 | description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control." 38 | type = string 39 | } 40 | 41 | variable "master_user_password" { 42 | description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control." 43 | type = string 44 | } 45 | 46 | # --------------------------------------------------------------------------------------------------------------------- 47 | # OPTIONAL PARAMETERS 48 | # Generally, these values won't need to be changed. 
49 | # --------------------------------------------------------------------------------------------------------------------- 50 | 51 | variable "activation_policy" { 52 | description = "This specifies when the instance should be active. Can be either `ALWAYS`, `NEVER` or `ON_DEMAND`." 53 | type = string 54 | default = "ALWAYS" 55 | } 56 | 57 | variable "authorized_networks" { 58 | description = "A list of authorized CIDR-formatted IP address ranges that can connect to this DB. Only applies to public IP instances." 59 | type = list(map(string)) 60 | default = [] 61 | 62 | # Example: 63 | # 64 | # authorized_networks = [ 65 | # { 66 | # name = "all-inbound" # optional 67 | # value = "0.0.0.0/0" 68 | # } 69 | # ] 70 | } 71 | 72 | variable "backup_enabled" { 73 | description = "Set to false if you want to disable backup." 74 | type = bool 75 | default = true 76 | } 77 | 78 | variable "backup_start_time" { 79 | description = "HH:MM format (e.g. 04:00) time indicating when backup configuration starts. NOTE: Start time is randomly assigned if backup is enabled and 'backup_start_time' is not set" 80 | type = string 81 | default = "04:00" 82 | } 83 | 84 | variable "postgres_point_in_time_recovery_enabled" { 85 | description = "Will restart database if enabled after instance creation - only applicable to PostgreSQL" 86 | type = bool 87 | default = false 88 | } 89 | 90 | variable "mysql_binary_log_enabled" { 91 | description = "Set to false if you want to disable binary logs - only applicable to MySQL. Note, when using failover or read replicas, master and existing backups need to have binary_log_enabled=true set." 92 | type = bool 93 | default = true 94 | } 95 | 96 | variable "maintenance_window_day" { 97 | description = "Day of week (1-7), starting on Monday, on which system maintenance can occur. Performance may be degraded or there may even be a downtime during maintenance windows." 98 | type = number 99 | default = 7 100 | } 101 | 102 | variable "maintenance_window_hour" { 103 | description = "Hour of day (0-23) on which system maintenance can occur. Ignored if 'maintenance_window_day' not set. Performance may be degraded or there may even be a downtime during maintenance windows." 104 | type = number 105 | default = 7 106 | } 107 | 108 | variable "maintenance_track" { 109 | description = "Receive updates earlier (canary) or later (stable)." 110 | type = string 111 | default = "stable" 112 | } 113 | 114 | variable "db_charset" { 115 | description = "The charset for the default database." 116 | type = string 117 | default = null 118 | } 119 | 120 | variable "db_collation" { 121 | description = "The collation for the default database. Example for MySQL databases: 'utf8_general_ci'." 122 | type = string 123 | default = null 124 | } 125 | 126 | variable "database_flags" { 127 | description = "List of Cloud SQL flags that are applied to the database server" 128 | type = list(any) 129 | default = [] 130 | 131 | # Example: 132 | # 133 | # database_flags = [ 134 | # { 135 | # name = "auto_increment_increment" 136 | # value = "10" 137 | # }, 138 | # { 139 | # name = "auto_increment_offset" 140 | # value = "5" 141 | # }, 142 | #] 143 | } 144 | 145 | variable "disk_autoresize" { 146 | description = "Second Generation only. Configuration to increase storage size automatically." 147 | type = bool 148 | default = true 149 | } 150 | 151 | variable "disk_size" { 152 | description = "Second generation only. The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased." 
153 | type = number 154 | default = 10 155 | } 156 | 157 | variable "disk_type" { 158 | description = "The type of storage to use. Must be one of `PD_SSD` or `PD_HDD`." 159 | type = string 160 | default = "PD_SSD" 161 | } 162 | 163 | variable "master_zone" { 164 | description = "Preferred zone for the master instance (e.g. 'us-central1-a'). 'region'. If null, Google will auto-assign a zone." 165 | type = string 166 | default = null 167 | } 168 | 169 | variable "master_user_host" { 170 | description = "The host part for the default user, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. Don't set this field for Postgres instances." 171 | type = string 172 | default = "%" 173 | } 174 | 175 | # In nearly all cases, databases should NOT be publicly accessible, however if you're migrating from a PAAS provider like Heroku to GCP, this needs to remain open to the internet. 176 | variable "enable_public_internet_access" { 177 | description = "WARNING: - In nearly all cases a database should NOT be publicly accessible. Only set this to true if you want the database open to the internet." 178 | type = bool 179 | default = false 180 | } 181 | 182 | variable "enable_failover_replica" { 183 | description = "Set to true to enable failover replica." 184 | type = bool 185 | default = false 186 | } 187 | 188 | variable "mysql_failover_replica_zone" { 189 | description = "The preferred zone for the failover instance (e.g. 'us-central1-b'). Must be different than 'master_zone'. Only applicable to MySQL, Postgres will determine this automatically." 190 | type = string 191 | default = null 192 | } 193 | 194 | variable "require_ssl" { 195 | description = "True if the instance should require SSL/TLS for users connecting over IP. Note: SSL/TLS is needed to provide security when you connect to Cloud SQL using IP addresses. If you are connecting to your instance only by using the Cloud SQL Proxy or the Java Socket Library, you do not need to configure your instance to use SSL/TLS." 196 | type = bool 197 | default = false 198 | } 199 | 200 | variable "private_network" { 201 | description = "The resource link for the VPC network from which the Cloud SQL instance is accessible for private IP." 202 | type = string 203 | default = null 204 | } 205 | 206 | variable "num_read_replicas" { 207 | description = "The number of read replicas to create. Cloud SQL will replicate all data from the master to these replicas, which you can use to horizontally scale read traffic." 208 | type = number 209 | default = 0 210 | } 211 | 212 | variable "read_replica_zones" { 213 | description = "A list of compute zones where read replicas should be created. List size should match 'num_read_replicas'" 214 | type = list(string) 215 | default = [] 216 | 217 | # Example: 218 | # default = ["us-central1-b", "us-central1-c"] 219 | } 220 | 221 | variable "custom_labels" { 222 | description = "A map of custom labels to apply to the instance. The key is the label name and the value is the label value." 223 | type = map(string) 224 | default = {} 225 | } 226 | 227 | # Resources are created sequentially. Therefore we increase the default timeouts considerably 228 | # to not have the operations time out. 229 | variable "resource_timeout" { 230 | description = "Timeout for creating, updating and deleting database instances. Valid units of time are s, m, h." 231 | type = string 232 | default = "60m" 233 | } 234 | 235 | # Whether or not to allow Terraform to destroy the instance. 
236 | variable "deletion_protection" { 237 | description = "Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply command that deletes the instance will fail." 238 | type = bool 239 | default = "true" 240 | } 241 | 242 | # --------------------------------------------------------------------------------------------------------------------- 243 | # MODULE DEPENDENCIES 244 | # Workaround Terraform limitation where there is no module depends_on. 245 | # See https://github.com/hashicorp/terraform/issues/1178 for more details. 246 | # This can be used to make sure the module resources are created after other bootstrapping resources have been created. 247 | # For example: 248 | # dependencies = [google_service_networking_connection.private_vpc_connection.network] 249 | # --------------------------------------------------------------------------------------------------------------------- 250 | 251 | variable "dependencies" { 252 | description = "Create a dependency between the resources in this module to the interpolated values in this list (and thus the source resources). In other words, the resources in this module will now depend on the resources backing the values in this list such that those resources need to be created before the resources in this module, and the resources in this module need to be destroyed before the resources in the list." 253 | type = list(string) 254 | default = [] 255 | } 256 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # MASTER OUTPUTS 3 | # ------------------------------------------------------------------------------ 4 | 5 | output "master_instance_name" { 6 | description = "The name of the database instance" 7 | value = module.postgres.master_instance_name 8 | } 9 | 10 | output "master_ip_addresses" { 11 | description = "All IP addresses of the instance as list of maps, see https://www.terraform.io/docs/providers/google/r/sql_database_instance.html#ip_address-0-ip_address" 12 | value = module.postgres.master_ip_addresses 13 | } 14 | 15 | output "master_private_ip" { 16 | description = "The private IPv4 address of the master instance" 17 | value = module.postgres.master_private_ip_address 18 | } 19 | 20 | output "master_instance" { 21 | description = "Self link to the master instance" 22 | value = module.postgres.master_instance 23 | } 24 | 25 | output "master_proxy_connection" { 26 | description = "Instance path for connecting with Cloud SQL Proxy. Read more at https://cloud.google.com/sql/docs/mysql/sql-proxy" 27 | value = module.postgres.master_proxy_connection 28 | } 29 | 30 | # ------------------------------------------------------------------------------ 31 | # DB OUTPUTS 32 | # ------------------------------------------------------------------------------ 33 | 34 | output "db_name" { 35 | description = "Name of the default database" 36 | value = module.postgres.db_name 37 | } 38 | 39 | output "db" { 40 | description = "Self link to the default database" 41 | value = module.postgres.db 42 | } 43 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | This folder contains automated tests for this Module. 
All of the tests are written in [Go](https://golang.org/). 4 | Most of these are "integration tests" that deploy real infrastructure using Terraform and verify that infrastructure 5 | works as expected using a helper library called [Terratest](https://github.com/gruntwork-io/terratest). 6 | 7 | 8 | 9 | ## WARNING WARNING WARNING 10 | 11 | **Note #1**: Many of these tests create real resources in a GCP project and then try to clean those resources up at 12 | the end of a test run. That means these tests may cost you money to run! When adding tests, please be considerate of 13 | the resources you create and take extra care to clean everything up when you're done! 14 | 15 | **Note #2**: Never forcefully shut the tests down (e.g. by hitting `CTRL + C`) or the cleanup tasks won't run! 16 | 17 | **Note #3**: We set `-timeout 60m` on all tests not because they necessarily take that long, but because Go has a 18 | default test timeout of 10 minutes, after which it forcefully kills the tests with a `SIGQUIT`, preventing the cleanup 19 | tasks from running. Therefore, we set an overlying long timeout to make sure all tests have enough time to finish and 20 | clean up. 21 | 22 | 23 | 24 | ## Running the tests 25 | 26 | ### Prerequisites 27 | 28 | - Install the latest version of [Go](https://golang.org/). 29 | - Install [Terraform](https://www.terraform.io/downloads.html). 30 | - Configure your Google credentials using one of the [options supported by GCP](https://cloud.google.com/docs/authentication/getting-started). 31 | 32 | 33 | ### Run all the tests 34 | 35 | ```bash 36 | cd test 37 | go test -v -timeout 60m 38 | ``` 39 | 40 | 41 | ### Run a specific test 42 | 43 | To run a specific test called `TestFoo`: 44 | 45 | ```bash 46 | cd test 47 | go test -v -timeout 60m -run TestFoo 48 | ``` 49 | -------------------------------------------------------------------------------- /test/example_mysql_private_ip_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/gruntwork-io/terratest/modules/gcp" 10 | "github.com/gruntwork-io/terratest/modules/terraform" 11 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | const NAME_PREFIX_PRIVATE = "mysql-private" 16 | const EXAMPLE_NAME_PRIVATE = "mysql-private-ip" 17 | 18 | func TestMySqlPrivateIP(t *testing.T) { 19 | t.Parallel() 20 | 21 | //os.Setenv("SKIP_bootstrap", "true") 22 | //os.Setenv("SKIP_deploy", "true") 23 | //os.Setenv("SKIP_validate_outputs", "true") 24 | //os.Setenv("SKIP_teardown", "true") 25 | 26 | _examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples") 27 | exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_PRIVATE) 28 | 29 | test_structure.RunTestStage(t, "bootstrap", func() { 30 | projectId := gcp.GetGoogleProjectIDFromEnvVar(t) 31 | region := getRandomRegion(t, projectId) 32 | 33 | test_structure.SaveString(t, exampleDir, KEY_REGION, region) 34 | test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId) 35 | }) 36 | 37 | // At the end of the test, run `terraform destroy` to clean up any resources that were created 38 | defer test_structure.RunTestStage(t, "teardown", func() { 39 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 40 | terraform.Destroy(t, terraformOptions) 41 | }) 42 | 43 | test_structure.RunTestStage(t, "deploy", func() { 44 | region := 
test_structure.LoadString(t, exampleDir, KEY_REGION) 45 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 46 | terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_PRIVATE) 47 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 48 | 49 | terraform.InitAndApply(t, terraformOptions) 50 | }) 51 | 52 | test_structure.RunTestStage(t, "validate_outputs", func() { 53 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 54 | 55 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 56 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 57 | 58 | instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME) 59 | ipAddressesFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_IP_ADDRESSES) 60 | privateIPFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PRIVATE_IP) 61 | 62 | assert.Contains(t, ipAddressesFromOutput, "PRIVATE", "IP Addresses output has to contain 'PRIVATE'") 63 | assert.Contains(t, ipAddressesFromOutput, privateIPFromOutput, "IP Addresses output has to contain 'private_ip' from output") 64 | 65 | dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME) 66 | proxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION) 67 | 68 | expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput) 69 | 70 | assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_PRIVATE)) 71 | assert.Equal(t, DB_NAME, dbNameFromOutput) 72 | assert.Equal(t, expectedDBConn, proxyConnectionFromOutput) 73 | }) 74 | } 75 | -------------------------------------------------------------------------------- /test/example_mysql_public_ip_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "database/sql" 7 | "fmt" 8 | "path/filepath" 9 | "strings" 10 | "testing" 11 | "time" 12 | 13 | mydialer "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/mysql" 14 | "github.com/go-sql-driver/mysql" 15 | "github.com/gruntwork-io/terratest/modules/gcp" 16 | "github.com/gruntwork-io/terratest/modules/logger" 17 | "github.com/gruntwork-io/terratest/modules/terraform" 18 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 19 | "github.com/stretchr/testify/assert" 20 | "github.com/stretchr/testify/require" 21 | ) 22 | 23 | const NAME_PREFIX_PUBLIC = "mysql-public" 24 | const EXAMPLE_NAME_PUBLIC = "mysql-public-ip" 25 | const EXAMPLE_NAME_CERT = "client-certificate" 26 | 27 | func TestMySqlPublicIP(t *testing.T) { 28 | t.Parallel() 29 | 30 | //os.Setenv("SKIP_bootstrap", "true") 31 | //os.Setenv("SKIP_deploy", "true") 32 | //os.Setenv("SKIP_validate_outputs", "true") 33 | //os.Setenv("SKIP_sql_tests", "true") 34 | //os.Setenv("SKIP_proxy_tests", "true") 35 | //os.Setenv("SKIP_deploy_cert", "true") 36 | //os.Setenv("SKIP_redeploy", "true") 37 | //os.Setenv("SKIP_ssl_sql_tests", "true") 38 | //os.Setenv("SKIP_teardown_cert", "true") 39 | //os.Setenv("SKIP_teardown", "true") 40 | 41 | _examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples") 42 | exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_PUBLIC) 43 | certExampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_CERT) 44 | 45 | // BOOTSTRAP VARIABLES FOR THE TESTS 46 | test_structure.RunTestStage(t, "bootstrap", func() { 47 | projectId := 
gcp.GetGoogleProjectIDFromEnvVar(t) 48 | region := getRandomRegion(t, projectId) 49 | 50 | test_structure.SaveString(t, exampleDir, KEY_REGION, region) 51 | test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId) 52 | }) 53 | 54 | // AT THE END OF THE TESTS, RUN `terraform destroy` 55 | // TO CLEAN UP ANY RESOURCES THAT WERE CREATED 56 | defer test_structure.RunTestStage(t, "teardown", func() { 57 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 58 | terraform.Destroy(t, terraformOptions) 59 | }) 60 | 61 | defer test_structure.RunTestStage(t, "teardown_cert", func() { 62 | terraformOptions := test_structure.LoadTerraformOptions(t, certExampleDir) 63 | terraform.Destroy(t, terraformOptions) 64 | }) 65 | 66 | test_structure.RunTestStage(t, "deploy", func() { 67 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 68 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 69 | terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_PUBLIC) 70 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 71 | 72 | terraform.InitAndApply(t, terraformOptions) 73 | }) 74 | 75 | // VALIDATE MODULE OUTPUTS 76 | test_structure.RunTestStage(t, "validate_outputs", func() { 77 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 78 | 79 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 80 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 81 | 82 | instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME) 83 | dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME) 84 | proxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION) 85 | 86 | expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput) 87 | 88 | assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_PUBLIC)) 89 | assert.Equal(t, DB_NAME, dbNameFromOutput) 90 | assert.Equal(t, expectedDBConn, proxyConnectionFromOutput) 91 | }) 92 | 93 | // TEST REGULAR SQL CLIENT 94 | test_structure.RunTestStage(t, "sql_tests", func() { 95 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 96 | 97 | publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP) 98 | 99 | connectionString := fmt.Sprintf("%s:%s@tcp(%s:3306)/%s", DB_USER, DB_PASS, publicIp, DB_NAME) 100 | 101 | // Does not actually open up the connection - just returns a DB ref 102 | logger.Logf(t, "Connecting to: %s", publicIp) 103 | db, err := sql.Open("mysql", 104 | connectionString) 105 | require.NoError(t, err, "Failed to open DB connection") 106 | 107 | // Make sure we clean up properly 108 | defer db.Close() 109 | 110 | // Run ping to actually test the connection 111 | logger.Log(t, "Ping the DB") 112 | if err = db.Ping(); err != nil { 113 | t.Fatalf("Failed to ping DB: %v", err) 114 | } 115 | 116 | // Create table if not exists 117 | logger.Logf(t, "Create table: %s", MYSQL_CREATE_TEST_TABLE_WITH_AUTO_INCREMENT_STATEMENT) 118 | if _, err = db.Exec(MYSQL_CREATE_TEST_TABLE_WITH_AUTO_INCREMENT_STATEMENT); err != nil { 119 | t.Fatalf("Failed to create table: %v", err) 120 | } 121 | 122 | // Clean up 123 | logger.Logf(t, "Empty table: %s", SQL_EMPTY_TEST_TABLE_STATEMENT) 124 | if _, err = db.Exec(SQL_EMPTY_TEST_TABLE_STATEMENT); err != nil { 125 | t.Fatalf("Failed to clean up table: %v", err) 126 | } 127 | 128 | // Insert data to check that our auto-increment flags worked 
129 | logger.Logf(t, "Insert data: %s", MYSQL_INSERT_TEST_ROW) 130 | stmt, err := db.Prepare(MYSQL_INSERT_TEST_ROW) 131 | require.NoError(t, err, "Failed to prepare statement") 132 | 133 | // Execute the statement 134 | res, err := stmt.Exec("Grunt") 135 | require.NoError(t, err, "Failed to execute statement") 136 | 137 | // Get the last insert id 138 | lastId, err := res.LastInsertId() 139 | require.NoError(t, err, "Failed to get last insert id") 140 | 141 | // Since we set the auto increment to 5, modulus should always be 0 142 | assert.Equal(t, int64(0), int64(lastId%5)) 143 | }) 144 | 145 | // TEST CLOUD SQL PROXY 146 | test_structure.RunTestStage(t, "proxy_tests", func() { 147 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 148 | 149 | proxyConn := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION) 150 | 151 | logger.Logf(t, "Connecting to: %s via Cloud SQL Proxy", proxyConn) 152 | 153 | // Use the Cloud SQL Proxy for queries 154 | // See https://cloud.google.com/sql/docs/mysql/sql-proxy 155 | cfg := mydialer.Cfg(proxyConn, DB_USER, DB_PASS) 156 | cfg.DBName = DB_NAME 157 | cfg.ParseTime = true 158 | 159 | const timeout = 10 * time.Second 160 | cfg.Timeout = timeout 161 | cfg.ReadTimeout = timeout 162 | cfg.WriteTimeout = timeout 163 | 164 | // Dial in. This one actually pings the database already 165 | db, err := mydialer.DialCfg(cfg) 166 | require.NoError(t, err, "Failed to open Proxy DB connection") 167 | 168 | // Make sure we clean up properly 169 | defer db.Close() 170 | 171 | // Run ping to actually test the connection 172 | logger.Log(t, "Ping the DB") 173 | if err = db.Ping(); err != nil { 174 | t.Fatalf("Failed to ping DB via Proxy: %v", err) 175 | } 176 | 177 | // Insert data to check that our auto-increment flags worked 178 | logger.Logf(t, "Insert data: %s", MYSQL_INSERT_TEST_ROW) 179 | stmt, err := db.Prepare(MYSQL_INSERT_TEST_ROW) 180 | require.NoError(t, err, "Failed to prepare proxy statement") 181 | 182 | // Execute the statement 183 | res, err := stmt.Exec("Grunt2") 184 | require.NoError(t, err, "Failed to execute proxy statement") 185 | 186 | // Get the last insert id 187 | lastId, err := res.LastInsertId() 188 | require.NoError(t, err, "Failed to get last proxy insert id") 189 | 190 | // Since we set the auto increment to 5, modulus should always be 0 191 | assert.Equal(t, int64(0), int64(lastId%5)) 192 | }) 193 | 194 | // CREATE CLIENT CERT 195 | test_structure.RunTestStage(t, "deploy_cert", func() { 196 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 197 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 198 | 199 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 200 | instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME) 201 | commonName := fmt.Sprintf("%s-client", instanceNameFromOutput) 202 | 203 | terraformOptionsForCert := createTerratestOptionsForClientCert(projectId, region, certExampleDir, commonName, instanceNameFromOutput) 204 | test_structure.SaveTerraformOptions(t, certExampleDir, terraformOptionsForCert) 205 | 206 | terraform.InitAndApply(t, terraformOptionsForCert) 207 | }) 208 | 209 | // REDEPLOY WITH FORCED SSL SETTINGS 210 | test_structure.RunTestStage(t, "redeploy", func() { 211 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 212 | 213 | // Force secure connections 214 | terraformOptions.Vars["require_ssl"] = true 215 | terraform.InitAndApply(t, terraformOptions) 216 | }) 217 | 218 
| // RUN TESTS WITH SECURED CONNECTION 219 | test_structure.RunTestStage(t, "ssl_sql_tests", func() { 220 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 221 | terraformOptionsForCert := test_structure.LoadTerraformOptions(t, certExampleDir) 222 | 223 | //******************************************************** 224 | // First test that we're not allowed to connect over insecure connection 225 | //******************************************************** 226 | 227 | publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP) 228 | 229 | connectionString := fmt.Sprintf("%s:%s@tcp(%s:3306)/%s", DB_USER, DB_PASS, publicIp, DB_NAME) 230 | 231 | // Does not actually open up the connection - just returns a DB ref 232 | logger.Logf(t, "Connecting to: %s", publicIp) 233 | db, err := sql.Open("mysql", 234 | connectionString) 235 | require.NoError(t, err, "Failed to open DB connection") 236 | 237 | // Make sure we clean up properly 238 | defer db.Close() 239 | 240 | // Run ping to actually test the connection 241 | logger.Log(t, "Ping the DB with forced SSL") 242 | if err = db.Ping(); err != nil { 243 | logger.Logf(t, "Not allowed to ping %s as expected.", publicIp) 244 | } else { 245 | t.Fatalf("Ping %v succeeded against the odds.", publicIp) 246 | } 247 | 248 | //******************************************************** 249 | // Test connection over secure connection 250 | //******************************************************** 251 | 252 | // Prepare certificates 253 | rootCertPool := x509.NewCertPool() 254 | serverCertB := []byte(terraform.Output(t, terraformOptions, OUTPUT_MASTER_CA_CERT)) 255 | clientCertB := []byte(terraform.Output(t, terraformOptionsForCert, OUTPUT_CLIENT_CA_CERT)) 256 | clientPKB := []byte(terraform.Output(t, terraformOptionsForCert, OUTPUT_CLIENT_PRIVATE_KEY)) 257 | 258 | if ok := rootCertPool.AppendCertsFromPEM(serverCertB); !ok { 259 | t.Fatal("Failed to append PEM.") 260 | } 261 | 262 | clientCert := make([]tls.Certificate, 0, 1) 263 | certs, err := tls.X509KeyPair(clientCertB, clientPKB) 264 | require.NoError(t, err, "Failed to create key pair") 265 | 266 | clientCert = append(clientCert, certs) 267 | 268 | // Register MySQL certificate config 269 | // To avoid certificate validation errors complaining about 270 | // missing IP SANs, we set 'InsecureSkipVerify: true' 271 | mysql.RegisterTLSConfig("custom", &tls.Config{ 272 | RootCAs: rootCertPool, 273 | Certificates: clientCert, 274 | InsecureSkipVerify: true, 275 | }) 276 | 277 | // Prepare the secure connection string and ping the DB 278 | sslConnectionString := fmt.Sprintf("%s:%s@tcp(%s:3306)/%s?tls=custom", DB_USER, DB_PASS, publicIp, DB_NAME) 279 | db, err = sql.Open("mysql", sslConnectionString) 280 | 281 | // Run ping to actually test the connection with the SSL config 282 | logger.Log(t, "Ping the DB with forced SSL") 283 | if err = db.Ping(); err != nil { 284 | t.Fatalf("Failed to ping DB with forced SSL: %v", err) 285 | } 286 | }) 287 | } 288 | -------------------------------------------------------------------------------- /test/example_mysql_replicas_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "path/filepath" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/gruntwork-io/terratest/modules/gcp" 11 | "github.com/gruntwork-io/terratest/modules/logger" 12 | "github.com/gruntwork-io/terratest/modules/terraform" 13 | test_structure 
"github.com/gruntwork-io/terratest/modules/test-structure" 14 | "github.com/stretchr/testify/assert" 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | const NAME_PREFIX_REPLICAS = "mysql-replicas" 19 | const EXAMPLE_NAME_REPLICAS = "mysql-replicas" 20 | 21 | func TestMySqlReplicas(t *testing.T) { 22 | t.Parallel() 23 | 24 | //os.Setenv("SKIP_bootstrap", "true") 25 | //os.Setenv("SKIP_deploy", "true") 26 | //os.Setenv("SKIP_validate_outputs", "true") 27 | //os.Setenv("SKIP_sql_tests", "true") 28 | //os.Setenv("SKIP_read_replica_tests", "true") 29 | //os.Setenv("SKIP_teardown", "true") 30 | 31 | _examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples") 32 | exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_REPLICAS) 33 | 34 | // BOOTSTRAP VARIABLES FOR THE TESTS 35 | test_structure.RunTestStage(t, "bootstrap", func() { 36 | projectId := gcp.GetGoogleProjectIDFromEnvVar(t) 37 | region := getRandomRegion(t, projectId) 38 | 39 | masterZone, failoverReplicaZone := getTwoDistinctRandomZonesForRegion(t, projectId, region) 40 | readReplicaZone := gcp.GetRandomZoneForRegion(t, projectId, region) 41 | 42 | test_structure.SaveString(t, exampleDir, KEY_REGION, region) 43 | test_structure.SaveString(t, exampleDir, KEY_MASTER_ZONE, masterZone) 44 | test_structure.SaveString(t, exampleDir, KEY_FAILOVER_REPLICA_ZONE, failoverReplicaZone) 45 | test_structure.SaveString(t, exampleDir, KEY_READ_REPLICA_ZONE, readReplicaZone) 46 | test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId) 47 | }) 48 | 49 | // AT THE END OF THE TESTS, RUN `terraform destroy` 50 | // TO CLEAN UP ANY RESOURCES THAT WERE CREATED 51 | defer test_structure.RunTestStage(t, "teardown", func() { 52 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 53 | terraform.Destroy(t, terraformOptions) 54 | }) 55 | 56 | test_structure.RunTestStage(t, "deploy", func() { 57 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 58 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 59 | masterZone := test_structure.LoadString(t, exampleDir, KEY_MASTER_ZONE) 60 | failoverReplicaZone := test_structure.LoadString(t, exampleDir, KEY_FAILOVER_REPLICA_ZONE) 61 | readReplicaZone := test_structure.LoadString(t, exampleDir, KEY_READ_REPLICA_ZONE) 62 | terraformOptions := createTerratestOptionsForCloudSqlReplicas(projectId, region, exampleDir, NAME_PREFIX_REPLICAS, masterZone, failoverReplicaZone, 1, readReplicaZone) 63 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 64 | 65 | terraform.InitAndApply(t, terraformOptions) 66 | }) 67 | 68 | // VALIDATE MODULE OUTPUTS 69 | test_structure.RunTestStage(t, "validate_outputs", func() { 70 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 71 | 72 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 73 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 74 | 75 | instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME) 76 | dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME) 77 | proxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION) 78 | 79 | expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput) 80 | 81 | assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_REPLICAS)) 82 | assert.Equal(t, DB_NAME, dbNameFromOutput) 83 | assert.Equal(t, expectedDBConn, proxyConnectionFromOutput) 84 | 85 | // Failover replica 
outputs 86 | failoverInstanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_FAILOVER_INSTANCE_NAME) 87 | failoverProxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_FAILOVER_PROXY_CONNECTION) 88 | 89 | expectedFailoverDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, failoverInstanceNameFromOutput) 90 | 91 | assert.True(t, strings.HasPrefix(failoverInstanceNameFromOutput, NAME_PREFIX_REPLICAS)) 92 | assert.Equal(t, expectedFailoverDBConn, failoverProxyConnectionFromOutput) 93 | 94 | // Read replica outputs 95 | readReplicaInstanceNameFromOutputList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_INSTANCE_NAMES) 96 | readReplicaProxyConnectionFromOutputList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_PROXY_CONNECTIONS) 97 | 98 | readReplicaInstanceNameFromOutput := readReplicaInstanceNameFromOutputList[0] 99 | readReplicaProxyConnectionFromOutput := readReplicaProxyConnectionFromOutputList[0] 100 | 101 | expectedReadReplicaDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, readReplicaInstanceNameFromOutput) 102 | 103 | assert.True(t, strings.HasPrefix(readReplicaInstanceNameFromOutput, NAME_PREFIX_REPLICAS)) 104 | assert.Equal(t, expectedReadReplicaDBConn, readReplicaProxyConnectionFromOutput) 105 | }) 106 | 107 | // TEST REGULAR SQL CLIENT 108 | test_structure.RunTestStage(t, "sql_tests", func() { 109 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 110 | 111 | publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP) 112 | 113 | connectionString := fmt.Sprintf("%s:%s@tcp(%s:3306)/%s", DB_USER, DB_PASS, publicIp, DB_NAME) 114 | 115 | // Does not actually open up the connection - just returns a DB ref 116 | logger.Logf(t, "Connecting to: %s", publicIp) 117 | db, err := sql.Open("mysql", connectionString) 118 | require.NoError(t, err, "Failed to open DB connection") 119 | 120 | // Make sure we clean up properly 121 | defer db.Close() 122 | 123 | // Run ping to actually test the connection 124 | logger.Log(t, "Ping the DB") 125 | if err = db.Ping(); err != nil { 126 | t.Fatalf("Failed to ping DB: %v", err) 127 | } 128 | 129 | // Create table if not exists 130 | logger.Logf(t, "Create table: %s", MYSQL_CREATE_TEST_TABLE_WITH_AUTO_INCREMENT_STATEMENT) 131 | if _, err = db.Exec(MYSQL_CREATE_TEST_TABLE_WITH_AUTO_INCREMENT_STATEMENT); err != nil { 132 | t.Fatalf("Failed to create table: %v", err) 133 | } 134 | 135 | // Clean up 136 | logger.Logf(t, "Empty table: %s", SQL_EMPTY_TEST_TABLE_STATEMENT) 137 | if _, err = db.Exec(SQL_EMPTY_TEST_TABLE_STATEMENT); err != nil { 138 | t.Fatalf("Failed to clean up table: %v", err) 139 | } 140 | 141 | // Insert data to check that our auto-increment flags worked 142 | logger.Logf(t, "Insert data: %s", MYSQL_INSERT_TEST_ROW) 143 | stmt, err := db.Prepare(MYSQL_INSERT_TEST_ROW) 144 | require.NoError(t, err, "Failed to prepare statement") 145 | 146 | // Execute the statement 147 | res, err := stmt.Exec("Grunt") 148 | require.NoError(t, err, "Failed to execute statement") 149 | 150 | // Get the last insert id 151 | lastId, err := res.LastInsertId() 152 | require.NoError(t, err, "Failed to get last insert id") 153 | 154 | // Since we set the auto increment to 7, modulus should always be 0 155 | assert.Equal(t, int64(0), int64(lastId%7)) 156 | }) 157 | 158 | // TEST READ REPLICA WITH REGULAR SQL CLIENT 159 | test_structure.RunTestStage(t, "read_replica_tests", func() { 160 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 
161 | 162 | readReplicaPublicIpList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_PUBLIC_IPS) 163 | readReplicaPublicIp := readReplicaPublicIpList[0] 164 | 165 | connectionString := fmt.Sprintf("%s:%s@tcp(%s:3306)/%s", DB_USER, DB_PASS, readReplicaPublicIp, DB_NAME) 166 | 167 | // Does not actually open up the connection - just returns a DB ref 168 | logger.Logf(t, "Connecting to read replica: %s", readReplicaPublicIp) 169 | db, err := sql.Open("mysql", connectionString) 170 | require.NoError(t, err, "Failed to open DB connection to read replica") 171 | 172 | // Make sure we clean up properly 173 | defer db.Close() 174 | 175 | // Run ping to actually test the connection 176 | logger.Log(t, "Ping the read replica DB") 177 | if err = db.Ping(); err != nil { 178 | t.Fatalf("Failed to ping read replica DB: %v", err) 179 | } 180 | 181 | // Try to insert data to verify we cannot write 182 | logger.Logf(t, "Insert data: %s", MYSQL_INSERT_TEST_ROW) 183 | stmt, err := db.Prepare(MYSQL_INSERT_TEST_ROW) 184 | require.NoError(t, err, "Failed to prepare insert readonly statement") 185 | 186 | // Execute the statement 187 | _, err = stmt.Exec("ReadOnlyGrunt") 188 | // This time we actually expect an error: 189 | // 'The MySQL server is running with the --read-only option so it cannot execute this statement' 190 | require.Error(t, err, "Should not be able to write to read replica") 191 | logger.Logf(t, "Failed to insert data to read replica as expected: %v", err) 192 | 193 | // Prepare statement for reading data 194 | stmtOut, err := db.Prepare(SQL_QUERY_ROW_COUNT) 195 | require.NoError(t, err, "Failed to prepare readonly count statement") 196 | 197 | // Query data, results don't matter... 198 | logger.Logf(t, "Query r/o data: %s", SQL_QUERY_ROW_COUNT) 199 | 200 | var numResults int 201 | 202 | err = stmtOut.QueryRow().Scan(&numResults) 203 | require.NoError(t, err, "Failed to execute query statement on read replica") 204 | 205 | logger.Logf(t, "Number of rows... 
just for fun: %v", numResults) 206 | 207 | }) 208 | } 209 | -------------------------------------------------------------------------------- /test/example_postgres_private_ip_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/gruntwork-io/terratest/modules/gcp" 10 | "github.com/gruntwork-io/terratest/modules/terraform" 11 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | const NAME_PREFIX_POSTGRES_PRIVATE = "postgres-private" 16 | const EXAMPLE_NAME_POSTGRES_PRIVATE = "postgres-private-ip" 17 | 18 | func TestPostgresPrivateIP(t *testing.T) { 19 | t.Parallel() 20 | 21 | //os.Setenv("SKIP_bootstrap", "true") 22 | //os.Setenv("SKIP_deploy", "true") 23 | //os.Setenv("SKIP_validate_outputs", "true") 24 | //os.Setenv("SKIP_teardown", "true") 25 | 26 | _examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples") 27 | exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_POSTGRES_PRIVATE) 28 | 29 | test_structure.RunTestStage(t, "bootstrap", func() { 30 | projectId := gcp.GetGoogleProjectIDFromEnvVar(t) 31 | region := getRandomRegion(t, projectId) 32 | 33 | test_structure.SaveString(t, exampleDir, KEY_REGION, region) 34 | test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId) 35 | }) 36 | 37 | // At the end of the test, run `terraform destroy` to clean up any resources that were created 38 | defer test_structure.RunTestStage(t, "teardown", func() { 39 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 40 | terraform.Destroy(t, terraformOptions) 41 | }) 42 | 43 | test_structure.RunTestStage(t, "deploy", func() { 44 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 45 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 46 | terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_POSTGRES_PRIVATE) 47 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 48 | 49 | terraform.InitAndApply(t, terraformOptions) 50 | }) 51 | 52 | test_structure.RunTestStage(t, "validate_outputs", func() { 53 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 54 | 55 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 56 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 57 | 58 | instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME) 59 | ipAddressesFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_IP_ADDRESSES) 60 | privateIPFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PRIVATE_IP) 61 | 62 | assert.Contains(t, ipAddressesFromOutput, "PRIVATE", "IP Addresses output has to contain 'PRIVATE'") 63 | assert.Contains(t, ipAddressesFromOutput, privateIPFromOutput, "IP Addresses output has to contain 'private_ip' from output") 64 | 65 | dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME) 66 | proxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION) 67 | 68 | expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput) 69 | 70 | assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_POSTGRES_PRIVATE)) 71 | assert.Equal(t, DB_NAME, dbNameFromOutput) 72 | assert.Equal(t, expectedDBConn, proxyConnectionFromOutput) 73 | }) 74 | } 75 | 
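
Each of the tests above drives its workflow through `test_structure.RunTestStage` and carries a block of commented-out `os.Setenv("SKIP_...")` calls. The sketch below is not part of the repo — the test name and stage bodies are illustrative — but it shows the convention those comments rely on: `RunTestStage` skips a stage whenever the `SKIP_<stage>` environment variable is set to a non-empty value, which lets you re-run a single stage against infrastructure that an earlier run left in place.

```go
// Minimal sketch (not part of this repo): the test name, stage names, and
// stage bodies are illustrative. RunTestStage skips a stage when the
// SKIP_<stage> environment variable is set to a non-empty value.
package test

import (
	"os"
	"testing"

	test_structure "github.com/gruntwork-io/terratest/modules/test-structure"
)

func TestStageSkippingSketch(t *testing.T) {
	// Pretend an earlier run already deployed the example; only validation should run now.
	os.Setenv("SKIP_deploy", "true")
	os.Setenv("SKIP_teardown", "true")

	defer test_structure.RunTestStage(t, "teardown", func() {
		// terraform.Destroy would normally run here; skipped because SKIP_teardown is set.
	})

	test_structure.RunTestStage(t, "deploy", func() {
		// terraform.InitAndApply would normally run here; skipped because SKIP_deploy is set.
	})

	test_structure.RunTestStage(t, "validate_outputs", func() {
		// The only stage that actually executes with the variables set above.
	})
}
```

With the same variables exported in your shell, running, for example, `go test -run TestPostgresPublicIP` reuses the existing deployment and executes only the stages that are not skipped.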
-------------------------------------------------------------------------------- /test/example_postgres_public_ip_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "testing" 10 | 11 | _ "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/postgres" 12 | "github.com/gruntwork-io/terratest/modules/gcp" 13 | "github.com/gruntwork-io/terratest/modules/logger" 14 | "github.com/gruntwork-io/terratest/modules/terraform" 15 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 16 | _ "github.com/lib/pq" 17 | "github.com/stretchr/testify/assert" 18 | "github.com/stretchr/testify/require" 19 | ) 20 | 21 | const NAME_PREFIX_POSTGRES_PUBLIC = "postgres-public" 22 | const EXAMPLE_NAME_POSTGRES_PUBLIC = "postgres-public-ip" 23 | 24 | func TestPostgresPublicIP(t *testing.T) { 25 | t.Parallel() 26 | 27 | //os.Setenv("SKIP_bootstrap", "true") 28 | //os.Setenv("SKIP_deploy", "true") 29 | //os.Setenv("SKIP_validate_outputs", "true") 30 | //os.Setenv("SKIP_sql_tests", "true") 31 | //os.Setenv("SKIP_proxy_tests", "true") 32 | //os.Setenv("SKIP_deploy_cert", "true") 33 | //os.Setenv("SKIP_redeploy", "true") 34 | //os.Setenv("SKIP_ssl_sql_tests", "true") 35 | //os.Setenv("SKIP_teardown_cert", "true") 36 | //os.Setenv("SKIP_teardown", "true") 37 | 38 | _examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples") 39 | exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_POSTGRES_PUBLIC) 40 | certExampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_CERT) 41 | 42 | // BOOTSTRAP VARIABLES FOR THE TESTS 43 | test_structure.RunTestStage(t, "bootstrap", func() { 44 | projectId := gcp.GetGoogleProjectIDFromEnvVar(t) 45 | region := getRandomRegion(t, projectId) 46 | 47 | test_structure.SaveString(t, exampleDir, KEY_REGION, region) 48 | test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId) 49 | }) 50 | 51 | // AT THE END OF THE TESTS, RUN `terraform destroy` 52 | // TO CLEAN UP ANY RESOURCES THAT WERE CREATED 53 | defer test_structure.RunTestStage(t, "teardown", func() { 54 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 55 | terraform.Destroy(t, terraformOptions) 56 | }) 57 | 58 | defer test_structure.RunTestStage(t, "teardown_cert", func() { 59 | terraformOptions := test_structure.LoadTerraformOptions(t, certExampleDir) 60 | terraform.Destroy(t, terraformOptions) 61 | }) 62 | 63 | test_structure.RunTestStage(t, "deploy", func() { 64 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 65 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 66 | terraformOptions := createTerratestOptionsForCloudSql(projectId, region, exampleDir, NAME_PREFIX_POSTGRES_PUBLIC) 67 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 68 | 69 | terraform.InitAndApply(t, terraformOptions) 70 | }) 71 | 72 | // VALIDATE MODULE OUTPUTS 73 | test_structure.RunTestStage(t, "validate_outputs", func() { 74 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 75 | 76 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 77 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 78 | 79 | instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME) 80 | dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME) 81 | proxyConnectionFromOutput := terraform.Output(t, terraformOptions, 
OUTPUT_MASTER_PROXY_CONNECTION) 82 | 83 | expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput) 84 | 85 | assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_POSTGRES_PUBLIC)) 86 | assert.Equal(t, DB_NAME, dbNameFromOutput) 87 | assert.Equal(t, expectedDBConn, proxyConnectionFromOutput) 88 | }) 89 | 90 | // TEST REGULAR SQL CLIENT 91 | test_structure.RunTestStage(t, "sql_tests", func() { 92 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 93 | 94 | publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP) 95 | 96 | connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, publicIp, DB_NAME) 97 | 98 | // Does not actually open up the connection - just returns a DB ref 99 | logger.Logf(t, "Connecting to: %s", publicIp) 100 | db, err := sql.Open("postgres", connectionString) 101 | require.NoError(t, err, "Failed to open DB connection") 102 | 103 | // Make sure we clean up properly 104 | defer db.Close() 105 | 106 | // Run ping to actually test the connection 107 | logger.Log(t, "Ping the DB") 108 | if err = db.Ping(); err != nil { 109 | t.Fatalf("Failed to ping DB: %v", err) 110 | } 111 | 112 | // Create table if not exists 113 | logger.Logf(t, "Create table: %s", POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL) 114 | if _, err = db.Exec(POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL); err != nil { 115 | t.Fatalf("Failed to create table: %v", err) 116 | } 117 | 118 | // Clean up 119 | logger.Logf(t, "Empty table: %s", SQL_EMPTY_TEST_TABLE_STATEMENT) 120 | if _, err = db.Exec(SQL_EMPTY_TEST_TABLE_STATEMENT); err != nil { 121 | t.Fatalf("Failed to clean up table: %v", err) 122 | } 123 | 124 | logger.Logf(t, "Insert data: %s", POSTGRES_INSERT_TEST_ROW) 125 | var testid int 126 | err = db.QueryRow(POSTGRES_INSERT_TEST_ROW).Scan(&testid) 127 | require.NoError(t, err, "Failed to insert data") 128 | 129 | assert.True(t, testid > 0, "Data was inserted") 130 | }) 131 | 132 | // TEST CLOUD SQL PROXY 133 | test_structure.RunTestStage(t, "proxy_tests", func() { 134 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 135 | 136 | proxyConn := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION) 137 | 138 | logger.Logf(t, "Connecting to: %s via Cloud SQL Proxy", proxyConn) 139 | 140 | // Use the Cloud SQL Proxy for queries 141 | // See https://cloud.google.com/sql/docs/postgres/sql-proxy 142 | 143 | // Note that sslmode=disable is required here, but it does not mean that the connection 144 | // is unencrypted. All connections via the proxy are completely encrypted.
145 | datasourceName := fmt.Sprintf("host=%s user=%s dbname=%s password=%s sslmode=disable", proxyConn, DB_USER, DB_NAME, DB_PASS) 146 | db, err := sql.Open("cloudsqlpostgres", datasourceName) 147 | 148 | require.NoError(t, err, "Failed to open Proxy DB connection") 149 | 150 | // Make sure we clean up properly 151 | defer db.Close() 152 | 153 | // Run ping to actually test the connection 154 | logger.Log(t, "Ping the DB via Proxy") 155 | if err = db.Ping(); err != nil { 156 | t.Fatalf("Failed to ping DB via Proxy: %v", err) 157 | } 158 | 159 | logger.Logf(t, "Insert data via Proxy: %s", POSTGRES_INSERT_TEST_ROW) 160 | var testid int 161 | err = db.QueryRow(POSTGRES_INSERT_TEST_ROW).Scan(&testid) 162 | require.NoError(t, err, "Failed to insert data via Proxy") 163 | 164 | assert.True(t, testid > 0, "Assert data was inserted") 165 | }) 166 | 167 | // CREATE CLIENT CERT 168 | test_structure.RunTestStage(t, "deploy_cert", func() { 169 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 170 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 171 | 172 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 173 | instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME) 174 | commonName := fmt.Sprintf("%s-client", instanceNameFromOutput) 175 | 176 | terraformOptionsForCert := createTerratestOptionsForClientCert(projectId, region, certExampleDir, commonName, instanceNameFromOutput) 177 | test_structure.SaveTerraformOptions(t, certExampleDir, terraformOptionsForCert) 178 | 179 | terraform.InitAndApply(t, terraformOptionsForCert) 180 | }) 181 | 182 | // REDEPLOY WITH FORCED SSL SETTINGS 183 | test_structure.RunTestStage(t, "redeploy", func() { 184 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 185 | 186 | // Force secure connections 187 | terraformOptions.Vars["require_ssl"] = true 188 | terraform.InitAndApply(t, terraformOptions) 189 | }) 190 | 191 | // RUN TESTS WITH SECURED CONNECTION 192 | test_structure.RunTestStage(t, "ssl_sql_tests", func() { 193 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 194 | terraformOptionsForCert := test_structure.LoadTerraformOptions(t, certExampleDir) 195 | 196 | //******************************************************** 197 | // First test that we're not allowed to connect over insecure connection 198 | //******************************************************** 199 | 200 | publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP) 201 | 202 | connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, publicIp, DB_NAME) 203 | 204 | // Does not actually open up the connection - just returns a DB ref 205 | logger.Logf(t, "Connecting to: %s", publicIp) 206 | db, err := sql.Open("postgres", 207 | connectionString) 208 | require.NoError(t, err, "Failed to open DB connection") 209 | 210 | // Make sure we clean up properly 211 | defer db.Close() 212 | 213 | // Run ping to actually test the connection 214 | logger.Log(t, "Ping the DB with forced SSL") 215 | if err = db.Ping(); err != nil { 216 | logger.Logf(t, "Not allowed to ping %s as expected.", publicIp) 217 | } else { 218 | t.Fatalf("Ping %v succeeded against the odds.", publicIp) 219 | } 220 | 221 | //******************************************************** 222 | // Test connection over secure connection 223 | //******************************************************** 224 | 225 | // Prepare certificates 226 | serverCertB := 
[]byte(terraform.Output(t, terraformOptions, OUTPUT_MASTER_CA_CERT)) 227 | clientCertB := []byte(terraform.Output(t, terraformOptionsForCert, OUTPUT_CLIENT_CA_CERT)) 228 | clientPKB := []byte(terraform.Output(t, terraformOptionsForCert, OUTPUT_CLIENT_PRIVATE_KEY)) 229 | 230 | serverCertFile := createTempFile(t, serverCertB) 231 | defer os.Remove(serverCertFile.Name()) 232 | 233 | clientCertFile := createTempFile(t, clientCertB) 234 | defer os.Remove(clientCertFile.Name()) 235 | 236 | clientPKFile := createTempFile(t, clientPKB) 237 | defer os.Remove(clientPKFile.Name()) 238 | 239 | // Prepare the secure connection string and ping the DB 240 | sslConnectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require&sslrootcert=%s&sslcert=%s&sslkey=%s", DB_USER, DB_PASS, publicIp, DB_NAME, serverCertFile.Name(), clientCertFile.Name(), clientPKFile.Name()) 241 | 242 | db, err = sql.Open("postgres", sslConnectionString) 243 | 244 | // Run ping to actually test the connection with the SSL config 245 | logger.Log(t, "Ping the DB with forced SSL") 246 | if err = db.Ping(); err != nil { 247 | t.Fatalf("Failed to ping DB with forced SSL: %v", err) 248 | } 249 | 250 | // Drop the test table if it exists 251 | logger.Logf(t, "Drop table: %s", POSTGRES_DROP_TEST_TABLE) 252 | if _, err = db.Exec(POSTGRES_DROP_TEST_TABLE); err != nil { 253 | t.Fatalf("Failed to drop table: %v", err) 254 | } 255 | }) 256 | } 257 | -------------------------------------------------------------------------------- /test/example_postgres_replicas_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "path/filepath" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/gruntwork-io/terratest/modules/gcp" 11 | "github.com/gruntwork-io/terratest/modules/logger" 12 | "github.com/gruntwork-io/terratest/modules/terraform" 13 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 14 | _ "github.com/lib/pq" 15 | "github.com/stretchr/testify/assert" 16 | "github.com/stretchr/testify/require" 17 | ) 18 | 19 | const NAME_PREFIX_POSTGRES_REPLICAS = "postgres-replicas" 20 | const EXAMPLE_NAME_POSTGRES_REPLICAS = "postgres-replicas" 21 | 22 | func TestPostgresReplicas(t *testing.T) { 23 | t.Parallel() 24 | 25 | //os.Setenv("SKIP_bootstrap", "true") 26 | //os.Setenv("SKIP_deploy", "true") 27 | //os.Setenv("SKIP_validate_outputs", "true") 28 | //os.Setenv("SKIP_sql_tests", "true") 29 | //os.Setenv("SKIP_read_replica_tests", "true") 30 | //os.Setenv("SKIP_teardown", "true") 31 | 32 | _examplesDir := test_structure.CopyTerraformFolderToTemp(t, "../", "examples") 33 | exampleDir := filepath.Join(_examplesDir, EXAMPLE_NAME_POSTGRES_REPLICAS) 34 | 35 | // BOOTSTRAP VARIABLES FOR THE TESTS 36 | test_structure.RunTestStage(t, "bootstrap", func() { 37 | projectId := gcp.GetGoogleProjectIDFromEnvVar(t) 38 | region := getRandomRegion(t, projectId) 39 | 40 | masterZone, readReplicaZone := getTwoDistinctRandomZonesForRegion(t, projectId, region) 41 | 42 | test_structure.SaveString(t, exampleDir, KEY_REGION, region) 43 | test_structure.SaveString(t, exampleDir, KEY_MASTER_ZONE, masterZone) 44 | test_structure.SaveString(t, exampleDir, KEY_READ_REPLICA_ZONE, readReplicaZone) 45 | test_structure.SaveString(t, exampleDir, KEY_PROJECT, projectId) 46 | }) 47 | 48 | // AT THE END OF THE TESTS, RUN `terraform destroy` 49 | // TO CLEAN UP ANY RESOURCES THAT WERE CREATED 50 | defer test_structure.RunTestStage(t, "teardown", func() { 51 | 
terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 52 | terraform.Destroy(t, terraformOptions) 53 | }) 54 | 55 | // AT THE END OF THE TESTS, CLEAN UP ANY POSTGRES OBJECTS THAT WERE CREATED 56 | defer test_structure.RunTestStage(t, "cleanup_postgres_objects", func() { 57 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 58 | 59 | publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP) 60 | 61 | connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, publicIp, DB_NAME) 62 | 63 | // Does not actually open up the connection - just returns a DB ref 64 | logger.Logf(t, "Connecting to: %s", publicIp) 65 | db, err := sql.Open("postgres", connectionString) 66 | require.NoError(t, err, "Failed to open DB connection") 67 | 68 | // Make sure we clean up properly 69 | defer db.Close() 70 | 71 | // Drop table if it exists 72 | logger.Logf(t, "Drop table: %s", POSTGRES_DROP_TEST_TABLE) 73 | if _, err = db.Exec(POSTGRES_DROP_TEST_TABLE); err != nil { 74 | t.Fatalf("Failed to drop table: %v", err) 75 | } 76 | }) 77 | 78 | test_structure.RunTestStage(t, "deploy", func() { 79 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 80 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 81 | masterZone := test_structure.LoadString(t, exampleDir, KEY_MASTER_ZONE) 82 | readReplicaZone := test_structure.LoadString(t, exampleDir, KEY_READ_REPLICA_ZONE) 83 | terraformOptions := createTerratestOptionsForCloudSqlReplicas(projectId, region, exampleDir, NAME_PREFIX_POSTGRES_REPLICAS, masterZone, "", 1, readReplicaZone) 84 | test_structure.SaveTerraformOptions(t, exampleDir, terraformOptions) 85 | 86 | terraform.InitAndApply(t, terraformOptions) 87 | }) 88 | 89 | // VALIDATE MODULE OUTPUTS 90 | test_structure.RunTestStage(t, "validate_outputs", func() { 91 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 92 | 93 | region := test_structure.LoadString(t, exampleDir, KEY_REGION) 94 | projectId := test_structure.LoadString(t, exampleDir, KEY_PROJECT) 95 | 96 | instanceNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_INSTANCE_NAME) 97 | dbNameFromOutput := terraform.Output(t, terraformOptions, OUTPUT_DB_NAME) 98 | proxyConnectionFromOutput := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PROXY_CONNECTION) 99 | 100 | expectedDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, instanceNameFromOutput) 101 | 102 | assert.True(t, strings.HasPrefix(instanceNameFromOutput, NAME_PREFIX_POSTGRES_REPLICAS)) 103 | assert.Equal(t, DB_NAME, dbNameFromOutput) 104 | assert.Equal(t, expectedDBConn, proxyConnectionFromOutput) 105 | 106 | // Read replica outputs 107 | readReplicaInstanceNameFromOutputList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_INSTANCE_NAMES) 108 | readReplicaProxyConnectionFromOutputList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_PROXY_CONNECTIONS) 109 | 110 | readReplicaInstanceNameFromOutput := readReplicaInstanceNameFromOutputList[0] 111 | readReplicaProxyConnectionFromOutput := readReplicaProxyConnectionFromOutputList[0] 112 | 113 | expectedReadReplicaDBConn := fmt.Sprintf("%s:%s:%s", projectId, region, readReplicaInstanceNameFromOutput) 114 | 115 | assert.True(t, strings.HasPrefix(readReplicaInstanceNameFromOutput, NAME_PREFIX_POSTGRES_REPLICAS)) 116 | assert.Equal(t, expectedReadReplicaDBConn, readReplicaProxyConnectionFromOutput) 117 | }) 118 | 119 | // TEST REGULAR SQL CLIENT 120 | 
test_structure.RunTestStage(t, "sql_tests", func() { 121 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 122 | 123 | publicIp := terraform.Output(t, terraformOptions, OUTPUT_MASTER_PUBLIC_IP) 124 | 125 | connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, publicIp, DB_NAME) 126 | 127 | // Does not actually open up the connection - just returns a DB ref 128 | logger.Logf(t, "Connecting to: %s", publicIp) 129 | db, err := sql.Open("postgres", connectionString) 130 | require.NoError(t, err, "Failed to open DB connection") 131 | 132 | // Make sure we clean up properly 133 | defer db.Close() 134 | 135 | // Run ping to actually test the connection 136 | logger.Log(t, "Ping the DB") 137 | if err = db.Ping(); err != nil { 138 | t.Fatalf("Failed to ping DB: %v", err) 139 | } 140 | 141 | // Create table if not exists 142 | logger.Logf(t, "Create table: %s", POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL) 143 | if _, err = db.Exec(POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL); err != nil { 144 | t.Fatalf("Failed to create table: %v", err) 145 | } 146 | 147 | // Clean up 148 | logger.Logf(t, "Empty table: %s", SQL_EMPTY_TEST_TABLE_STATEMENT) 149 | if _, err = db.Exec(SQL_EMPTY_TEST_TABLE_STATEMENT); err != nil { 150 | t.Fatalf("Failed to clean up table: %v", err) 151 | } 152 | 153 | logger.Logf(t, "Insert data: %s", POSTGRES_INSERT_TEST_ROW) 154 | var testid int 155 | err = db.QueryRow(POSTGRES_INSERT_TEST_ROW).Scan(&testid) 156 | require.NoError(t, err, "Failed to insert data") 157 | 158 | assert.True(t, testid > 0, "Data was inserted") 159 | }) 160 | 161 | // TEST READ REPLICA WITH REGULAR SQL CLIENT 162 | test_structure.RunTestStage(t, "read_replica_tests", func() { 163 | terraformOptions := test_structure.LoadTerraformOptions(t, exampleDir) 164 | 165 | readReplicaPublicIpList := terraform.OutputList(t, terraformOptions, OUTPUT_READ_REPLICA_PUBLIC_IPS) 166 | readReplicaPublicIp := readReplicaPublicIpList[0] 167 | 168 | connectionString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", DB_USER, DB_PASS, readReplicaPublicIp, DB_NAME) 169 | 170 | // Does not actually open up the connection - just returns a DB ref 171 | logger.Logf(t, "Connecting to: %s", readReplicaPublicIp) 172 | db, err := sql.Open("postgres", connectionString) 173 | require.NoError(t, err, "Failed to open DB connection") 174 | 175 | // Make sure we clean up properly 176 | defer db.Close() 177 | 178 | // Run ping to actually test the connection 179 | logger.Log(t, "Ping the DB") 180 | if err = db.Ping(); err != nil { 181 | t.Fatalf("Failed to ping DB: %v", err) 182 | } 183 | 184 | // Try to insert data to verify we cannot write 185 | logger.Logf(t, "Insert data: %s", POSTGRES_INSERT_TEST_ROW) 186 | var testid int 187 | err = db.QueryRow(POSTGRES_INSERT_TEST_ROW).Scan(&testid) 188 | 189 | // This time we actually expect an error: 190 | // 'cannot execute INSERT in a read-only transaction' 191 | require.Error(t, err, "Should not be able to write to read replica") 192 | logger.Logf(t, "Failed to insert data to read replica as expected: %v", err) 193 | 194 | // Query data, results don't matter... 
195 | logger.Logf(t, "Query r/o data: %s", SQL_QUERY_ROW_COUNT) 196 | rows, err := db.Query(SQL_QUERY_ROW_COUNT) 197 | require.NoError(t, err, "Failed to execute query statement on read replica") 198 | 199 | assert.True(t, rows.Next(), "We have a result") 200 | }) 201 | } 202 | -------------------------------------------------------------------------------- /test/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/gruntwork-io/terraform-google-sql/test 2 | 3 | go 1.14 4 | 5 | require ( 6 | github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20200504171905-7e668d9ad0ba 7 | github.com/go-sql-driver/mysql v1.5.0 8 | github.com/gruntwork-io/terratest v0.37.5 9 | github.com/lib/pq v1.5.1 10 | github.com/stretchr/testify v1.5.1 11 | ) 12 | -------------------------------------------------------------------------------- /test/test_util.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "testing" 7 | 8 | "github.com/gruntwork-io/terratest/modules/gcp" 9 | "github.com/gruntwork-io/terratest/modules/terraform" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | const DB_NAME = "testdb" 14 | const DB_USER = "testuser" 15 | const DB_PASS = "testpassword" 16 | 17 | const KEY_REGION = "region" 18 | const KEY_PROJECT = "project" 19 | const KEY_MASTER_ZONE = "masterZone" 20 | const KEY_FAILOVER_REPLICA_ZONE = "failoverReplicaZone" 21 | const KEY_READ_REPLICA_ZONE = "readReplicaZone" 22 | 23 | const OUTPUT_MASTER_IP_ADDRESSES = "master_ip_addresses" 24 | const OUTPUT_MASTER_INSTANCE_NAME = "master_instance_name" 25 | const OUTPUT_FAILOVER_INSTANCE_NAME = "failover_instance_name" 26 | const OUTPUT_MASTER_PROXY_CONNECTION = "master_proxy_connection" 27 | const OUTPUT_FAILOVER_PROXY_CONNECTION = "failover_proxy_connection" 28 | const OUTPUT_READ_REPLICA_PROXY_CONNECTIONS = "read_replica_proxy_connections" 29 | const OUTPUT_READ_REPLICA_INSTANCE_NAMES = "read_replica_instance_names" 30 | const OUTPUT_READ_REPLICA_PUBLIC_IPS = "read_replica_public_ips" 31 | const OUTPUT_MASTER_PUBLIC_IP = "master_public_ip" 32 | const OUTPUT_MASTER_PRIVATE_IP = "master_private_ip" 33 | const OUTPUT_MASTER_CA_CERT = "master_ca_cert" 34 | const OUTPUT_CLIENT_CA_CERT = "client_ca_cert" 35 | const OUTPUT_CLIENT_PRIVATE_KEY = "client_private_key" 36 | 37 | const OUTPUT_DB_NAME = "db_name" 38 | 39 | const MYSQL_CREATE_TEST_TABLE_WITH_AUTO_INCREMENT_STATEMENT = "CREATE TABLE IF NOT EXISTS test (id int NOT NULL AUTO_INCREMENT, name varchar(10) NOT NULL, PRIMARY KEY (ID))" 40 | const MYSQL_INSERT_TEST_ROW = "INSERT INTO test(name) VALUES(?)" 41 | 42 | const SQL_EMPTY_TEST_TABLE_STATEMENT = "DELETE FROM test" 43 | const SQL_QUERY_ROW_COUNT = "SELECT count(*) FROM test" 44 | 45 | const POSTGRES_CREATE_TEST_TABLE_WITH_SERIAL = "CREATE TABLE IF NOT EXISTS test (id SERIAL, name varchar(10) NOT NULL, PRIMARY KEY (ID))" 46 | const POSTGRES_INSERT_TEST_ROW = "INSERT INTO test(name) VALUES('Grunty') RETURNING id" 47 | const POSTGRES_DROP_TEST_TABLE = "DROP TABLE IF EXISTS test" 48 | 49 | func getRandomRegion(t *testing.T, projectID string) string { 50 | approvedRegions := []string{"europe-north1", "europe-west1", "europe-west2", "europe-west3", "us-central1", "us-east1", "us-west1"} 51 | //approvedRegions := []string{"europe-north1"} 52 | return gcp.GetRandomRegion(t, projectID, approvedRegions, []string{}) 53 | } 54 | 55 | func getTwoDistinctRandomZonesForRegion(t *testing.T, 
projectID string, region string) (string, string) { 56 | firstZone := gcp.GetRandomZoneForRegion(t, projectID, region) 57 | secondZone := gcp.GetRandomZoneForRegion(t, projectID, region) 58 | for { 59 | if firstZone != secondZone { 60 | break 61 | } 62 | secondZone = gcp.GetRandomZoneForRegion(t, projectID, region) 63 | } 64 | 65 | return firstZone, secondZone 66 | } 67 | 68 | func createTerratestOptionsForCloudSql(projectId string, region string, exampleDir string, namePrefix string) *terraform.Options { 69 | terratestOptions := &terraform.Options{ 70 | // The path to where your Terraform code is located 71 | TerraformDir: exampleDir, 72 | Vars: map[string]interface{}{ 73 | "region": region, 74 | "project": projectId, 75 | "name_prefix": namePrefix, 76 | "db_name": DB_NAME, 77 | "master_user_name": DB_USER, 78 | "master_user_password": DB_PASS, 79 | }, 80 | } 81 | 82 | return terratestOptions 83 | } 84 | 85 | func createTerratestOptionsForCloudSqlReplicas(projectId string, region string, exampleDir string, namePrefix string, masterZone string, failoverReplicaZone string, numReadReplicas int, readReplicaZone string) *terraform.Options { 86 | terratestOptions := &terraform.Options{ 87 | // The path to where your Terraform code is located 88 | TerraformDir: exampleDir, 89 | Vars: map[string]interface{}{ 90 | "region": region, 91 | "master_zone": masterZone, 92 | "num_read_replicas": numReadReplicas, 93 | "read_replica_zones": []string{readReplicaZone}, 94 | "failover_replica_zone": failoverReplicaZone, 95 | "project": projectId, 96 | "name_prefix": namePrefix, 97 | "db_name": DB_NAME, 98 | "master_user_name": DB_USER, 99 | "master_user_password": DB_PASS, 100 | }, 101 | } 102 | 103 | return terratestOptions 104 | } 105 | 106 | func createTerratestOptionsForClientCert(projectId string, region string, exampleDir string, commonName string, instanceName string) *terraform.Options { 107 | 108 | terratestOptions := &terraform.Options{ 109 | // The path to where your Terraform code is located 110 | TerraformDir: exampleDir, 111 | Vars: map[string]interface{}{ 112 | "region": region, 113 | "project": projectId, 114 | "common_name": commonName, 115 | "database_instance_name": instanceName, 116 | }, 117 | } 118 | 119 | return terratestOptions 120 | } 121 | 122 | func createTempFile(t *testing.T, content []byte) *os.File { 123 | tmpFile, err := ioutil.TempFile(os.TempDir(), "temp-") 124 | require.NoError(t, err, "Failed to create temp file") 125 | _, err = tmpFile.Write(content) 126 | require.NoError(t, err, "Failed to write to temp file") 127 | err = tmpFile.Close() 128 | require.NoError(t, err, "Failed to close temp file") 129 | return tmpFile 130 | } 131 | -------------------------------------------------------------------------------- /test/validation/validate_all_modules_and_examples_test.go: -------------------------------------------------------------------------------- 1 | package testvalidate 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | 8 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | // TestValidateAllTerraformModulesAndExamples recursively finds all modules and examples (by default) subdirectories in 14 | // the repo and runs Terraform InitAndValidate on them to flush out missing variables, typos, unused vars, etc 15 | func TestValidateAllTerraformModulesAndExamples(t *testing.T) { 16 | t.Parallel() 17 | 18 | cwd, err := os.Getwd() 19 | require.NoError(t, err) 20 | 21 | opts, 
optsErr := test_structure.NewValidationOptions(filepath.Join(cwd, "../.."), []string{}, []string{}) 22 | require.NoError(t, optsErr) 23 | 24 | test_structure.ValidateAllTerraformModules(t, opts) 25 | } 26 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the database in." 8 | } 9 | 10 | variable "region" { 11 | description = "The region to host the database in." 12 | } 13 | 14 | # Note: after a database instance name has been used, it cannot be reused for up to one week. 15 | variable "name_prefix" { 16 | description = "The name prefix for the database instance. Will be appended with a random string. Use lowercase letters, numbers, and hyphens. Start with a letter." 17 | } 18 | 19 | variable "master_user_name" { 20 | description = "The username part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_name so you don't check it into source control." 21 | } 22 | 23 | variable "master_user_password" { 24 | description = "The password part for the default user credentials, i.e. 'master_user_name'@'master_user_host' IDENTIFIED BY 'master_user_password'. This should typically be set as the environment variable TF_VAR_master_user_password so you don't check it into source control." 25 | } 26 | 27 | # --------------------------------------------------------------------------------------------------------------------- 28 | # OPTIONAL PARAMETERS 29 | # Generally, these values won't need to be changed. 30 | # --------------------------------------------------------------------------------------------------------------------- 31 | variable "postgres_version" { 32 | description = "The engine version of the database, e.g. `POSTGRES_9_6`. See https://cloud.google.com/sql/docs/db-versions for supported versions." 33 | default = "POSTGRES_9_6" 34 | } 35 | 36 | variable "machine_type" { 37 | description = "The machine type to use. See https://cloud.google.com/sql/pricing for more details." 38 | default = "db-f1-micro" 39 | } 40 | 41 | variable "db_name" { 42 | description = "Name for the db" 43 | default = "default" 44 | } 45 | 46 | variable "name_override" { 47 | description = "You may optionally override the generated name (name_prefix + random string) by specifying a name here" 48 | default = "" 49 | } 50 | --------------------------------------------------------------------------------
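
The descriptions in the root `variables.tf` recommend supplying the credentials through `TF_VAR_*` environment variables rather than hard-coding them. As a hedged sketch of what that could look like from the Terratest side (the helper name, variable values, and password source are assumptions, not part of the repo), `terraform.Options` also accepts an `EnvVars` map, so the password can be passed through the environment instead of as a `-var` argument the way the `Vars` maps in `test_util.go` pass it:

```go
// Minimal sketch (not part of this repo): the helper name and example values are
// illustrative. It mirrors the variables declared in the root variables.tf, but
// injects the password as TF_VAR_master_user_password via EnvVars, as the
// variable descriptions suggest, instead of passing it in Vars.
package test

import (
	"github.com/gruntwork-io/terratest/modules/terraform"
)

func exampleOptionsWithEnvCredentials(exampleDir string, password string) *terraform.Options {
	return &terraform.Options{
		TerraformDir: exampleDir,
		Vars: map[string]interface{}{
			"project":          "my-gcp-project", // illustrative project ID
			"region":           "europe-west1",   // illustrative region
			"name_prefix":      "postgres-sketch",
			"db_name":          DB_NAME,
			"master_user_name": DB_USER,
		},
		EnvVars: map[string]string{
			// Terraform reads TF_VAR_master_user_password from the environment,
			// so the secret never shows up as a -var command-line argument.
			"TF_VAR_master_user_password": password,
		},
	}
}
```

This is only an alternative wiring; the repo's own helpers keep the password in `Vars`, which Terratest passes to Terraform as `-var` arguments.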