├── test ├── charts │ └── minimal-pod │ │ ├── values.yaml │ │ ├── Chart.yaml │ │ ├── .helmignore │ │ └── templates │ │ ├── pod.yaml │ │ └── _helpers.tpl ├── go.mod ├── validation │ └── validate_all_modules_and_examples_test.go ├── test_helpers.go ├── terratest_options.go ├── README.md ├── gke_cluster_test.go ├── gke_basic_helm_test.go └── go.sum ├── CODEOWNERS ├── NOTICE ├── .pre-commit-config.yaml ├── modules ├── gke-service-account │ ├── outputs.tf │ ├── README.md │ ├── variables.tf │ └── main.tf └── gke-cluster │ ├── outputs.tf │ ├── main.tf │ ├── README.md │ └── variables.tf ├── examples ├── gke-private-cluster │ ├── example-app │ │ └── nginx.yml │ ├── outputs.tf │ ├── README.md │ ├── variables.tf │ └── main.tf ├── gke-public-cluster │ ├── outputs.tf │ ├── README.md │ ├── variables.tf │ └── main.tf └── gke-basic-helm │ └── README.md ├── .gitignore ├── outputs.tf ├── .github ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── pull_request_template.md ├── .circleci └── config.yml ├── CONTRIBUTING.md ├── variables.tf ├── GRUNTWORK_PHILOSOPHY.md ├── README.md ├── LICENSE └── main.tf /test/charts/minimal-pod/values.yaml: -------------------------------------------------------------------------------- 1 | image: "" 2 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @marinalimeira @robmorgan @ina-stoyanova @gruntwork-io/maintenance-tier-3-orion 2 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | terraform-google-gke 2 | Copyright 2019 Gruntwork, Inc. 3 | 4 | This product includes software developed at Gruntwork (https://www.gruntwork.io/). 5 | -------------------------------------------------------------------------------- /test/charts/minimal-pod/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: minimal-pod 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/gruntwork-io/pre-commit 3 | rev: v0.1.10 4 | hooks: 5 | - id: terraform-fmt 6 | - id: goimports 7 | 8 | -------------------------------------------------------------------------------- /test/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/gruntwork-io/terraform-google-gke/test 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/gruntwork-io/terratest v0.36.3 7 | github.com/stretchr/testify v1.4.0 8 | ) 9 | -------------------------------------------------------------------------------- /modules/gke-service-account/outputs.tf: -------------------------------------------------------------------------------- 1 | output "email" { 2 | # This may seem redundant with the `name` input, but it serves an important 3 | # purpose. Terraform won't establish a dependency graph without this to interpolate on. 4 | description = "The email address of the custom service account." 
5 | value = google_service_account.service_account.email 6 | } 7 | -------------------------------------------------------------------------------- /test/charts/minimal-pod/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /examples/gke-private-cluster/example-app/nginx.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | 4 | metadata: 5 | name: nginx 6 | labels: 7 | app: nginx 8 | tier: backend 9 | spec: 10 | replicas: 2 11 | selector: 12 | matchLabels: 13 | app: nginx 14 | template: 15 | metadata: 16 | labels: 17 | app: nginx 18 | tier: backend 19 | spec: 20 | containers: 21 | - name: nginx 22 | image: nginx 23 | ports: 24 | - containerPort: 80 25 | -------------------------------------------------------------------------------- /test/charts/minimal-pod/templates/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: {{ include "minimal-pod.fullname" . }} 5 | labels: 6 | app.kubernetes.io/name: {{ include "minimal-pod.name" . }} 7 | helm.sh/chart: {{ include "minimal-pod.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | containers: 12 | - name: {{ .Chart.Name }} 13 | image: "{{ .Values.image }}" 14 | ports: 15 | - name: http 16 | containerPort: 80 17 | protocol: TCP 18 | 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform files 2 | .terraform 3 | terraform.tfstate 4 | terraform.tfvars 5 | *.tfstate* 6 | 7 | # OS X files 8 | .history 9 | .DS_Store 10 | 11 | # IntelliJ files 12 | .idea_modules 13 | *.iml 14 | *.iws 15 | *.ipr 16 | .idea/ 17 | build/ 18 | */build/ 19 | out/ 20 | 21 | # Go best practices dictate that libraries should not include the vendor directory 22 | vendor 23 | 24 | # Folder used to store temporary test data by Terratest 25 | .test-data 26 | 27 | # Mock user-data log file 28 | mock-user-data.log 29 | 30 | # Ignore Terraform lock files, as we want to test the Terraform code in these repos with the latest provider 31 | # versions. 32 | .terraform.lock.hcl 33 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_endpoint" { 2 | description = "The IP address of the cluster master." 3 | sensitive = true 4 | value = module.gke_cluster.endpoint 5 | } 6 | 7 | output "client_certificate" { 8 | description = "Public certificate used by clients to authenticate to the cluster endpoint." 9 | value = module.gke_cluster.client_certificate 10 | } 11 | 12 | output "client_key" { 13 | description = "Private key used by clients to authenticate to the cluster endpoint." 
14 | sensitive = true 15 | value = module.gke_cluster.client_key 16 | } 17 | 18 | output "cluster_ca_certificate" { 19 | description = "The public certificate that is the root of trust for the cluster." 20 | sensitive = true 21 | value = module.gke_cluster.cluster_ca_certificate 22 | } 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Submit a feature request for this repo. 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | 14 | 15 | **Describe the solution you'd like** 16 | A clear and concise description of what you want to happen. 17 | 18 | **Describe alternatives you've considered** 19 | A clear and concise description of any alternative solutions or features you've considered. 20 | 21 | **Additional context** 22 | Add any other context or screenshots about the feature request here. 23 | -------------------------------------------------------------------------------- /examples/gke-private-cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_endpoint" { 2 | description = "The IP address of the cluster master." 3 | sensitive = true 4 | value = module.gke_cluster.endpoint 5 | } 6 | 7 | output "client_certificate" { 8 | description = "Public certificate used by clients to authenticate to the cluster endpoint." 9 | value = module.gke_cluster.client_certificate 10 | } 11 | 12 | output "client_key" { 13 | description = "Private key used by clients to authenticate to the cluster endpoint." 14 | sensitive = true 15 | value = module.gke_cluster.client_key 16 | } 17 | 18 | output "cluster_ca_certificate" { 19 | description = "The public certificate that is the root of trust for the cluster." 20 | sensitive = true 21 | value = module.gke_cluster.cluster_ca_certificate 22 | } 23 | -------------------------------------------------------------------------------- /examples/gke-public-cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_endpoint" { 2 | description = "The IP address of the cluster master." 3 | sensitive = true 4 | value = module.gke_cluster.endpoint 5 | } 6 | 7 | output "client_certificate" { 8 | description = "Public certificate used by clients to authenticate to the cluster endpoint." 9 | value = module.gke_cluster.client_certificate 10 | } 11 | 12 | output "client_key" { 13 | description = "Private key used by clients to authenticate to the cluster endpoint." 14 | sensitive = true 15 | value = module.gke_cluster.client_key 16 | } 17 | 18 | output "cluster_ca_certificate" { 19 | description = "The public certificate that is the root of trust for the cluster." 
20 | sensitive = true 21 | value = module.gke_cluster.cluster_ca_certificate 22 | } 23 | -------------------------------------------------------------------------------- /test/validation/validate_all_modules_and_examples_test.go: -------------------------------------------------------------------------------- 1 | package testvalidate 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | 8 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | // TestValidateAllTerraformModulesAndExamples recursively finds all modules and examples (by default) subdirectories in 14 | // the repo and runs Terraform InitAndValidate on them to flush out missing variables, typos, unused vars, etc 15 | func TestValidateAllTerraformModulesAndExamples(t *testing.T) { 16 | t.Parallel() 17 | 18 | cwd, err := os.Getwd() 19 | require.NoError(t, err) 20 | 21 | opts, optsErr := test_structure.NewValidationOptions(filepath.Join(cwd, "../.."), []string{}, []string{}) 22 | require.NoError(t, optsErr) 23 | 24 | test_structure.ValidateAllTerraformModules(t, opts) 25 | } 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a bug report to help us improve. 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 14 | 15 | **Describe the bug** 16 | A clear and concise description of what the bug is. 17 | 18 | **To Reproduce** 19 | Steps to reproduce the behavior including the relevant Terraform/Terragrunt/Packer version number and any code snippets and module inputs you used. 20 | 21 | ```hcl 22 | // paste code snippets here 23 | ``` 24 | 25 | **Expected behavior** 26 | A clear and concise description of what you expected to happen. 27 | 28 | **Nice to have** 29 | - [ ] Terminal output 30 | - [ ] Screenshots 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /modules/gke-service-account/README.md: -------------------------------------------------------------------------------- 1 | # GKE Service Account Module 2 | 3 | The GKE Service Account module is used to create a GCP service account for use with a GKE cluster. It is based on 4 | the best practices referenced in this article: 5 | https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform. 6 | 7 | ## How do you use this module? 8 | 9 | * See the [root README](https://github.com/gruntwork-io/terraform-google-gke/blob/master/README.md) for instructions on 10 | using Terraform modules. 11 | * See the [examples](https://github.com/gruntwork-io/terraform-google-gke/tree/master/examples) folder for example usage. 12 | * See [variables.tf](https://github.com/gruntwork-io/terraform-google-gke/blob/master/modules/gke-service-account/variables.tf) for all the 13 | variables you can set on this module. 14 | * See [outputs.tf](https://github.com/gruntwork-io/terraform-google-gke/blob/master/modules/gke-service-account/outputs.tf) for all the variables 15 | that are outputted by this module. 
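As a quick orientation, here is a minimal, illustrative sketch of how a root configuration might call this module. It is only a sketch: the source path, project ID, and account name below are placeholder values rather than anything defined in this repo, so adjust them to your own setup and see variables.tf for the full list of inputs.

```hcl
# Hypothetical usage sketch -- all literal values below are placeholders.
module "gke_service_account" {
  # Could also be a Git source URL pinned to a release tag.
  source = "./modules/gke-service-account"

  project     = "my-gcp-project"       # placeholder GCP project ID
  name        = "example-cluster-sa"   # limited to a maximum of 28 characters
  description = "Example GKE Cluster Service Account managed by Terraform"

  # Roles granted in addition to the logging/monitoring roles the module attaches by default.
  service_account_roles = []
}

# The generated email can then be wired into a cluster or node pool definition:
output "cluster_service_account_email" {
  value = module.gke_service_account.email
}
```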
16 | -------------------------------------------------------------------------------- /test/charts/minimal-pod/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "minimal-pod.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "minimal-pod.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "minimal-pod.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /modules/gke-service-account/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED MODULE PARAMETERS 3 | # These parameters must be supplied when consuming this module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The name of the GCP Project where all resources will be launched." 8 | type = string 9 | } 10 | 11 | variable "name" { 12 | description = "The name of the custom service account. This parameter is limited to a maximum of 28 characters." 13 | type = string 14 | } 15 | 16 | # --------------------------------------------------------------------------------------------------------------------- 17 | # OPTIONAL MODULE PARAMETERS 18 | # These parameters have reasonable defaults. 19 | # --------------------------------------------------------------------------------------------------------------------- 20 | 21 | variable "description" { 22 | description = "The description of the custom service account." 23 | type = string 24 | default = "" 25 | } 26 | 27 | variable "service_account_roles" { 28 | description = "Additional roles to be added to the service account." 29 | type = list(string) 30 | default = [] 31 | } 32 | -------------------------------------------------------------------------------- /modules/gke-cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "name" { 2 | # This may seem redundant with the `name` input, but it serves an important 3 | # purpose. Terraform won't establish a dependency graph without this to interpolate on. 4 | description = "The name of the cluster master. This output is used for interpolation with node pools, other modules." 
5 | 6 | value = google_container_cluster.cluster.name 7 | } 8 | 9 | output "master_version" { 10 | description = "The Kubernetes master version." 11 | value = google_container_cluster.cluster.master_version 12 | } 13 | 14 | output "endpoint" { 15 | description = "The IP address of the cluster master." 16 | sensitive = true 17 | value = google_container_cluster.cluster.endpoint 18 | } 19 | 20 | # The following outputs allow authentication and connectivity to the GKE Cluster. 21 | output "client_certificate" { 22 | description = "Public certificate used by clients to authenticate to the cluster endpoint." 23 | value = base64decode(google_container_cluster.cluster.master_auth[0].client_certificate) 24 | } 25 | 26 | output "client_key" { 27 | description = "Private key used by clients to authenticate to the cluster endpoint." 28 | value = base64decode(google_container_cluster.cluster.master_auth[0].client_key) 29 | } 30 | 31 | output "cluster_ca_certificate" { 32 | description = "The public certificate that is the root of trust for the cluster." 33 | value = base64decode(google_container_cluster.cluster.master_auth[0].cluster_ca_certificate) 34 | } 35 | -------------------------------------------------------------------------------- /test/test_helpers.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/gruntwork-io/terratest/modules/k8s" 10 | "github.com/gruntwork-io/terratest/modules/logger" 11 | "github.com/gruntwork-io/terratest/modules/retry" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | // kubeWaitUntilNumNodes continuously polls the Kubernetes cluster until there are the expected number of nodes 16 | // registered (regardless of readiness). 17 | func kubeWaitUntilNumNodes(t *testing.T, kubectlOptions *k8s.KubectlOptions, numNodes int, retries int, sleepBetweenRetries time.Duration) { 18 | statusMsg := fmt.Sprintf("Wait for %d Kube Nodes to be registered.", numNodes) 19 | message, err := retry.DoWithRetryE( 20 | t, 21 | statusMsg, 22 | retries, 23 | sleepBetweenRetries, 24 | func() (string, error) { 25 | nodes, err := k8s.GetNodesE(t, kubectlOptions) 26 | if err != nil { 27 | return "", err 28 | } 29 | if len(nodes) != numNodes { 30 | return "", errors.New("Not enough nodes") 31 | } 32 | return "All nodes registered", nil 33 | }, 34 | ) 35 | if err != nil { 36 | logger.Logf(t, "Error waiting for expected number of nodes: %s", err) 37 | t.Fatal(err) 38 | } 39 | logger.Logf(t, message) 40 | } 41 | 42 | // Verify that all the nodes in the cluster reach the Ready state. 43 | func verifyGkeNodesAreReady(t *testing.T, kubectlOptions *k8s.KubectlOptions) { 44 | kubeWaitUntilNumNodes(t, kubectlOptions, 3, 30, 10*time.Second) 45 | k8s.WaitUntilAllNodesReady(t, kubectlOptions, 30, 10*time.Second) 46 | readyNodes := k8s.GetReadyNodes(t, kubectlOptions) 47 | assert.Equal(t, len(readyNodes), 3) 48 | } 49 | -------------------------------------------------------------------------------- /modules/gke-service-account/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 3 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 4 | # forwards compatible with 1.0.x code. 
5 | required_version = ">= 0.12.26" 6 | } 7 | 8 | # ---------------------------------------------------------------------------------------------------------------------- 9 | # CREATE SERVICE ACCOUNT 10 | # ---------------------------------------------------------------------------------------------------------------------- 11 | resource "google_service_account" "service_account" { 12 | project = var.project 13 | account_id = var.name 14 | display_name = var.description 15 | } 16 | 17 | # ---------------------------------------------------------------------------------------------------------------------- 18 | # ADD ROLES TO SERVICE ACCOUNT 19 | # Grant the service account the minimum necessary roles and permissions in order to run the GKE cluster 20 | # plus any other roles added through the 'service_account_roles' variable 21 | # ---------------------------------------------------------------------------------------------------------------------- 22 | locals { 23 | all_service_account_roles = concat(var.service_account_roles, [ 24 | "roles/logging.logWriter", 25 | "roles/monitoring.metricWriter", 26 | "roles/monitoring.viewer", 27 | "roles/stackdriver.resourceMetadata.writer" 28 | ]) 29 | } 30 | 31 | resource "google_project_iam_member" "service_account-roles" { 32 | for_each = toset(local.all_service_account_roles) 33 | 34 | project = var.project 35 | role = each.value 36 | member = "serviceAccount:${google_service_account.service_account.email}" 37 | } 38 | -------------------------------------------------------------------------------- /test/terratest_options.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/gruntwork-io/terratest/modules/terraform" 8 | ) 9 | 10 | func createTestGKEBasicHelmTerraformOptions( 11 | uniqueID, 12 | project string, 13 | region string, 14 | templatePath string, 15 | kubeConfigPath string, 16 | ) *terraform.Options { 17 | gkeClusterName := strings.ToLower(fmt.Sprintf("gke-cluster-%s", uniqueID)) 18 | gkeServiceAccountName := strings.ToLower(fmt.Sprintf("gke-cluster-sa-%s", uniqueID)) 19 | 20 | terraformVars := map[string]interface{}{ 21 | "region": region, 22 | "location": region, 23 | "project": project, 24 | "cluster_name": gkeClusterName, 25 | "cluster_service_account_name": gkeServiceAccountName, 26 | "kubectl_config_path": kubeConfigPath, 27 | } 28 | 29 | terratestOptions := terraform.Options{ 30 | TerraformDir: templatePath, 31 | Vars: terraformVars, 32 | } 33 | 34 | return &terratestOptions 35 | } 36 | 37 | func createTestGKEClusterTerraformOptions( 38 | uniqueID, 39 | project string, 40 | region string, 41 | templatePath string, 42 | ) *terraform.Options { 43 | gkeClusterName := strings.ToLower(fmt.Sprintf("gke-cluster-%s", uniqueID)) 44 | gkeServiceAccountName := strings.ToLower(fmt.Sprintf("gke-cluster-sa-%s", uniqueID)) 45 | 46 | terraformVars := map[string]interface{}{ 47 | "region": region, 48 | "location": region, 49 | "project": project, 50 | "cluster_name": gkeClusterName, 51 | "cluster_service_account_name": gkeServiceAccountName, 52 | } 53 | 54 | terratestOptions := terraform.Options{ 55 | TerraformDir: templatePath, 56 | Vars: terraformVars, 57 | } 58 | 59 | return &terratestOptions 60 | } 61 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | ## Description 8 | 9 | 10 | 
11 | ### Documentation 12 | 13 | 21 | 22 | 23 | 24 | ## TODOs 25 | 26 | Please ensure all of these TODOs are completed before asking for a review. 27 | 28 | - [ ] Ensure the branch is named correctly with the issue number. e.g: `feature/new-vpc-endpoints-955` or `bug/missing-count-param-434`. 29 | - [ ] Update the docs. 30 | - [ ] Keep the changes backward compatible where possible. 31 | - [ ] Run the pre-commit checks successfully. 32 | - [ ] Run the relevant tests successfully. 33 | - [ ] Ensure any 3rd party code adheres with our [license policy](https://www.notion.so/gruntwork/Gruntwork-licenses-and-open-source-usage-policy-f7dece1f780341c7b69c1763f22b1378) or delete this line if it's not applicable. 34 | 35 | 36 | ## Related Issues 37 | 38 | 44 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | This folder contains automated tests for this Module. All of the tests are written in [Go](https://golang.org/). 4 | Most of these are "integration tests" that deploy real infrastructure using Terraform and verify that infrastructure 5 | works as expected using a helper library called [Terratest](https://github.com/gruntwork-io/terratest). 6 | 7 | 8 | 9 | ## WARNING: These Tests May Cost You Money! 10 | 11 | **Note #1**: Many of these tests create real resources in a GCP account and then try to clean those resources up at 12 | the end of a test run. That means these tests may cost you money to run! When adding tests, please be considerate of 13 | the resources you create and take extra care to clean everything up when you're done! 14 | 15 | **Note #2**: Never forcefully shut the tests down (e.g. by hitting `CTRL + C`) or the cleanup tasks won't run! 16 | 17 | **Note #3**: We set `-timeout 60m` on all tests not because they necessarily take that long, but because Go has a 18 | default test timeout of 10 minutes, after which it forcefully kills the tests with a `SIGQUIT`, preventing the cleanup 19 | tasks from running. Therefore, we set an overly long timeout to make sure all tests have enough time to finish and 20 | clean up. 21 | 22 | 23 | 24 | ## Running the tests 25 | 26 | ### Prerequisites 27 | 28 | - Install the latest version of [Go](https://golang.org/). 29 | - Install [Terraform](https://www.terraform.io/downloads.html). 30 | - Configure your GCP credentials using one of the [options supported by the Google Cloud SDK](https://cloud.google.com/sdk/docs/authorizing). 31 | 32 | 33 | ### One-time setup 34 | 35 | Download Go dependencies: 36 | 37 | ``` 38 | cd test 39 | go mod download 40 | ``` 41 | 42 | 43 | ### Run all the tests 44 | 45 | ```bash 46 | cd test 47 | go test -v -timeout 60m 48 | ``` 49 | 50 | 51 | ### Run a specific test 52 | 53 | To run a specific test called `TestFoo`: 54 | 55 | ```bash 56 | cd test 57 | go test -v -timeout 60m -run TestFoo 58 | ``` 59 | -------------------------------------------------------------------------------- /examples/gke-public-cluster/README.md: -------------------------------------------------------------------------------- 1 | # GKE Public Cluster 2 | 3 | This example creates a Public GKE Cluster. 4 | 5 | With this example, you can create either a regional or zonal cluster. Generally, using a regional cluster is recommended 6 | over a zonal cluster. 7 | 8 | Zonal clusters have nodes in a single zone, and will have an outage if that zone has an outage.
Regional GKE Clusters 9 | are high-availability clusters where the cluster master is spread across multiple GCP zones. During a zonal outage, the 10 | Kubernetes control plane and a subset of your nodes will still be available, provided that at least 1 zone that your 11 | cluster is running in is still available. Regional control planes remain accessible during upgrades versus zonal control 12 | planes which do not. 13 | 14 | By default, regional clusters will create nodes across 3 zones in a region. If you're interested in how nodes are 15 | distributed in regional clusters, read the GCP docs about [balancing across zones](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#balancing_across_zones). 16 | 17 | The example follows best practices and enables workload-identity on the cluster for access to GKE resources to follow the 18 | principle of least privilege. However, you will need to ensure that the Identity and Access Management (IAM) API has been 19 | enabled for the given project. This can be enabled in the Google API Console: 20 | https://console.developers.google.com/apis/api/iam.googleapis.com/overview. See "Why use Workload Identity?" for more information. 21 | 22 | **Important:** Nodes in a public cluster are accessible from the public internet; try using a private cluster such as in 23 | [`gke-private-cluster`](../gke-private-cluster) to limit access to/from your nodes. Private clusters are recommended for 24 | running most apps and services. 25 | 26 | ## Why use Workload Identity? 27 | 28 | Workload Identity is the recommended way to access Google Cloud services from applications running within GKE due to its improved security properties and manageability (https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). 29 | 30 | 31 | Applications running on GKE must authenticate to use Google Cloud APIs such as the Compute APIs, Storage and Database APIs, or Machine Learning APIs. 32 | 33 | With Workload Identity, you can configure a Kubernetes service account to act as a Google service account. Any application running as the Kubernetes service account automatically authenticates as the Google service account when accessing Google Cloud APIs. This enables you to assign fine-grained identity and authorization for applications in your cluster. 34 | 35 | 36 | ## Limitations 37 | 38 | When using a regional cluster, no region shares GPU types across all of their zones; you will need to explicitly specify 39 | the zones your cluster's node pools run in in order to use GPUs. 40 | 41 | Node Pools cannot be created in zones without a master cluster; you can update the zones of your cluster master provided 42 | your new zones are within the region your cluster is present in. 43 | 44 | ## How do you run these examples? 45 | 46 | 1. Install [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) v0.14.8 or later. 47 | 1. Open `variables.tf`, and fill in any required variables that don't have a default. 48 | 1. Run `terraform get`. 49 | 1. Run `terraform plan`. 50 | 1. If the plan looks good, run `terraform apply`. 51 | 1. To set up `kubectl` to access the deployed cluster, run `gcloud beta container clusters get-credentials $CLUSTER_NAME 52 | --region $REGION --project $PROJECT`, where `CLUSTER_NAME`, `REGION` and `PROJECT` correspond to what you set for the 53 | input variables.
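To make the "Why use Workload Identity?" section above a bit more concrete, below is a rough sketch of the binding it describes: a Google service account is made impersonatable by a Kubernetes service account, which is then annotated to point back at it. This snippet is not part of the example code, and the project ID, namespace, and account names are placeholders; see the official Workload Identity documentation for the authoritative steps.

```hcl
# Illustrative sketch only -- project, namespace, and account names are placeholders.

# Allow the Kubernetes service account "app-ksa" in namespace "default" to
# impersonate the Google service account via Workload Identity.
resource "google_service_account_iam_member" "workload_identity_binding" {
  service_account_id = "projects/my-gcp-project/serviceAccounts/app-sa@my-gcp-project.iam.gserviceaccount.com"
  role               = "roles/iam.workloadIdentityUser"
  member             = "serviceAccount:my-gcp-project.svc.id.goog[default/app-ksa]"
}

# Annotate the Kubernetes service account so GKE knows which Google service
# account it maps to.
resource "kubernetes_service_account" "app" {
  metadata {
    name      = "app-ksa"
    namespace = "default"
    annotations = {
      "iam.gke.io/gcp-service-account" = "app-sa@my-gcp-project.iam.gserviceaccount.com"
    }
  }
}
```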
54 | -------------------------------------------------------------------------------- /examples/gke-private-cluster/README.md: -------------------------------------------------------------------------------- 1 | # GKE Private Cluster 2 | 3 | This example creates a Private GKE Cluster. 4 | 5 | With this example, you can create either a regional or zonal cluster. Generally, using a regional cluster is recommended 6 | over a zonal cluster. 7 | 8 | Zonal clusters have nodes in a single zones, and will have an outage if that zone has an outage. Regional GKE Clusters 9 | are high-availability clusters where the cluster master is spread across multiple GCP zones. During a zonal outage, the 10 | Kubernetes control plane and a subset of your nodes will still be available, provided that at least 1 zone that your 11 | cluster is running in is still available. Regional control planes remain accessible during upgrades versus zonal control 12 | planes which do not. 13 | 14 | By default, regional clusters will create nodes across 3 zones in a region. If you're interested in how nodes are 15 | distributed in regional clusters, read the GCP docs about [balancing across zones](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#balancing_across_zones). 16 | 17 | Nodes in a private cluster are only granted private IP addresses; they're not accessible from the public internet, as 18 | part of a defense-in-depth strategy. A private cluster can use a GCP HTTP(S) or Network load balancer to accept public 19 | traffic, or an internal load balancer from within your VPC network. 20 | 21 | Private clusters use [Private Google Access](https://cloud.google.com/vpc/docs/private-access-options) to access Google 22 | APIs such as Stackdriver, and to pull container images from Google Container Registry. To use other APIs and services 23 | over the internet, you can use a [`gke-public-cluster`](../gke-public-cluster). Private clusters are 24 | recommended for running most apps and services. 25 | 26 | ## Limitations 27 | 28 | When using a regional cluster, no region shares GPU types across all of their zones; you will need to explicitly specify 29 | the zones your cluster's node pools run in in order to use GPUs. 30 | 31 | Node Pools cannot be created in zones without a master cluster; you can update the zones of your cluster master provided 32 | your new zones are within the region your cluster is present in. 33 | 34 | 36 | Currently, you cannot use a proxy to reach the cluster master of a regional cluster through its private IP address. 37 | 38 | ## How do you run these examples? 39 | 40 | 1. Install [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) v0.14.8 or later. 41 | 1. Open `variables.tf` and fill in any required variables that don't have a default. 42 | 1. Run `terraform get`. 43 | 1. Run `terraform plan`. 44 | 1. If the plan looks good, run `terraform apply`. 45 | 46 | #### Optional: Deploy a sample application 47 | 48 | 1. To setup `kubectl` to access the deployed cluster, run `gcloud beta container clusters get-credentials $CLUSTER_NAME 49 | --region $REGION --project $PROJECT`, where `CLUSTER_NAME`, `REGION` and `PROJECT` correspond to what you set for the 50 | input variables. 51 | 1. Run `kubectl apply -f example-app/nginx.yml` to create a deployment in your cluster. 52 | 1. Run `kubectl get pods` to view the pod status and check that it is ready. 53 | 1. Run `kubectl get deployment` to view the deployment status. 54 | 1. 
Run `kubectl port-forward deployment/nginx 8080:80` 55 | 56 | Now you should be able to access your `nginx` deployment on http://localhost:8080 57 | 58 | #### Destroy the created resources 59 | 60 | 1. If you deployed the sample application, run `kubectl delete -f example-app/nginx.yml`. 61 | 1. Run `terraform destroy`. 62 | -------------------------------------------------------------------------------- /examples/gke-basic-helm/README.md: -------------------------------------------------------------------------------- 1 | # GKE Basic Helm Example 2 | 3 | The root folder contains an example of how to deploy a GKE Public Cluster with an example chart 4 | using [Helm](https://helm.sh/). 5 | 6 | ## Overview 7 | 8 | In this guide we will walk through the steps necessary to get up and running with GKE and Helm. Here are the steps: 9 | 10 | 1. [Install the necessary tools](#installing-necessary-tools) 11 | 1. [Apply the Terraform code](#apply-the-terraform-code) 12 | 1. [Verify the Deployed Chart](#verify-the-deployed-chart) 13 | 1. [Destroy the Deployed Resources](#destroy-the-deployed-resources) 14 | 15 | ## Installing necessary tools 16 | 17 | In addition to `terraform`, this example relies on `gcloud` and `kubectl` and `helm` tools to manage the cluster. 18 | 19 | This means that your system needs to be configured to be able to find `terraform`, `gcloud`, `kubectl` and `helm` 20 | client utilities on the system `PATH`. Here are the installation guides for each tool: 21 | 22 | 1. [`gcloud`](https://cloud.google.com/sdk/gcloud/) 23 | 1. [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 24 | 1. [`terraform`](https://learn.hashicorp.com/terraform/getting-started/install.html) 25 | 1. [`helm`](https://docs.helm.sh/using_helm/#installing-helm) (Minimum version v3.0) 26 | 27 | Make sure the binaries are discoverable in your `PATH` variable. See [this Stack Overflow 28 | post](https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux-unix) for instructions on 29 | setting up your `PATH` on Unix, and [this 30 | post](https://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) for instructions on 31 | Windows. 32 | 33 | ## Apply the Terraform Code 34 | 35 | Now that all the prerequisite tools are installed, we are ready to deploy the GKE cluster! 36 | 37 | 1. If you haven't already, clone this repo: 38 | - `git clone https://github.com/gruntwork-io/terraform-google-gke.git` 39 | 1. Make sure you are in the root project folder: 40 | - `cd terraform-google-gke` 41 | 1. Fill in the required variables in `variables.tf` based on your needs 42 | 1. Authenticate to GCP: 43 | - `gcloud auth login` 44 | - `gcloud auth application-default login` 45 | 1. Initialize terraform: 46 | - `terraform init` 47 | 1. Check the terraform plan: 48 | - `terraform plan` 49 | 1. Apply the terraform code: 50 | - `terraform apply` 51 | 52 | At the end of the `terraform apply`, you should now have a working GKE cluster and `kubectl` context configured. 53 | So let's verify that in the next step! 54 | 55 | ## Verify the Deployed Chart 56 | 57 | The example configures your `kubectl` context, so you can use `kubectl` and `helm` commands without further configuration. 
58 | 59 | To see the created resources, run the following commands: 60 | 61 | ``` 62 | ❯ kubectl get deployments -n default 63 | NAME READY UP-TO-DATE AVAILABLE AGE 64 | nginx 1/1 1 1 92m 65 | 66 | ❯ kubectl get service -n default 67 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 68 | kubernetes ClusterIP 10.2.0.1 443/TCP 113m 69 | nginx LoadBalancer 10.2.5.84 34.77.188.186 80:31588/TCP,443:31332/TCP 99m 70 | 71 | ❯ kubectl get pods -n default 72 | NAME READY STATUS RESTARTS AGE 73 | nginx-57454964b8-l4w9w 1/1 Running 0 99m 74 | ``` 75 | 76 | If you wish to access the deployed service, you use the `kubectl port-forward` command to forward a local port to the deployed service: 77 | 78 | ``` 79 | ❯ kubectl port-forward deployment/nginx 8080:8080 -n default 80 | Forwarding from 127.0.0.1:8080 -> 8080 81 | Forwarding from [::1]:8080 -> 8080 82 | ``` 83 | 84 | You can now access the deployed service by opening your web browser to `http://localhost:8080`. 85 | 86 | ## Destroy the deployed resources 87 | 88 | To destroy all resources created by the example, just run `terraform destroy`. 89 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | defaults: &defaults 4 | machine: 5 | image: ubuntu-2004:202104-01 6 | 7 | env: &env 8 | environment: 9 | GRUNTWORK_INSTALLER_VERSION: v0.0.30 10 | TERRATEST_LOG_PARSER_VERSION: v0.30.4 11 | MODULE_CI_VERSION: v0.37.5 12 | TERRAFORM_VERSION: 1.0.4 13 | TERRAGRUNT_VERSION: NONE 14 | PACKER_VERSION: NONE 15 | GOLANG_VERSION: 1.16 16 | GO111MODULE: auto 17 | HELM_VERSION: v3.1.2 18 | K8S_VERSION: v1.10.0 19 | KUBECONFIG: /home/circleci/.kube/config 20 | 21 | jobs: 22 | precommit: 23 | <<: *env 24 | docker: 25 | - image: circleci/python:3.8.1 26 | steps: 27 | - checkout 28 | 29 | - run: 30 | name: install dependencies 31 | command: | 32 | curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/master/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version "${GRUNTWORK_INSTALLER_VERSION}" 33 | gruntwork-install --module-name "gruntwork-module-circleci-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 34 | configure-environment-for-gruntwork-module \ 35 | --terraform-version ${TERRAFORM_VERSION} \ 36 | --terragrunt-version NONE \ 37 | --packer-version NONE \ 38 | --go-version ${GOLANG_VERSION} 39 | # Fail the build if the pre-commit hooks don't pass. Note: if you run pre-commit install locally, these hooks will 40 | # execute automatically every time before you commit, ensuring the build never fails at this step! 
41 | - run: 42 | command: | 43 | pip install pre-commit==1.21.0 cfgv==2.0.1 zipp==1.1.0 yapf 44 | go get golang.org/x/tools/cmd/goimports 45 | export GOPATH=~/go/bin && export PATH=$PATH:$GOPATH 46 | pre-commit install 47 | pre-commit run --all-files 48 | 49 | test: 50 | <<: *defaults 51 | <<: *env 52 | steps: 53 | - checkout 54 | - run: &install_gruntwork_tooling 55 | name: install gruntwork tooling 56 | command: | 57 | sudo apt-get -y update 58 | curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/master/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version "${GRUNTWORK_INSTALLER_VERSION}" 59 | gruntwork-install --module-name "gruntwork-module-circleci-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 60 | gruntwork-install --module-name "git-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 61 | gruntwork-install --binary-name "terratest_log_parser" --repo "https://github.com/gruntwork-io/terratest" --tag "${TERRATEST_LOG_PARSER_VERSION}" 62 | configure-environment-for-gruntwork-module --go-src-path ./test --terraform-version ${TERRAFORM_VERSION} --terragrunt-version ${TERRAGRUNT_VERSION} --packer-version ${PACKER_VERSION} --go-version ${GOLANG_VERSION} 63 | 64 | - run: 65 | name: install helm client 66 | command: | 67 | # install helm client 68 | curl -Lo helm.tar.gz https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz 69 | tar -xvf helm.tar.gz 70 | chmod +x linux-amd64/helm 71 | sudo mv linux-amd64/helm /usr/local/bin/ 72 | 73 | # Install external dependencies 74 | - run: 75 | name: install gcloud dependencies 76 | command: | 77 | gcloud --quiet components update beta kubectl 78 | 79 | - run: 80 | name: configure kubectl 81 | command: | 82 | mkdir -p ${HOME}/.kube 83 | touch ${HOME}/.kube/config 84 | 85 | - run: 86 | name: run tests 87 | command: | 88 | # required for gcloud and kubectl to authenticate correctly 89 | echo $GCLOUD_SERVICE_KEY | gcloud auth activate-service-account --key-file=- 90 | gcloud --quiet config set project ${GOOGLE_PROJECT_ID} 91 | gcloud --quiet config set compute/zone ${GOOGLE_COMPUTE_ZONE} 92 | # required for terraform and terratest to authenticate correctly 93 | echo $GCLOUD_SERVICE_KEY > /tmp/gcloud.json 94 | export GOOGLE_APPLICATION_CREDENTIALS="/tmp/gcloud.json" 95 | # run the tests 96 | mkdir -p /tmp/logs 97 | run-go-tests --path test --timeout 2h | tee /tmp/logs/all.log 98 | no_output_timeout: 1h 99 | 100 | - run: 101 | name: parse test output 102 | command: terratest_log_parser --testlog /tmp/logs/all.log --outputdir /tmp/logs 103 | when: always 104 | 105 | - store_artifacts: 106 | path: /tmp/logs 107 | - store_test_results: 108 | path: /tmp/logs 109 | 110 | workflows: 111 | version: 2 112 | test: 113 | jobs: 114 | - precommit: 115 | context: 116 | - GCP__automated-tests 117 | - GITHUB__PAT__gruntwork-ci 118 | filters: 119 | tags: 120 | only: /^v.*/ 121 | - test: 122 | context: 123 | - GCP__automated-tests 124 | - GITHUB__PAT__gruntwork-ci 125 | requires: 126 | - precommit 127 | filters: 128 | tags: 129 | only: /^v.*/ 130 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guidelines 2 | 3 | Contributions to this Module are very welcome! 
We follow a fairly standard [pull request process]( 4 | https://help.github.com/articles/about-pull-requests/) for contributions, subject to the following guidelines: 5 | 6 | 1. [File a GitHub issue](#file-a-github-issue) 7 | 1. [Update the documentation](#update-the-documentation) 8 | 1. [Update the tests](#update-the-tests) 9 | 1. [Update the code](#update-the-code) 10 | 1. [Create a pull request](#create-a-pull-request) 11 | 1. [Merge and release](#merge-and-release) 12 | 13 | ## File a GitHub issue 14 | 15 | Before starting any work, we recommend filing a GitHub issue in this repo. This is your chance to ask questions and 16 | get feedback from the maintainers and the community before you sink a lot of time into writing (possibly the wrong) 17 | code. If there is anything you're unsure about, just ask! 18 | 19 | ## Update the documentation 20 | 21 | We recommend updating the documentation *before* updating any code (see [Readme Driven 22 | Development](http://tom.preston-werner.com/2010/08/23/readme-driven-development.html)). This ensures the documentation 23 | stays up to date and allows you to think through the problem at a high level before you get lost in the weeds of 24 | coding. 25 | 26 | ## Update the tests 27 | 28 | We also recommend updating the automated tests *before* updating any code (see [Test Driven 29 | Development](https://en.wikipedia.org/wiki/Test-driven_development)). That means you add or update a test case, 30 | verify that it's failing with a clear error message, and *then* make the code changes to get that test to pass. This 31 | ensures the tests stay up to date and verify all the functionality in this Module, including whatever new 32 | functionality you're adding in your contribution. Check out the [tests](https://github.com/gruntwork-io/terraform-google-gke/tree/master/test) folder for instructions on running the 33 | automated tests. 34 | 35 | ## Update the code 36 | 37 | At this point, make your code changes and use your new test case to verify that everything is working. As you work, 38 | keep in mind two things: 39 | 40 | 1. Backwards compatibility 41 | 1. Downtime 42 | 43 | ### Backwards compatibility 44 | 45 | Please make every effort to avoid unnecessary backwards incompatible changes. With Terraform code, this means: 46 | 47 | 1. Do not delete, rename, or change the type of input variables. 48 | 1. If you add an input variable, it should have a `default`. 49 | 1. Do not delete, rename, or change the type of output variables. 50 | 1. Do not delete or rename a module in the `modules` folder. 51 | 52 | If a backwards incompatible change cannot be avoided, please make sure to call that out when you submit a pull request, 53 | explaining why the change is absolutely necessary. 54 | 55 | ### Downtime 56 | 57 | Bear in mind that the Terraform code in this Module is used by real companies to run real infrastructure in 58 | production, and certain types of changes could cause downtime. For example, consider the following: 59 | 60 | 1. If you rename a resource (e.g. `aws_instance "foo"` -> `aws_instance "bar"`), Terraform will see that as deleting 61 | the old resource and creating a new one. 62 | 1. If you change certain attributes of a resource (e.g. the `name` of an `aws_elb`), the cloud provider (e.g. AWS) may 63 | treat that as an instruction to delete the old resource and create a new one. 64 | 65 | Deleting certain types of resources (e.g.
virtual servers, load balancers) can cause downtime, so when making code 66 | changes, think carefully about how to avoid that. For example, can you avoid downtime by using 67 | [create_before_destroy](https://www.terraform.io/docs/configuration/resources.html#create_before_destroy)? Or via 68 | the `terraform state` command? If so, make sure to note this in your pull request. If downtime cannot be avoided, 69 | please make sure to call that out when you submit a pull request. 70 | 71 | 72 | ### Formatting and pre-commit hooks 73 | 74 | You must run `terraform fmt` on the code before committing. You can configure your computer to do this automatically 75 | using pre-commit hooks managed using [pre-commit](http://pre-commit.com/): 76 | 77 | 1. [Install pre-commit](http://pre-commit.com/#install). E.g.: `brew install pre-commit`. 78 | 1. Install the hooks: `pre-commit install`. 79 | 80 | That's it! Now just write your code, and every time you commit, `terraform fmt` will be run on the files you're 81 | committing. 82 | 83 | 84 | ## Create a pull request 85 | 86 | [Create a pull request](https://help.github.com/articles/creating-a-pull-request/) with your changes. Please make sure 87 | to include the following: 88 | 89 | 1. A description of the change, including a link to your GitHub issue. 90 | 1. The output of your automated test run, preferably in a [GitHub Gist](https://gist.github.com/). We cannot run 91 | automated tests for pull requests automatically due to [security 92 | concerns](https://circleci.com/docs/fork-pr-builds/#security-implications), so we need you to manually provide this 93 | test output so we can verify that everything is working. 94 | 1. Any notes on backwards incompatibility or downtime. 95 | 96 | ## Merge and release 97 | 98 | The maintainers for this repo will review your code and provide feedback. If everything looks good, they will merge the 99 | code and release a new version, which you'll be able to find in the [releases page](../../releases). 100 | -------------------------------------------------------------------------------- /examples/gke-private-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID where all resources will be launched." 8 | type = string 9 | } 10 | 11 | variable "location" { 12 | description = "The location (region or zone) of the GKE cluster." 13 | type = string 14 | } 15 | 16 | variable "region" { 17 | description = "The region for the network. If the cluster is regional, this must be the same region. Otherwise, it should be the region of the zone." 18 | type = string 19 | } 20 | 21 | # --------------------------------------------------------------------------------------------------------------------- 22 | # OPTIONAL PARAMETERS 23 | # These parameters have reasonable defaults. 24 | # --------------------------------------------------------------------------------------------------------------------- 25 | 26 | variable "cluster_name" { 27 | description = "The name of the Kubernetes cluster."
28 | type = string 29 | default = "example-private-cluster" 30 | } 31 | 32 | variable "cluster_service_account_name" { 33 | description = "The name of the custom service account used for the GKE cluster. This parameter is limited to a maximum of 28 characters." 34 | type = string 35 | default = "example-private-cluster-sa" 36 | } 37 | 38 | variable "cluster_service_account_description" { 39 | description = "A description of the custom service account used for the GKE cluster." 40 | type = string 41 | default = "Example GKE Cluster Service Account managed by Terraform" 42 | } 43 | 44 | variable "master_ipv4_cidr_block" { 45 | description = "The IP range in CIDR notation (size must be /28) to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network." 46 | type = string 47 | default = "10.5.0.0/28" 48 | } 49 | 50 | # For the example, we recommend a /16 network for the VPC. Note that when changing the size of the network, 51 | # you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. 52 | variable "vpc_cidr_block" { 53 | description = "The IP address range of the VPC in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." 54 | type = string 55 | default = "10.3.0.0/16" 56 | } 57 | 58 | # For the example, we recommend a /16 network for the secondary range. Note that when changing the size of the network, 59 | # you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. 60 | variable "vpc_secondary_cidr_block" { 61 | description = "The IP address range of the VPC's secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." 62 | type = string 63 | default = "10.4.0.0/16" 64 | } 65 | 66 | variable "public_subnetwork_secondary_range_name" { 67 | description = "The name associated with the pod subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork." 68 | type = string 69 | default = "public-cluster" 70 | } 71 | 72 | variable "public_services_secondary_range_name" { 73 | description = "The name associated with the services subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork." 74 | type = string 75 | default = "public-services" 76 | } 77 | 78 | variable "public_services_secondary_cidr_block" { 79 | description = "The IP address range of the VPC's public services secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27. Note: this variable is optional and is used primarily for backwards compatibility, if not specified a range will be calculated using var.secondary_cidr_block, var.secondary_cidr_subnetwork_width_delta and var.secondary_cidr_subnetwork_spacing." 80 | type = string 81 | default = null 82 | } 83 | 84 | variable "private_services_secondary_cidr_block" { 85 | description = "The IP address range of the VPC's private services secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27. 
Note: this variable is optional and is used primarily for backwards compatibility, if not specified a range will be calculated using var.secondary_cidr_block, var.secondary_cidr_subnetwork_width_delta and var.secondary_cidr_subnetwork_spacing." 86 | type = string 87 | default = null 88 | } 89 | 90 | variable "secondary_cidr_subnetwork_width_delta" { 91 | description = "The difference between your network and subnetwork's secondary range netmask; an /16 network and a /20 subnetwork would be 4." 92 | type = number 93 | default = 4 94 | } 95 | 96 | variable "secondary_cidr_subnetwork_spacing" { 97 | description = "How many subnetwork-mask sized spaces to leave between each subnetwork type's secondary ranges." 98 | type = number 99 | default = 0 100 | } 101 | 102 | variable "enable_vertical_pod_autoscaling" { 103 | description = "Enable vertical pod autoscaling" 104 | type = string 105 | default = true 106 | } 107 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID where all resources will be launched." 8 | type = string 9 | } 10 | 11 | variable "location" { 12 | description = "The location (region or zone) of the GKE cluster." 13 | type = string 14 | } 15 | 16 | variable "region" { 17 | description = "The region for the network. If the cluster is regional, this must be the same region. Otherwise, it should be the region of the zone." 18 | type = string 19 | } 20 | 21 | # --------------------------------------------------------------------------------------------------------------------- 22 | # OPTIONAL PARAMETERS 23 | # These parameters have reasonable defaults. 24 | # --------------------------------------------------------------------------------------------------------------------- 25 | 26 | variable "cluster_name" { 27 | description = "The name of the Kubernetes cluster." 28 | type = string 29 | default = "example-cluster" 30 | } 31 | 32 | variable "cluster_service_account_name" { 33 | description = "The name of the custom service account used for the GKE cluster. This parameter is limited to a maximum of 28 characters." 34 | type = string 35 | default = "example-cluster-sa" 36 | } 37 | 38 | variable "cluster_service_account_description" { 39 | description = "A description of the custom service account used for the GKE cluster." 40 | type = string 41 | default = "Example GKE Cluster Service Account managed by Terraform" 42 | } 43 | 44 | # Kubectl options 45 | 46 | variable "kubectl_config_path" { 47 | description = "Path to the kubectl config file. Defaults to $HOME/.kube/config" 48 | type = string 49 | default = "" 50 | } 51 | 52 | variable "master_ipv4_cidr_block" { 53 | description = "The IP range in CIDR notation (size must be /28) to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network." 
54 | type = string 55 | default = "10.5.0.0/28" 56 | } 57 | 58 | # For the example, we recommend a /16 network for the VPC. Note that when changing the size of the network, 59 | # you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. 60 | variable "vpc_cidr_block" { 61 | description = "The IP address range of the VPC in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." 62 | type = string 63 | default = "10.3.0.0/16" 64 | } 65 | 66 | variable "public_subnetwork_secondary_range_name" { 67 | description = "The name associated with the pod subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork." 68 | type = string 69 | default = "public-cluster" 70 | } 71 | 72 | variable "public_services_secondary_range_name" { 73 | description = "The name associated with the services subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork." 74 | type = string 75 | default = "public-services" 76 | } 77 | 78 | variable "public_services_secondary_cidr_block" { 79 | description = "The IP address range of the VPC's public services secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27. Note: this variable is optional and is used primarily for backwards compatibility, if not specified a range will be calculated using var.secondary_cidr_block, var.secondary_cidr_subnetwork_width_delta and var.secondary_cidr_subnetwork_spacing." 80 | type = string 81 | default = null 82 | } 83 | 84 | variable "private_services_secondary_cidr_block" { 85 | description = "The IP address range of the VPC's private services secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27. Note: this variable is optional and is used primarily for backwards compatibility, if not specified a range will be calculated using var.secondary_cidr_block, var.secondary_cidr_subnetwork_width_delta and var.secondary_cidr_subnetwork_spacing." 86 | type = string 87 | default = null 88 | } 89 | 90 | variable "secondary_cidr_subnetwork_width_delta" { 91 | description = "The difference between your network and subnetwork's secondary range netmask; an /16 network and a /20 subnetwork would be 4." 92 | type = number 93 | default = 4 94 | } 95 | 96 | variable "secondary_cidr_subnetwork_spacing" { 97 | description = "How many subnetwork-mask sized spaces to leave between each subnetwork type's secondary ranges." 98 | type = number 99 | default = 0 100 | } 101 | 102 | # For the example, we recommend a /16 network for the secondary range. Note that when changing the size of the network, 103 | # you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. 104 | variable "vpc_secondary_cidr_block" { 105 | description = "The IP address range of the VPC's secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." 
106 | type = string 107 | default = "10.4.0.0/16" 108 | } 109 | -------------------------------------------------------------------------------- /test/gke_cluster_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/gruntwork-io/terratest/modules/gcp" 9 | "github.com/gruntwork-io/terratest/modules/k8s" 10 | "github.com/gruntwork-io/terratest/modules/logger" 11 | "github.com/gruntwork-io/terratest/modules/random" 12 | "github.com/gruntwork-io/terratest/modules/shell" 13 | "github.com/gruntwork-io/terratest/modules/terraform" 14 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 15 | "github.com/stretchr/testify/assert" 16 | "github.com/stretchr/testify/require" 17 | ) 18 | 19 | func TestGKECluster(t *testing.T) { 20 | t.Parallel() 21 | 22 | // For convenience - uncomment these when doing local testing if you need to skip any sections. 23 | //os.Setenv("SKIP_", "true") // Does not skip any sections, but avoids copying to a temp dir 24 | //os.Setenv("SKIP_create_test_copy_of_examples", "true") 25 | //os.Setenv("SKIP_create_terratest_options", "true") 26 | //os.Setenv("SKIP_terraform_apply", "true") 27 | //os.Setenv("SKIP_configure_kubectl", "true") 28 | //os.Setenv("SKIP_wait_for_workers", "true") 29 | //os.Setenv("SKIP_terraform_verify_plan_noop", "true") 30 | //os.Setenv("SKIP_cleanup", "true") 31 | 32 | var testcases = []struct { 33 | testName string 34 | exampleFolder string 35 | overrideDefaultSA bool 36 | }{ 37 | { 38 | "PublicCluster", 39 | "gke-public-cluster", 40 | false, 41 | }, 42 | { 43 | "PrivateCluster", 44 | "gke-private-cluster", 45 | false, 46 | }, 47 | { 48 | "PublicClusterWithCustomSA", 49 | "gke-public-cluster", 50 | true, 51 | }, 52 | } 53 | 54 | for _, testCase := range testcases { 55 | // The following is necessary to make sure testCase's values don't 56 | // get updated due to concurrency within the scope of t.Run(..) 
below 57 | testCase := testCase 58 | 59 | t.Run(testCase.testName, func(t *testing.T) { 60 | t.Parallel() 61 | 62 | // Create a directory path that won't conflict 63 | workingDir := filepath.Join(".", "stages", testCase.testName) 64 | 65 | test_structure.RunTestStage(t, "create_test_copy_of_examples", func() { 66 | testFolder := test_structure.CopyTerraformFolderToTemp(t, "..", "examples") 67 | logger.Logf(t, "path to test folder %s\n", testFolder) 68 | terraformModulePath := filepath.Join(testFolder, testCase.exampleFolder) 69 | test_structure.SaveString(t, workingDir, "gkeClusterTerraformModulePath", terraformModulePath) 70 | }) 71 | 72 | test_structure.RunTestStage(t, "create_terratest_options", func() { 73 | gkeClusterTerraformModulePath := test_structure.LoadString(t, workingDir, "gkeClusterTerraformModulePath") 74 | tmpKubeConfigPath := k8s.CopyHomeKubeConfigToTemp(t) 75 | kubectlOptions := k8s.NewKubectlOptions("", tmpKubeConfigPath, "kube-system") 76 | uniqueID := random.UniqueId() 77 | project := gcp.GetGoogleProjectIDFromEnvVar(t) 78 | region := gcp.GetRandomRegion(t, project, nil, nil) 79 | gkeClusterTerratestOptions := createTestGKEClusterTerraformOptions(uniqueID, project, region, gkeClusterTerraformModulePath) 80 | if testCase.overrideDefaultSA { 81 | gkeClusterTerratestOptions.Vars["override_default_node_pool_service_account"] = "1" 82 | } 83 | test_structure.SaveString(t, workingDir, "uniqueID", uniqueID) 84 | test_structure.SaveString(t, workingDir, "project", project) 85 | test_structure.SaveString(t, workingDir, "region", region) 86 | test_structure.SaveTerraformOptions(t, workingDir, gkeClusterTerratestOptions) 87 | test_structure.SaveKubectlOptions(t, workingDir, kubectlOptions) 88 | }) 89 | 90 | defer test_structure.RunTestStage(t, "cleanup", func() { 91 | gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 92 | terraform.Destroy(t, gkeClusterTerratestOptions) 93 | 94 | kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) 95 | err := os.Remove(kubectlOptions.ConfigPath) 96 | require.NoError(t, err) 97 | }) 98 | 99 | test_structure.RunTestStage(t, "terraform_apply", func() { 100 | gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 101 | terraform.InitAndApply(t, gkeClusterTerratestOptions) 102 | }) 103 | 104 | test_structure.RunTestStage(t, "configure_kubectl", func() { 105 | gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 106 | kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) 107 | project := test_structure.LoadString(t, workingDir, "project") 108 | region := test_structure.LoadString(t, workingDir, "region") 109 | clusterName := gkeClusterTerratestOptions.Vars["cluster_name"].(string) 110 | 111 | // gcloud beta container clusters get-credentials example-cluster --region australia-southeast1 --project dev-sandbox-123456 112 | cmd := shell.Command{ 113 | Command: "gcloud", 114 | Args: []string{"beta", "container", "clusters", "get-credentials", clusterName, "--region", region, "--project", project}, 115 | Env: map[string]string{ 116 | "KUBECONFIG": kubectlOptions.ConfigPath, 117 | }, 118 | } 119 | 120 | shell.RunCommand(t, cmd) 121 | }) 122 | 123 | test_structure.RunTestStage(t, "wait_for_workers", func() { 124 | kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) 125 | verifyGkeNodesAreReady(t, kubectlOptions) 126 | }) 127 | 128 | test_structure.RunTestStage(t, "terraform_verify_plan_noop", func() { 129 | gkeClusterTerratestOptions 
:= test_structure.LoadTerraformOptions(t, workingDir) 130 | planResult := terraform.InitAndPlan(t, gkeClusterTerratestOptions) 131 | resourceCount := terraform.GetResourceCount(t, planResult) 132 | assert.Equal(t, 0, resourceCount.Change) 133 | assert.Equal(t, 0, resourceCount.Add) 134 | }) 135 | }) 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /examples/gke-public-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These variables are expected to be passed in by the operator. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID where all resources will be launched." 8 | type = string 9 | } 10 | 11 | variable "location" { 12 | description = "The location (region or zone) of the GKE cluster." 13 | type = string 14 | } 15 | 16 | variable "region" { 17 | description = "The region for the network. If the cluster is regional, this must be the same region. Otherwise, it should be the region of the zone." 18 | type = string 19 | } 20 | 21 | # --------------------------------------------------------------------------------------------------------------------- 22 | # OPTIONAL PARAMETERS 23 | # These parameters have reasonable defaults. 24 | # --------------------------------------------------------------------------------------------------------------------- 25 | 26 | variable "cluster_name" { 27 | description = "The name of the Kubernetes cluster." 28 | type = string 29 | default = "example-cluster" 30 | } 31 | 32 | variable "cluster_service_account_name" { 33 | description = "The name of the custom service account used for the GKE cluster. This parameter is limited to a maximum of 28 characters." 34 | type = string 35 | default = "example-cluster-sa" 36 | } 37 | 38 | variable "cluster_service_account_description" { 39 | description = "A description of the custom service account used for the GKE cluster." 40 | type = string 41 | default = "Example GKE Cluster Service Account managed by Terraform" 42 | } 43 | 44 | # For the example, we recommend a /16 network for the VPC. Note that when changing the size of the network, 45 | # you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. 46 | variable "vpc_cidr_block" { 47 | description = "The IP address range of the VPC in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." 48 | type = string 49 | default = "10.6.0.0/16" 50 | } 51 | 52 | # For the example, we recommend a /16 network for the secondary range. Note that when changing the size of the network, 53 | # you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. 54 | variable "vpc_secondary_cidr_block" { 55 | description = "The IP address range of the VPC's secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." 56 | type = string 57 | default = "10.7.0.0/16" 58 | } 59 | 60 | variable "public_subnetwork_secondary_range_name" { 61 | description = "The name associated with the pod subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. 
The name must be unique within the subnetwork." 62 | type = string 63 | default = "public-cluster" 64 | } 65 | 66 | variable "public_services_secondary_range_name" { 67 | description = "The name associated with the services subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork." 68 | type = string 69 | default = "public-services" 70 | } 71 | 72 | variable "public_services_secondary_cidr_block" { 73 | description = "The IP address range of the VPC's public services secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27. Note: this variable is optional and is used primarily for backwards compatibility, if not specified a range will be calculated using var.secondary_cidr_block, var.secondary_cidr_subnetwork_width_delta and var.secondary_cidr_subnetwork_spacing." 74 | type = string 75 | default = null 76 | } 77 | 78 | variable "private_services_secondary_cidr_block" { 79 | description = "The IP address range of the VPC's private services secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27. Note: this variable is optional and is used primarily for backwards compatibility, if not specified a range will be calculated using var.secondary_cidr_block, var.secondary_cidr_subnetwork_width_delta and var.secondary_cidr_subnetwork_spacing." 80 | type = string 81 | default = null 82 | } 83 | 84 | variable "secondary_cidr_subnetwork_width_delta" { 85 | description = "The difference between your network and subnetwork's secondary range netmask; an /16 network and a /20 subnetwork would be 4." 86 | type = number 87 | default = 4 88 | } 89 | 90 | variable "secondary_cidr_subnetwork_spacing" { 91 | description = "How many subnetwork-mask sized spaces to leave between each subnetwork type's secondary ranges." 92 | type = number 93 | default = 0 94 | } 95 | 96 | variable "enable_vertical_pod_autoscaling" { 97 | description = "Enable vertical pod autoscaling" 98 | type = string 99 | default = true 100 | } 101 | 102 | variable "enable_workload_identity" { 103 | description = "Enable Workload Identity on the cluster" 104 | default = true 105 | type = bool 106 | } 107 | 108 | # --------------------------------------------------------------------------------------------------------------------- 109 | # TEST PARAMETERS 110 | # These parameters are only used during testing and should not be touched. 
111 | # --------------------------------------------------------------------------------------------------------------------- 112 | 113 | variable "override_default_node_pool_service_account" { 114 | description = "When true, this will use the service account that is created for use with the default node pool that comes with all GKE clusters" 115 | type = bool 116 | default = false 117 | } 118 | -------------------------------------------------------------------------------- /test/gke_basic_helm_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "testing" 9 | "time" 10 | 11 | "github.com/gruntwork-io/terratest/modules/gcp" 12 | "github.com/gruntwork-io/terratest/modules/helm" 13 | http_helper "github.com/gruntwork-io/terratest/modules/http-helper" 14 | "github.com/gruntwork-io/terratest/modules/k8s" 15 | "github.com/gruntwork-io/terratest/modules/logger" 16 | "github.com/gruntwork-io/terratest/modules/random" 17 | "github.com/gruntwork-io/terratest/modules/terraform" 18 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 19 | "github.com/stretchr/testify/require" 20 | ) 21 | 22 | func TestGKEBasicHelm(t *testing.T) { 23 | t.Parallel() 24 | 25 | // Uncomment any of the following to skip that section during the test 26 | //os.Setenv("SKIP_create_test_copy_of_examples", "true") 27 | //os.Setenv("SKIP_create_terratest_options", "true") 28 | //os.Setenv("SKIP_terraform_apply", "true") 29 | //os.Setenv("SKIP_wait_for_workers", "true") 30 | //os.Setenv("SKIP_helm_install", "true") 31 | //os.Setenv("SKIP_cleanup", "true") 32 | 33 | // Create a directory path that won't conflict 34 | workingDir := filepath.Join(".", "stages", t.Name()) 35 | 36 | test_structure.RunTestStage(t, "create_test_copy_of_examples", func() { 37 | // The example is the root example 38 | testFolder := test_structure.CopyTerraformFolderToTemp(t, "../", ".") 39 | logger.Logf(t, "path to test folder %s\n", testFolder) 40 | terraformModulePath := filepath.Join(testFolder, ".") 41 | test_structure.SaveString(t, workingDir, "gkeBasicHelmTerraformModulePath", terraformModulePath) 42 | }) 43 | 44 | test_structure.RunTestStage(t, "create_terratest_options", func() { 45 | gkeBasicHelmTerraformModulePath := test_structure.LoadString(t, workingDir, "gkeBasicHelmTerraformModulePath") 46 | tmpKubeConfigPath := k8s.CopyHomeKubeConfigToTemp(t) 47 | kubectlOptions := k8s.NewKubectlOptions("", tmpKubeConfigPath, "kube-system") 48 | uniqueID := random.UniqueId() 49 | project := gcp.GetGoogleProjectIDFromEnvVar(t) 50 | region := gcp.GetRandomRegion(t, project, nil, nil) 51 | gkeClusterTerratestOptions := createTestGKEBasicHelmTerraformOptions(uniqueID, project, region, 52 | gkeBasicHelmTerraformModulePath, tmpKubeConfigPath) 53 | test_structure.SaveString(t, workingDir, "uniqueID", uniqueID) 54 | test_structure.SaveString(t, workingDir, "project", project) 55 | test_structure.SaveString(t, workingDir, "region", region) 56 | test_structure.SaveTerraformOptions(t, workingDir, gkeClusterTerratestOptions) 57 | test_structure.SaveKubectlOptions(t, workingDir, kubectlOptions) 58 | }) 59 | 60 | defer test_structure.RunTestStage(t, "cleanup", func() { 61 | gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 62 | terraform.Destroy(t, gkeClusterTerratestOptions) 63 | 64 | // Delete the kubectl entry we created 65 | kubectlOptions := test_structure.LoadKubectlOptions(t, 
workingDir) 66 | err := os.Remove(kubectlOptions.ConfigPath) 67 | require.NoError(t, err) 68 | }) 69 | 70 | test_structure.RunTestStage(t, "terraform_apply", func() { 71 | gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 72 | terraform.InitAndApply(t, gkeClusterTerratestOptions) 73 | }) 74 | 75 | test_structure.RunTestStage(t, "wait_for_workers", func() { 76 | kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) 77 | verifyGkeNodesAreReady(t, kubectlOptions) 78 | }) 79 | 80 | // Do an additional helm install 81 | test_structure.RunTestStage(t, "helm_install", func() { 82 | // Path to the helm chart we will test 83 | helmChartPath := "charts/minimal-pod" 84 | 85 | // Load the temporary kubectl config file and use its current context 86 | // We also specify that we are working in the default namespace (required to get the Pod) 87 | kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) 88 | kubectlOptions.Namespace = "default" 89 | 90 | // We generate a unique release name so that we can refer to it after deployment. 91 | // By doing so, we can schedule the delete call here so that at the end of the test, we run 92 | // `helm delete RELEASE_NAME` to clean up any resources that were created. 93 | releaseName := fmt.Sprintf("nginx-%s", strings.ToLower(random.UniqueId())) 94 | 95 | // Set up the args. For this test, we will set the following input values: 96 | // - image=nginx:1.15.8 97 | // - fullnameOverride=minimal-pod-RANDOM_STRING 98 | // We use a fullnameOverride so we can find the Pod later during verification 99 | podName := fmt.Sprintf("%s-minimal-pod", releaseName) 100 | options := &helm.Options{ 101 | SetValues: map[string]string{ 102 | "image": "nginx:1.15.8", 103 | "fullnameOverride": podName, 104 | }, 105 | KubectlOptions: kubectlOptions, 106 | } 107 | 108 | // Deploy the chart using `helm install`. Note that we use the version without `E`, since we want to assert the 109 | // install succeeds without any errors. 110 | helm.Install(t, options, helmChartPath, releaseName) 111 | 112 | // Now that the chart is deployed, verify the deployment. This function will open a tunnel to the Pod and hit the 113 | // nginx container endpoint. 114 | verifyNginxPod(t, kubectlOptions, podName) 115 | }) 116 | } 117 | 118 | // verifyNginxPod will open a tunnel to the Pod and hit the endpoint to verify the nginx welcome page is shown. 119 | func verifyNginxPod(t *testing.T, kubectlOptions *k8s.KubectlOptions, podName string) { 120 | // Wait for the pod to come up. It takes some time for the Pod to start, so retry a few times. 121 | retries := 15 122 | sleep := 5 * time.Second 123 | k8s.WaitUntilPodAvailable(t, kubectlOptions, podName, retries, sleep) 124 | 125 | // We will first open a tunnel to the pod, making sure to close it at the end of the test. 126 | tunnel := k8s.NewTunnel(kubectlOptions, k8s.ResourceTypePod, podName, 0, 80) 127 | defer tunnel.Close() 128 | tunnel.ForwardPort(t) 129 | 130 | // ... and now that we have the tunnel, we will verify that we get back a 200 OK with the nginx welcome page. 131 | // It takes some time for the Pod to start, so retry a few times.
132 | endpoint := fmt.Sprintf("http://%s", tunnel.Endpoint()) 133 | http_helper.HttpGetWithRetryWithCustomValidation( 134 | t, 135 | endpoint, 136 | nil, 137 | retries, 138 | sleep, 139 | func(statusCode int, body string) bool { 140 | return statusCode == 200 && strings.Contains(body, "Welcome to nginx") 141 | }, 142 | ) 143 | } 144 | -------------------------------------------------------------------------------- /GRUNTWORK_PHILOSOPHY.md: -------------------------------------------------------------------------------- 1 | # Gruntwork Philosophy 2 | 3 | At Gruntwork, we strive to accelerate the deployment of production grade infrastructure by providing a library of 4 | stable, reusable, and battle tested infrastructure as code organized into a series of [modules](#what-is-a-module) with 5 | [submodules](#what-is-a-submodule). Each module represents a particular set of infrastructure that is componentized into 6 | smaller pieces represented by the submodules within the module. By doing so, we have built a composable library that can 7 | be combined into building out everything from simple single service deployments to complicated microservice setups so 8 | that your infrastructure can grow with your business needs. Every module we provide is built with the [production grade 9 | infrastructure checklist](#production-grade-infrastructure-checklist) in mind, ensuring that the services you deploy are 10 | resilient, fault tolerant, and scalable. 11 | 12 | 13 | ## What is a Module? 14 | 15 | A Module is a reusable, tested, documented, configurable, best-practices definition of a single piece of Infrastructure 16 | (e.g., Docker cluster, VPC, Jenkins, Consul), written using a combination of [Terraform](https://www.terraform.io/), Go, 17 | and Bash. A module contains a set of automated tests, documentation, and examples that have been proven in production, 18 | providing the underlying infrastructure for [Gruntwork's customers](https://www.gruntwork.io/customers). 19 | 20 | Instead of figuring out the details of how to run a piece of infrastructure from scratch, you can reuse existing code 21 | that has been proven in production. And instead of maintaining all that infrastructure code yourself, you can leverage 22 | the work of the community to pick up infrastructure improvements through a version number bump. 23 | 24 | 25 | ## What is a Submodule? 26 | 27 | Each Infrastructure Module consists of one or more orthogonal Submodules that handle some specific aspect of that 28 | Infrastructure Module's functionality. Breaking the code up into multiple submodules makes it easier to reuse and 29 | compose to handle many different use cases. Although Modules are designed to provide an end to end solution to manage 30 | the relevant infrastructure by combining the Submodules defined in the Module, Submodules can be used independently for 31 | specific functionality that you need in your infrastructure code. 32 | 33 | 34 | ## Production Grade Infrastructure Checklist 35 | 36 | At Gruntwork, we have learned over the years that it is not enough to just get the services up and running in a publicly 37 | accessible space to call your application "production-ready." There are many more things to consider, and oftentimes 38 | many of these considerations are missing in the deployment plan of applications. These topics come up as afterthoughts, 39 | and are learned the hard way after the fact.
That is why we codified all of them into a checklist that can be used as a 40 | reference to help ensure that they are considered before your application goes to production, and conscious decisions 41 | are made to neglect particular components if needed, as opposed to accidentally omitting them from consideration. 42 | 43 | 47 | 48 | | Task | Description | Example tools | 49 | |--------------------|-------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| 50 | | Install | Install the software binaries and all dependencies. | Bash, Chef, Ansible, Puppet | 51 | | Configure | Configure the software at runtime. Includes port settings, TLS certs, service discovery, leaders, followers, replication, etc. | Bash, Chef, Ansible, Puppet | 52 | | Provision | Provision the infrastructure. Includes EC2 instances, load balancers, network topology, security groups, IAM permissions, etc. | Terraform, CloudFormation | 53 | | Deploy | Deploy the service on top of the infrastructure. Roll out updates with no downtime. Includes blue-green, rolling, and canary deployments. | Scripts, Orchestration tools (ECS, k8s, Nomad) | 54 | | High availability | Withstand outages of individual processes, EC2 instances, services, Availability Zones, and regions. | Multi AZ, multi-region, replication, ASGs, ELBs | 55 | | Scalability | Scale up and down in response to load. Scale horizontally (more servers) and/or vertically (bigger servers). | ASGs, replication, sharding, caching, divide and conquer | 56 | | Performance | Optimize CPU, memory, disk, network, GPU, and usage. Includes query tuning, benchmarking, load testing, and profiling. | Dynatrace, valgrind, VisualVM, ab, Jmeter | 57 | | Networking | Configure static and dynamic IPs, ports, service discovery, firewalls, DNS, SSH access, and VPN access. | EIPs, ENIs, VPCs, NACLs, SGs, Route 53, OpenVPN | 58 | | Security | Encryption in transit (TLS) and on disk, authentication, authorization, secrets management, server hardening. | ACM, EBS Volumes, Cognito, Vault, CIS | 59 | | Metrics | Availability metrics, business metrics, app metrics, server metrics, events, observability, tracing, and alerting. | CloudWatch, DataDog, New Relic, Honeycomb | 60 | | Logs | Rotate logs on disk. Aggregate log data to a central location. | CloudWatch logs, ELK, Sumo Logic, Papertrail | 61 | | Backup and Restore | Make backups of DBs, caches, and other data on a scheduled basis. Replicate to separate region/account. | RDS, ElastiCache, ec2-snapper, Lambda | 62 | | Cost optimization | Pick proper instance types, use spot and reserved instances, use auto scaling, and nuke unused resources. | ASGs, spot instances, reserved instances | 63 | | Documentation | Document your code, architecture, and practices. Create playbooks to respond to incidents. | READMEs, wikis, Slack | 64 | | Tests | Write automated tests for your infrastructure code. Run tests after every commit and nightly.
| Terratest | 65 | -------------------------------------------------------------------------------- /examples/gke-public-cluster/main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # DEPLOY A GKE PUBLIC CLUSTER IN GOOGLE CLOUD PLATFORM 3 | # This is an example of how to use the gke-cluster module to deploy a public Kubernetes cluster in GCP with a 4 | # Load Balancer in front of it. 5 | # --------------------------------------------------------------------------------------------------------------------- 6 | 7 | terraform { 8 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 9 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 10 | # forwards compatible with 1.0.x code. 11 | required_version = ">= 0.12.26" 12 | 13 | required_providers { 14 | google = { 15 | source = "hashicorp/google" 16 | version = "~> 3.43.0" 17 | } 18 | google-beta = { 19 | source = "hashicorp/google-beta" 20 | version = "~> 3.43.0" 21 | } 22 | } 23 | } 24 | 25 | # --------------------------------------------------------------------------------------------------------------------- 26 | # PREPARE PROVIDERS 27 | # --------------------------------------------------------------------------------------------------------------------- 28 | 29 | provider "google" { 30 | project = var.project 31 | region = var.region 32 | } 33 | 34 | provider "google-beta" { 35 | project = var.project 36 | region = var.region 37 | } 38 | 39 | # --------------------------------------------------------------------------------------------------------------------- 40 | # DEPLOY A PUBLIC CLUSTER IN GOOGLE CLOUD PLATFORM 41 | # --------------------------------------------------------------------------------------------------------------------- 42 | 43 | module "gke_cluster" { 44 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 45 | # to a specific version of the modules, such as the following example: 46 | # source = "github.com/gruntwork-io/terraform-google-gke.git//modules/gke-cluster?ref=v0.2.0" 47 | source = "../../modules/gke-cluster" 48 | 49 | name = var.cluster_name 50 | 51 | project = var.project 52 | location = var.location 53 | network = module.vpc_network.network 54 | 55 | # We're deploying the cluster in the 'public' subnetwork to allow outbound internet access 56 | # See the network access tier table for full details: 57 | # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier 58 | subnetwork = module.vpc_network.public_subnetwork 59 | cluster_secondary_range_name = module.vpc_network.public_subnetwork_secondary_range_name 60 | services_secondary_range_name = module.vpc_network.public_services_secondary_range_name 61 | 62 | alternative_default_service_account = var.override_default_node_pool_service_account ? 
module.gke_service_account.email : null 63 | 64 | enable_vertical_pod_autoscaling = var.enable_vertical_pod_autoscaling 65 | enable_workload_identity = var.enable_workload_identity 66 | 67 | resource_labels = { 68 | environment = "testing" 69 | } 70 | } 71 | 72 | # --------------------------------------------------------------------------------------------------------------------- 73 | # CREATE A NODE POOL 74 | # --------------------------------------------------------------------------------------------------------------------- 75 | 76 | resource "google_container_node_pool" "node_pool" { 77 | provider = google-beta 78 | 79 | name = "main-pool" 80 | project = var.project 81 | location = var.location 82 | cluster = module.gke_cluster.name 83 | 84 | initial_node_count = "1" 85 | 86 | autoscaling { 87 | min_node_count = "1" 88 | max_node_count = "5" 89 | } 90 | 91 | management { 92 | auto_repair = "true" 93 | auto_upgrade = "true" 94 | } 95 | 96 | node_config { 97 | image_type = "COS" 98 | machine_type = "n1-standard-1" 99 | 100 | labels = { 101 | all-pools-example = "true" 102 | } 103 | 104 | # Add a public tag to the instances. See the network access tier table for full details: 105 | # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier 106 | tags = [ 107 | module.vpc_network.public, 108 | "public-pool-example", 109 | ] 110 | 111 | disk_size_gb = "30" 112 | disk_type = "pd-standard" 113 | preemptible = false 114 | 115 | service_account = module.gke_service_account.email 116 | 117 | workload_metadata_config { 118 | node_metadata = "GKE_METADATA_SERVER" 119 | } 120 | 121 | oauth_scopes = [ 122 | "https://www.googleapis.com/auth/cloud-platform", 123 | ] 124 | } 125 | 126 | lifecycle { 127 | ignore_changes = [initial_node_count] 128 | } 129 | 130 | timeouts { 131 | create = "30m" 132 | update = "30m" 133 | delete = "30m" 134 | } 135 | } 136 | 137 | # --------------------------------------------------------------------------------------------------------------------- 138 | # CREATE A CUSTOM SERVICE ACCOUNT TO USE WITH THE GKE CLUSTER 139 | # --------------------------------------------------------------------------------------------------------------------- 140 | 141 | module "gke_service_account" { 142 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 143 | # to a specific version of the modules, such as the following example: 144 | # source = "github.com/gruntwork-io/terraform-google-gke.git//modules/gke-service-account?ref=v0.2.0" 145 | source = "../../modules/gke-service-account" 146 | 147 | name = var.cluster_service_account_name 148 | project = var.project 149 | description = var.cluster_service_account_description 150 | } 151 | 152 | # --------------------------------------------------------------------------------------------------------------------- 153 | # CREATE A NETWORK TO DEPLOY THE CLUSTER TO 154 | # --------------------------------------------------------------------------------------------------------------------- 155 | 156 | resource "random_string" "suffix" { 157 | length = 4 158 | special = false 159 | upper = false 160 | } 161 | 162 | module "vpc_network" { 163 | source = "github.com/gruntwork-io/terraform-google-network.git//modules/vpc-network?ref=v0.8.2" 164 | 165 | name_prefix = "${var.cluster_name}-network-${random_string.suffix.result}" 166 | project = var.project 167 | region = var.region 168 | 169 | cidr_block = var.vpc_cidr_block 170 | 
secondary_cidr_block = var.vpc_secondary_cidr_block 171 | 172 | public_subnetwork_secondary_range_name = var.public_subnetwork_secondary_range_name 173 | public_services_secondary_range_name = var.public_services_secondary_range_name 174 | public_services_secondary_cidr_block = var.public_services_secondary_cidr_block 175 | private_services_secondary_cidr_block = var.private_services_secondary_cidr_block 176 | secondary_cidr_subnetwork_width_delta = var.secondary_cidr_subnetwork_width_delta 177 | secondary_cidr_subnetwork_spacing = var.secondary_cidr_subnetwork_spacing 178 | } 179 | -------------------------------------------------------------------------------- /modules/gke-cluster/main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # DEPLOY A GKE CLUSTER 3 | # This module deploys a GKE cluster, a managed, production-ready environment for deploying containerized applications. 4 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | terraform { 7 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 8 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 9 | # forwards compatible with 1.0.x code. 10 | required_version = ">= 0.12.26" 11 | } 12 | 13 | locals { 14 | workload_identity_config = !var.enable_workload_identity ? [] : var.identity_namespace == null ? [{ 15 | identity_namespace = "${var.project}.svc.id.goog" }] : [{ identity_namespace = var.identity_namespace 16 | }] 17 | } 18 | 19 | # --------------------------------------------------------------------------------------------------------------------- 20 | # Create the GKE Cluster 21 | # We want to make a cluster with no node pools, and manage them all with the fine-grained google_container_node_pool resource 22 | # --------------------------------------------------------------------------------------------------------------------- 23 | 24 | resource "google_container_cluster" "cluster" { 25 | provider = google-beta 26 | 27 | name = var.name 28 | description = var.description 29 | 30 | project = var.project 31 | location = var.location 32 | network = var.network 33 | subnetwork = var.subnetwork 34 | 35 | logging_service = var.logging_service 36 | monitoring_service = var.monitoring_service 37 | min_master_version = local.kubernetes_version 38 | 39 | # Whether to enable legacy Attribute-Based Access Control (ABAC). RBAC has significant security advantages over ABAC. 40 | enable_legacy_abac = var.enable_legacy_abac 41 | 42 | # The API requires a node pool or an initial count to be defined; that initial count creates the 43 | # "default node pool" with that # of nodes. 44 | # So, we need to set an initial_node_count of 1. This will make a default node 45 | # pool with server-defined defaults that Terraform will immediately delete as 46 | # part of Create. This leaves us in our desired state- with a cluster master 47 | # with no node pools. 48 | remove_default_node_pool = true 49 | 50 | initial_node_count = 1 51 | 52 | # If we have an alternative default service account to use, set on the node_config so that the default node pool can 53 | # be created successfully. 54 | dynamic "node_config" { 55 | # Ideally we can do `for_each = var.alternative_default_service_account != null ? 
[object] : []`, but due to a 56 | # terraform bug, this doesn't work. See https://github.com/hashicorp/terraform/issues/21465. So we simulate it using 57 | # a for expression. 58 | for_each = [ 59 | for x in [var.alternative_default_service_account] : x if var.alternative_default_service_account != null 60 | ] 61 | 62 | content { 63 | service_account = node_config.value 64 | } 65 | } 66 | 67 | # ip_allocation_policy.use_ip_aliases defaults to true, since we define the block `ip_allocation_policy` 68 | ip_allocation_policy { 69 | // Choose the range, but let GCP pick the IPs within the range 70 | cluster_secondary_range_name = var.cluster_secondary_range_name 71 | services_secondary_range_name = var.services_secondary_range_name 72 | } 73 | 74 | # We can optionally control access to the cluster 75 | # See https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters 76 | private_cluster_config { 77 | enable_private_endpoint = var.disable_public_endpoint 78 | enable_private_nodes = var.enable_private_nodes 79 | master_ipv4_cidr_block = var.master_ipv4_cidr_block 80 | } 81 | 82 | addons_config { 83 | http_load_balancing { 84 | disabled = !var.http_load_balancing 85 | } 86 | 87 | horizontal_pod_autoscaling { 88 | disabled = !var.horizontal_pod_autoscaling 89 | } 90 | 91 | network_policy_config { 92 | disabled = !var.enable_network_policy 93 | } 94 | } 95 | 96 | network_policy { 97 | enabled = var.enable_network_policy 98 | 99 | # Tigera (Calico Felix) is the only provider 100 | provider = var.enable_network_policy ? "CALICO" : "PROVIDER_UNSPECIFIED" 101 | } 102 | 103 | vertical_pod_autoscaling { 104 | enabled = var.enable_vertical_pod_autoscaling 105 | } 106 | 107 | master_auth { 108 | username = var.basic_auth_username 109 | password = var.basic_auth_password 110 | } 111 | 112 | dynamic "master_authorized_networks_config" { 113 | for_each = var.master_authorized_networks_config 114 | content { 115 | dynamic "cidr_blocks" { 116 | for_each = lookup(master_authorized_networks_config.value, "cidr_blocks", []) 117 | content { 118 | cidr_block = cidr_blocks.value.cidr_block 119 | display_name = lookup(cidr_blocks.value, "display_name", null) 120 | } 121 | } 122 | } 123 | } 124 | 125 | maintenance_policy { 126 | daily_maintenance_window { 127 | start_time = var.maintenance_start_time 128 | } 129 | } 130 | 131 | lifecycle { 132 | ignore_changes = [ 133 | # Since we provide `remove_default_node_pool = true`, the `node_config` is only relevant for a valid construction of 134 | # the GKE cluster in the initial creation. As such, any changes to the `node_config` should be ignored. 
135 | node_config, 136 | ] 137 | } 138 | 139 | # If var.gsuite_domain_name is non-empty, initialize the cluster with a G Suite security group 140 | dynamic "authenticator_groups_config" { 141 | for_each = [ 142 | for x in [var.gsuite_domain_name] : x if var.gsuite_domain_name != null 143 | ] 144 | 145 | content { 146 | security_group = "gke-security-groups@${authenticator_groups_config.value}" 147 | } 148 | } 149 | 150 | # If var.secrets_encryption_kms_key is non-empty, create ´database_encryption´ -block to encrypt secrets at rest in etcd 151 | dynamic "database_encryption" { 152 | for_each = [ 153 | for x in [var.secrets_encryption_kms_key] : x if var.secrets_encryption_kms_key != null 154 | ] 155 | 156 | content { 157 | state = "ENCRYPTED" 158 | key_name = database_encryption.value 159 | } 160 | } 161 | 162 | dynamic "workload_identity_config" { 163 | for_each = local.workload_identity_config 164 | 165 | content { 166 | identity_namespace = workload_identity_config.value.identity_namespace 167 | } 168 | } 169 | 170 | resource_labels = var.resource_labels 171 | } 172 | 173 | # --------------------------------------------------------------------------------------------------------------------- 174 | # Prepare locals to keep the code cleaner 175 | # --------------------------------------------------------------------------------------------------------------------- 176 | 177 | locals { 178 | latest_version = data.google_container_engine_versions.location.latest_master_version 179 | kubernetes_version = var.kubernetes_version != "latest" ? var.kubernetes_version : local.latest_version 180 | network_project = var.network_project != "" ? var.network_project : var.project 181 | } 182 | 183 | # --------------------------------------------------------------------------------------------------------------------- 184 | # Pull in data 185 | # --------------------------------------------------------------------------------------------------------------------- 186 | 187 | // Get available master versions in our location to determine the latest version 188 | data "google_container_engine_versions" "location" { 189 | location = var.location 190 | project = var.project 191 | } 192 | -------------------------------------------------------------------------------- /examples/gke-private-cluster/main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # DEPLOY A GKE PRIVATE CLUSTER IN GOOGLE CLOUD PLATFORM 3 | # This is an example of how to use the gke-cluster module to deploy a private Kubernetes cluster in GCP 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | terraform { 7 | # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting 8 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 9 | # forwards compatible with 1.0.x code. 
10 | required_version = ">= 0.12.26" 11 | 12 | required_providers { 13 | google = { 14 | source = "hashicorp/google" 15 | version = "~> 3.43.0" 16 | } 17 | google-beta = { 18 | source = "hashicorp/google-beta" 19 | version = "~> 3.43.0" 20 | } 21 | } 22 | } 23 | 24 | # --------------------------------------------------------------------------------------------------------------------- 25 | # PREPARE PROVIDERS 26 | # --------------------------------------------------------------------------------------------------------------------- 27 | 28 | provider "google" { 29 | project = var.project 30 | region = var.region 31 | } 32 | 33 | provider "google-beta" { 34 | project = var.project 35 | region = var.region 36 | } 37 | 38 | # --------------------------------------------------------------------------------------------------------------------- 39 | # DEPLOY A PRIVATE CLUSTER IN GOOGLE CLOUD PLATFORM 40 | # --------------------------------------------------------------------------------------------------------------------- 41 | 42 | module "gke_cluster" { 43 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 44 | # to a specific version of the modules, such as the following example: 45 | # source = "github.com/gruntwork-io/terraform-google-gke.git//modules/gke-cluster?ref=v0.2.0" 46 | source = "../../modules/gke-cluster" 47 | 48 | name = var.cluster_name 49 | 50 | project = var.project 51 | location = var.location 52 | network = module.vpc_network.network 53 | 54 | # We're deploying the cluster in the 'public' subnetwork to allow outbound internet access 55 | # See the network access tier table for full details: 56 | # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier 57 | subnetwork = module.vpc_network.public_subnetwork 58 | cluster_secondary_range_name = module.vpc_network.public_subnetwork_secondary_range_name 59 | services_secondary_range_name = module.vpc_network.public_services_secondary_range_name 60 | 61 | # When creating a private cluster, the 'master_ipv4_cidr_block' has to be defined and the size must be /28 62 | master_ipv4_cidr_block = var.master_ipv4_cidr_block 63 | 64 | # This setting will make the cluster private 65 | enable_private_nodes = "true" 66 | 67 | # To make testing easier, we keep the public endpoint available. In production, we highly recommend restricting access to only within the network boundary, requiring your users to use a bastion host or VPN. 68 | disable_public_endpoint = "false" 69 | 70 | # With a private cluster, it is highly recommended to restrict access to the cluster master 71 | # However, for testing purposes we will allow all inbound traffic. 
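  # As a sketch only (not part of the tested example), a locked-down configuration would typically allow just a
  # trusted range, such as a corporate office or VPN egress CIDR. The value below is a placeholder taken from the
  # RFC 5737 documentation range, not a recommendation:
  #
  # master_authorized_networks_config = [
  #   {
  #     cidr_blocks = [
  #       {
  #         cidr_block   = "203.0.113.0/24"
  #         display_name = "corp-network"
  #       },
  #     ]
  #   },
  # ]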
72 | master_authorized_networks_config = [ 73 | { 74 | cidr_blocks = [ 75 | { 76 | cidr_block = "0.0.0.0/0" 77 | display_name = "all-for-testing" 78 | }, 79 | ] 80 | }, 81 | ] 82 | 83 | enable_vertical_pod_autoscaling = var.enable_vertical_pod_autoscaling 84 | 85 | resource_labels = { 86 | environment = "testing" 87 | } 88 | } 89 | 90 | # --------------------------------------------------------------------------------------------------------------------- 91 | # CREATE A NODE POOL 92 | # --------------------------------------------------------------------------------------------------------------------- 93 | 94 | resource "google_container_node_pool" "node_pool" { 95 | provider = google-beta 96 | 97 | name = "private-pool" 98 | project = var.project 99 | location = var.location 100 | cluster = module.gke_cluster.name 101 | 102 | initial_node_count = "1" 103 | 104 | autoscaling { 105 | min_node_count = "1" 106 | max_node_count = "5" 107 | } 108 | 109 | management { 110 | auto_repair = "true" 111 | auto_upgrade = "true" 112 | } 113 | 114 | node_config { 115 | image_type = "COS" 116 | machine_type = "n1-standard-1" 117 | 118 | labels = { 119 | private-pools-example = "true" 120 | } 121 | 122 | # Add a private tag to the instances. See the network access tier table for full details: 123 | # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier 124 | tags = [ 125 | module.vpc_network.private, 126 | "private-pool-example", 127 | ] 128 | 129 | disk_size_gb = "30" 130 | disk_type = "pd-standard" 131 | preemptible = false 132 | 133 | service_account = module.gke_service_account.email 134 | 135 | oauth_scopes = [ 136 | "https://www.googleapis.com/auth/cloud-platform", 137 | ] 138 | } 139 | 140 | lifecycle { 141 | ignore_changes = [initial_node_count] 142 | } 143 | 144 | timeouts { 145 | create = "30m" 146 | update = "30m" 147 | delete = "30m" 148 | } 149 | } 150 | 151 | # --------------------------------------------------------------------------------------------------------------------- 152 | # CREATE A CUSTOM SERVICE ACCOUNT TO USE WITH THE GKE CLUSTER 153 | # --------------------------------------------------------------------------------------------------------------------- 154 | 155 | module "gke_service_account" { 156 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 157 | # to a specific version of the modules, such as the following example: 158 | # source = "github.com/gruntwork-io/terraform-google-gke.git//modules/gke-service-account?ref=v0.2.0" 159 | source = "../../modules/gke-service-account" 160 | 161 | name = var.cluster_service_account_name 162 | project = var.project 163 | description = var.cluster_service_account_description 164 | } 165 | 166 | # --------------------------------------------------------------------------------------------------------------------- 167 | # CREATE A NETWORK TO DEPLOY THE CLUSTER TO 168 | # --------------------------------------------------------------------------------------------------------------------- 169 | 170 | module "vpc_network" { 171 | source = "github.com/gruntwork-io/terraform-google-network.git//modules/vpc-network?ref=v0.8.2" 172 | 173 | name_prefix = "${var.cluster_name}-network-${random_string.suffix.result}" 174 | project = var.project 175 | region = var.region 176 | 177 | cidr_block = var.vpc_cidr_block 178 | secondary_cidr_block = var.vpc_secondary_cidr_block 179 | 180 | public_subnetwork_secondary_range_name = 
var.public_subnetwork_secondary_range_name 181 | public_services_secondary_range_name = var.public_services_secondary_range_name 182 | public_services_secondary_cidr_block = var.public_services_secondary_cidr_block 183 | private_services_secondary_cidr_block = var.private_services_secondary_cidr_block 184 | secondary_cidr_subnetwork_width_delta = var.secondary_cidr_subnetwork_width_delta 185 | secondary_cidr_subnetwork_spacing = var.secondary_cidr_subnetwork_spacing 186 | } 187 | 188 | # Use a random suffix to prevent overlap in network names 189 | resource "random_string" "suffix" { 190 | length = 4 191 | special = false 192 | upper = false 193 | } 194 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Sunset notice 2 | 3 | We believe there is an opportunity to create a truly outstanding developer experience for deploying to the cloud, however developing this vision requires that we temporarily limit our focus to just one cloud. Gruntwork has hundreds of customers currently using AWS, so we have temporarily suspended our maintenance efforts on this repo. Once we have implemented and validated our vision for the developer experience on the cloud, we look forward to picking this up. In the meantime, you are welcome to use this code in accordance with the open source license, however we will not be responding to GitHub Issues or Pull Requests. 4 | 5 | If you wish to be the maintainer for this project, we are open to considering that. Please contact us at support@gruntwork.io. 6 | 7 | --- 8 | 9 | [![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gruntwork-io/terraform-google-gke.svg?label=latest)](https://github.com/gruntwork-io/terraform-google-gke/releases/latest) 10 | ![Terraform Version](https://img.shields.io/badge/tf-%3E%3D1.0.x-blue.svg) 11 | 12 | # Google Kubernetes Engine (GKE) Module 13 | 14 | This repo contains a [Terraform](https://www.terraform.io) module for running a Kubernetes cluster on [Google Cloud Platform (GCP)](https://cloud.google.com/) 15 | using [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/). 16 | 17 | ## Quickstart 18 | 19 | If you want to quickly spin up a GKE Public Cluster, you can run the example that is in the root of this 20 | repo. Check out the [gke-basic-helm example documentation](https://github.com/gruntwork-io/terraform-google-gke/blob/master/examples/gke-basic-helm) 21 | for instructions. 22 | 23 | ## What's in this repo 24 | 25 | This repo has the following folder structure: 26 | 27 | - [root](https://github.com/gruntwork-io/terraform-google-gke/tree/master): The root folder contains an example of how 28 | to deploy a GKE Public Cluster with an example chart with [Helm](https://helm.sh/). See [gke-basic-helm](https://github.com/gruntwork-io/terraform-google-gke/blob/master/examples/gke-basic-helm) 29 | for the documentation. 30 | 31 | - [modules](https://github.com/gruntwork-io/terraform-google-gke/tree/master/modules): This folder contains the 32 | main implementation code for this Module, broken down into multiple standalone submodules. 
33 | 34 | The primary module is: 35 | 36 | - [gke-cluster](https://github.com/gruntwork-io/terraform-google-gke/tree/master/modules/gke-cluster): The GKE Cluster module is used to 37 | administer the [cluster master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture) 38 | for a [GKE Cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-admin-overview). 39 | 40 | There are also several supporting modules that add extra functionality on top of `gke-cluster`: 41 | 42 | - [gke-service-account](https://github.com/gruntwork-io/terraform-google-gke/tree/master/modules/gke-service-account): 43 | Used to configure a GCP service account for use with a GKE cluster. 44 | 45 | - [examples](https://github.com/gruntwork-io/terraform-google-gke/tree/master/examples): This folder contains 46 | examples of how to use the submodules. 47 | 48 | - [test](https://github.com/gruntwork-io/terraform-google-gke/tree/master/test): Automated tests for the submodules 49 | and examples. 50 | 51 | ## What is Kubernetes? 52 | 53 | [Kubernetes](https://kubernetes.io) is an open source container management system for deploying, scaling, and managing 54 | containerized applications. Kubernetes is built by Google based on their internal proprietary container management 55 | systems (Borg and Omega). Kubernetes provides a cloud agnostic platform to deploy your containerized applications with 56 | built in support for common operational tasks such as replication, autoscaling, self-healing, and rolling deployments. 57 | 58 | You can learn more about Kubernetes from [the official documentation](https://kubernetes.io/docs/tutorials/kubernetes-basics/). 59 | 60 | ## What is GKE? 61 | 62 | Google Kubernetes Engine or "GKE" is a Google-managed Kubernetes environment. GKE is a fully managed experience; it 63 | handles the management/upgrading of the Kubernetes cluster master as well as autoscaling of "nodes" through "node pool" 64 | templates. 65 | 66 | Through GKE, your Kubernetes deployments will have first-class support for GCP IAM identities, built-in configuration of 67 | high-availability and secured clusters, as well as native access to GCP's networking features such as load balancers. 68 | 69 | ## How do you run applications on Kubernetes? 70 | 71 | There are three different ways you can schedule your application on a Kubernetes cluster. In all three, your application 72 | Docker containers are packaged as a [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/), which are the 73 | smallest deployable unit in Kubernetes, and represent one or more Docker containers that are tightly coupled. Containers 74 | in a Pod share certain elements of the kernel space that are traditionally isolated between containers, such as the 75 | network space (the containers both share an IP and thus the available ports are shared), IPC namespace, and PIDs in some 76 | cases. 77 | 78 | Pods are considered to be relatively ephemeral disposable entities in the Kubernetes ecosystem. This is because Pods are 79 | designed to be mobile across the cluster so that you can design a scalable fault tolerant system. As such, Pods are 80 | generally scheduled with 81 | [Controllers](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/#pods-and-controllers) that manage the 82 | lifecycle of a Pod. Using Controllers, you can schedule your Pods as: 83 | 84 | - Jobs, which are Pods with a controller that will guarantee the Pods run to completion. 
85 | - Deployments behind a Service, which are Pods with a controller that implements lifecycle rules to provide replication 86 | and self-healing capabilities. Deployments will automatically reprovision failed Pods and migrate Pods off of failed 87 | nodes onto healthy ones. A Service provides a consistent endpoint that can be used to access the Deployment. 88 | - Daemon Sets, which are Pods that are scheduled on all worker nodes. Daemon Sets schedule exactly one instance of a Pod 89 | on each node. Like Deployments, Daemon Sets will reprovision failed Pods and schedule new ones automatically on 90 | new nodes that join the cluster. 91 | 92 | 93 | 94 | ## What's a Module? 95 | 96 | A Module is a canonical, reusable, best-practices definition for how to run a single piece of infrastructure, such 97 | as a database or server cluster. Each Module is written using a combination of [Terraform](https://www.terraform.io/) 98 | and scripts (mostly bash) and includes automated tests, documentation, and examples. It is maintained both by the open 99 | source community and companies that provide commercial support. 100 | 101 | Instead of figuring out the details of how to run a piece of infrastructure from scratch, you can reuse 102 | existing code that has been proven in production. And instead of maintaining all that infrastructure code yourself, 103 | you can leverage the work of the Module community to pick up infrastructure improvements through 104 | a version number bump. 105 | 106 | ## Who maintains this Module? 107 | 108 | This Module and its Submodules are maintained by [Gruntwork](http://www.gruntwork.io/). If you are looking for help or 109 | commercial support, send an email to 110 | [support@gruntwork.io](mailto:support@gruntwork.io?Subject=GKE%20Module). 111 | 112 | Gruntwork can help with: 113 | 114 | - Setup, customization, and support for this Module. 115 | - Modules and submodules for other types of infrastructure, such as VPCs, Docker clusters, databases, and continuous 116 | integration. 117 | - Modules and Submodules that meet compliance requirements, such as HIPAA. 118 | - Consulting & Training on AWS, Terraform, and DevOps. 119 | 120 | ## How do I contribute to this Module? 121 | 122 | Contributions are very welcome! Check out the [Contribution Guidelines](https://github.com/gruntwork-io/terraform-google-gke/blob/master/CONTRIBUTING.md) 123 | for instructions. 124 | 125 | ## How is this Module versioned? 126 | 127 | This Module follows the principles of [Semantic Versioning](http://semver.org/). You can find each new release, along 128 | with the changelog, on the [Releases Page](https://github.com/gruntwork-io/terraform-google-gke/releases). 129 | 130 | During initial development, the major version will be 0 (e.g., `0.x.y`), which indicates the code does not yet have a 131 | stable API. Once we hit `1.0.0`, we will make every effort to maintain a backwards-compatible API and use the MAJOR, 132 | MINOR, and PATCH versions on each release to indicate any incompatibilities. 133 | 134 | ## License 135 | 136 | Please see [LICENSE](https://github.com/gruntwork-io/terraform-google-gke/blob/master/LICENSE) for how the code in this 137 | repo is licensed. 138 | 139 | Copyright © 2020 Gruntwork, Inc. 
140 | -------------------------------------------------------------------------------- /modules/gke-cluster/README.md: -------------------------------------------------------------------------------- 1 | # GKE Cluster Module 2 | 3 | The GKE Cluster module is used to administer the [cluster master](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture) 4 | for a [Google Kubernetes Engine (GKE) Cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-admin-overview). 5 | 6 | The cluster master is the "control plane" of the cluster; for example, it runs 7 | the Kubernetes API used by `kubectl`. Worker machines are configured by 8 | attaching [GKE node pools](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools) 9 | to the cluster created by this module. 10 | 11 | ## How do you use this module? 12 | 13 | * See the [root README](https://github.com/gruntwork-io/terraform-google-gke/blob/master/README.md) for instructions on 14 | using Terraform modules. 15 | * See the [examples](https://github.com/gruntwork-io/terraform-google-gke/tree/master/examples) folder for example usage. 16 | * See [variables.tf](https://github.com/gruntwork-io/terraform-google-gke/blob/master/modules/gke-cluster/variables.tf) for all the 17 | variables you can set on this module. 18 | * See [outputs.tf](https://github.com/gruntwork-io/terraform-google-gke/blob/master/modules/gke-cluster/outputs.tf) for all the outputs 19 | exposed by this module. 20 | 21 | ## What is a GKE Cluster? 22 | 23 | The GKE Cluster, or "cluster master", runs the Kubernetes control plane 24 | processes, including the Kubernetes API server, scheduler, and core resource 25 | controllers. 26 | 27 | The master is the unified endpoint for your cluster; it's the "hub" through 28 | which all other components, such as nodes, interact. Users can interact with the 29 | cluster via Kubernetes API calls, such as by using `kubectl`. The GKE cluster 30 | is responsible for running workloads on nodes, as well as scaling/upgrading 31 | nodes. 32 | 33 | ## How do I attach worker machines using a GKE node pool? 34 | 35 | A "[node](https://kubernetes.io/docs/concepts/architecture/nodes/)" is 36 | a worker machine in Kubernetes; in GKE, nodes are provisioned as 37 | [Google Compute Engine VM instances](https://cloud.google.com/compute/docs/instances/). 38 | 39 | [GKE Node Pools](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools) 40 | are groups of nodes that share the same configuration, defined as a [NodeConfig](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/NodeConfig). 41 | Node pools also control the autoscaling of their nodes, and autoscaling 42 | configuration is done inline, alongside the node config definition. A GKE 43 | Cluster can have multiple node pools defined. 44 | 45 | Node pools are configured directly with the 46 | [`google_container_node_pool`](https://www.terraform.io/docs/providers/google/r/container_node_pool.html) 47 | Terraform resource by providing a reference to the cluster you configured with 48 | this module as the `cluster` field. 49 | 50 | ## What VPC network will this cluster use? 51 | 52 | You must explicitly specify the network and subnetwork of your GKE cluster using 53 | the `network` and `subnetwork` fields; this module will not implicitly use the 54 | `default` network with an automatically generated subnetwork. 
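The sketch below ties the last two sections together: it calls this module with an explicitly supplied network and subnetwork, then attaches a worker node pool with the `google_container_node_pool` resource. It is a minimal, hypothetical example rather than a complete configuration: the VPC and subnetwork resources, the `var.project`/`var.location` variables, the secondary range name, the machine type, and the release tag in the `source` URL are placeholders, and it assumes the module exposes the cluster name as a `name` output.

```hcl
# Minimal sketch with placeholder resources and values; not a full, working config.
module "gke_cluster" {
  # Pin ?ref= to a real release tag from this repo's Releases page.
  source = "github.com/gruntwork-io/terraform-google-gke//modules/gke-cluster?ref=<RELEASE_TAG>"

  project  = var.project
  location = var.location
  name     = "example-cluster"

  # The network and subnetwork must be passed explicitly; the `default`
  # network is never used implicitly.
  network    = google_compute_network.vpc.self_link
  subnetwork = google_compute_subnetwork.subnet.self_link

  # Name of a secondary range defined on the subnetwork (placeholder value).
  cluster_secondary_range_name = "pods"
}

# Worker machines live in a separate node pool that references the cluster
# created by the module above via the `cluster` field.
resource "google_container_node_pool" "node_pool" {
  project  = var.project
  location = var.location
  cluster  = module.gke_cluster.name # assumes the module outputs the cluster name

  name               = "example-node-pool"
  initial_node_count = 1

  node_config {
    machine_type = "n1-standard-1"
  }
}
```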
55 | 56 | The modules in the [`terraform-google-network`](https://github.com/gruntwork-io/terraform-google-network) 57 | Gruntwork repo are a useful tool for configuring your VPC network and 58 | subnetworks in GCP. 59 | 60 | ## What is a VPC-native cluster? 61 | 62 | A VPC-native cluster is a GKE Cluster that uses [alias IP ranges](https://cloud.google.com/vpc/docs/alias-ip), in that 63 | it allocates IP addresses from a block known to GCP. When using an alias range, pod addresses are natively routable 64 | within GCP, and VPC networks can ensure that the IP range the cluster uses is reserved. 65 | 66 | While using a secondary IP range is recommended [in order to separate cluster master and pod IPs](https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#how-is-a-secondary-range-connected-to-an-alias-ip-range), 67 | when using a network in the same project as your GKE cluster, you can specify a blank range name to draw alias IPs from your subnetwork's primary IP range. If 68 | you are using a shared VPC network (a network from another GCP project), an explicit secondary range is required. 69 | 70 | See [considerations for cluster sizing](https://cloud.google.com/kubernetes-engine/docs/how-to/alias-ips#cluster_sizing) 71 | for more information on sizing secondary ranges for your VPC-native cluster. 72 | 73 | ## What is a private cluster? 74 | 75 | In a private cluster, the nodes have internal IP addresses only, which ensures that their workloads are isolated from the public Internet. 76 | Private nodes do not have outbound Internet access, but Private Google Access provides private nodes and their workloads with 77 | limited outbound access to Google Cloud Platform APIs and services over Google's private network. 78 | 79 | If you want your cluster nodes to be able to access the Internet, for example to pull images from external container registries, 80 | you will have to set up [Cloud NAT](https://cloud.google.com/nat/docs/overview). 81 | See [Example GKE Setup](https://cloud.google.com/nat/docs/gke-example) for further information. 82 | 83 | You can create a private cluster by setting `enable_private_nodes` to `true`. Note that with a private cluster, setting 84 | the master CIDR range with `master_ipv4_cidr_block` is also required. 85 | 86 | ### How do I control access to the cluster master? 87 | 88 | In a private cluster, the master has two endpoints: 89 | 90 | * **Private endpoint:** This is the internal IP address of the master, behind an internal load balancer in the master's 91 | VPC network. Nodes communicate with the master using the private endpoint. Any VM in your VPC network, and in the same 92 | region as your private cluster, can use the private endpoint. 93 | 94 | * **Public endpoint:** This is the external IP address of the master. You can disable access to the public endpoint by setting 95 | `disable_public_endpoint` to `true`. 96 | 97 | You can relax the restrictions by authorizing certain address ranges to access the endpoints with the input variable 98 | `master_authorized_networks_config`. 99 | 100 | ### How do I configure logging and monitoring with Stackdriver for my cluster? 101 | 102 | Stackdriver Kubernetes Engine Monitoring is enabled by default when using this module. It provides improved support for both 103 | Stackdriver Monitoring and Stackdriver Logging in your cluster, including a GKE-customized Stackdriver Console with 104 | fine-grained breakdown of resources including namespaces and pods. 
Learn more in the [official documentation](https://cloud.google.com/monitoring/kubernetes-engine/#about-skm). 105 | 106 | Although Stackdriver Kubernetes Engine Monitoring is enabled by default, you can use the legacy Stackdriver options by 107 | modifying your configuration. See the [differences between GKE Stackdriver versions](https://cloud.google.com/monitoring/kubernetes-engine/#version) 108 | to compare legacy Stackdriver with Stackdriver Kubernetes Engine Monitoring. 109 | 110 | #### How do I use Prometheus for monitoring? 111 | 112 | Prometheus monitoring for your cluster is available through GCP's Stackdriver Kubernetes Engine Monitoring service. If 113 | you've configured your GKE cluster with Stackdriver Kubernetes Engine Monitoring, you can follow Google's guide to 114 | [using Prometheus](https://cloud.google.com/monitoring/kubernetes-engine/prometheus) to configure your cluster with 115 | Prometheus. 116 | 117 | ### Private cluster restrictions and limitations 118 | 119 | Private clusters have the following restrictions and limitations: 120 | 121 | * The size of the RFC 1918 block for the cluster master must be /28. 122 | * The nodes in a private cluster must run Kubernetes version 1.8.14-gke.0 or later. 123 | * You cannot convert an existing, non-private cluster to a private cluster. 124 | * Each private cluster you create uses a unique VPC Network Peering. 125 | * Deleting the VPC peering between the cluster master and the cluster nodes, deleting the firewall rules that allow 126 | ingress traffic from the cluster master to nodes on port 10250, or deleting the default route to the default 127 | Internet gateway, causes a private cluster to stop functioning. 128 | 129 | ## How do I configure the cluster to use Google Groups for GKE? 130 | 131 | If you want to enable Google Groups for use with RBAC, you have to provide a G Suite domain name using the input variable `var.gsuite_domain_name`. If a 132 | value is provided, the cluster will be initialized with a security group `gke-security-groups@[yourdomain.com]`. 133 | 134 | In G Suite, you will have to: 135 | 136 | 1. Create a G Suite Google Group in your domain, named gke-security-groups@[yourdomain.com]. The group must be named exactly gke-security-groups. 137 | 1. Create groups, if they do not already exist, that represent the groups of users who should have different permissions on your clusters. 138 | 1. Add these groups (not users) to the membership of gke-security-groups@[yourdomain.com]. 139 | 140 | After the cluster has been created, you are ready to create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings 141 | that reference your G Suite Google Groups. Note that you cannot enable this feature on existing clusters. 142 | 143 | For more information, see https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke. -------------------------------------------------------------------------------- /modules/gke-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # REQUIRED PARAMETERS 3 | # These parameters must be supplied when consuming this module. 
4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "project" { 7 | description = "The project ID to host the cluster in" 8 | type = string 9 | } 10 | 11 | variable "location" { 12 | description = "The location (region or zone) to host the cluster in" 13 | type = string 14 | } 15 | 16 | variable "name" { 17 | description = "The name of the cluster" 18 | type = string 19 | } 20 | 21 | variable "network" { 22 | description = "A reference (self link) to the VPC network to host the cluster in" 23 | type = string 24 | } 25 | 26 | variable "subnetwork" { 27 | description = "A reference (self link) to the subnetwork to host the cluster in" 28 | type = string 29 | } 30 | 31 | variable "cluster_secondary_range_name" { 32 | description = "The name of the secondary range within the subnetwork for the cluster to use" 33 | type = string 34 | } 35 | 36 | # --------------------------------------------------------------------------------------------------------------------- 37 | # OPTIONAL PARAMETERS 38 | # Generally, these values won't need to be changed. 39 | # --------------------------------------------------------------------------------------------------------------------- 40 | 41 | variable "description" { 42 | description = "The description of the cluster" 43 | type = string 44 | default = "" 45 | } 46 | 47 | variable "kubernetes_version" { 48 | description = "The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region." 49 | type = string 50 | default = "latest" 51 | } 52 | 53 | variable "logging_service" { 54 | description = "The logging service that the cluster should write logs to. Available options include logging.googleapis.com/kubernetes, logging.googleapis.com (legacy), and none" 55 | type = string 56 | default = "logging.googleapis.com/kubernetes" 57 | } 58 | 59 | variable "monitoring_service" { 60 | description = "The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Stackdriver Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting. Available options include monitoring.googleapis.com/kubernetes, monitoring.googleapis.com (legacy), and none" 61 | type = string 62 | default = "monitoring.googleapis.com/kubernetes" 63 | } 64 | 65 | variable "horizontal_pod_autoscaling" { 66 | description = "Whether to enable the horizontal pod autoscaling addon" 67 | type = bool 68 | default = true 69 | } 70 | 71 | variable "http_load_balancing" { 72 | description = "Whether to enable the http (L7) load balancing addon" 73 | type = bool 74 | default = true 75 | } 76 | 77 | variable "enable_private_nodes" { 78 | description = "Control whether nodes have internal IP addresses only. If enabled, all nodes are given only RFC 1918 private addresses and communicate with the master via private networking." 79 | type = bool 80 | default = false 81 | } 82 | 83 | variable "disable_public_endpoint" { 84 | description = "Control whether the master's internal IP address is used as the cluster endpoint. If set to 'true', the master can only be accessed from internal IP addresses." 85 | type = bool 86 | default = false 87 | } 88 | 89 | variable "master_ipv4_cidr_block" { 90 | description = "The IP range in CIDR notation to use for the hosted master network. 
This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network." 91 | type = string 92 | default = "" 93 | } 94 | 95 | variable "network_project" { 96 | description = "The project ID of the shared VPC's host (for shared vpc support)" 97 | type = string 98 | default = "" 99 | } 100 | 101 | variable "master_authorized_networks_config" { 102 | description = <