├── .gitignore ├── CONTRIBUTING.md ├── Jenkinsfile ├── LICENSE ├── Makefile ├── OWNERS ├── README.md ├── clusters ├── cluster.py ├── cluster.py.schema └── cluster.yaml ├── container ├── Dockerfile ├── README.md └── cloudbuild.yaml ├── gke-to-gke-peering ├── README-QWICKLABS.md ├── README.md ├── cleanup.sh ├── gke-to-gke-peering-architecture.png ├── install.sh └── validate.sh ├── gke-to-gke-vpn ├── README-QWIKLABS.md ├── README.md ├── cleanup.sh ├── gke-to-gke-vpn-architecture.png ├── install.sh ├── terraform │ ├── main.tf │ ├── network.tf │ ├── provider.tf │ ├── variables.tf │ ├── versions.tf │ └── vpn.tf └── validate.sh ├── images ├── cluster_details.png ├── gke-to-gke-peering-architecture.png ├── gke-to-gke-vpn-architecture.png ├── nav_menu_demo.png ├── nginx.png ├── nginx_external_ip.png ├── services.png ├── vm_internal_ips.png ├── vpc_networks.png └── workloads.png ├── manifests ├── cluster-ip-svc.yaml ├── ilb-svc.yaml ├── ingress-svc.yaml ├── lb-svc.yaml ├── nodeport-svc.yaml └── run-my-nginx.yaml ├── network ├── network.py ├── network.py.schema ├── network.yaml └── static-ip.yaml ├── renovate.json ├── test ├── boilerplate │ ├── boilerplate.Dockerfile.txt │ ├── boilerplate.Makefile.txt │ ├── boilerplate.go.txt │ ├── boilerplate.py.txt │ ├── boilerplate.sh.txt │ ├── boilerplate.tf.txt │ ├── boilerplate.xml.txt │ └── boilerplate.yaml.txt ├── make.sh └── verify_boilerplate.py ├── validate-pod-to-service-communication.sh └── verify-functions.sh /.gitignore: -------------------------------------------------------------------------------- 1 | # OSX leaves these everywhere on SMB shares 2 | ._* 3 | 4 | # OSX trash 5 | .DS_Store 6 | 7 | # Emacs save files 8 | *~ 9 | \#*\# 10 | .\#* 11 | 12 | # Vim-related files 13 | [._]*.s[a-w][a-z] 14 | [._]s[a-w][a-z] 15 | *.un~ 16 | Session.vim 17 | .netrwhist 18 | *.code-workspace 19 | 20 | # Local .terraform directories 21 | **/.terraform/* 22 | 23 | # .tfstate files 24 | *.tfstate 25 | *.tfstate.* 26 | 27 | # Ignore any .tfvars 
#!/usr/bin/env groovy

/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Reference: https://github.com/jenkinsci/kubernetes-plugin
// Pod label and GOOGLE_APPLICATION_CREDENTIALS (used by Terraform) are set up below.
def containerName = "networking"
def GOOGLE_APPLICATION_CREDENTIALS = '/home/jenkins/dev/jenkins-deploy-dev-infra.json'
def jenkins_container_version = env.JENKINS_CONTAINER_VERSION

podTemplate(
  containers: [
    containerTemplate(name: "${containerName}",
        image: "gcr.io/pso-helmsman-cicd/jenkins-k8s-node:${jenkins_container_version}",
        command: 'tail -f /dev/null',
        resourceRequestCpu: '1000m',
        resourceLimitCpu: '2000m',
        resourceRequestMemory: '1Gi',
        resourceLimitMemory: '2Gi'
    )
  ],
  volumes: [secretVolume(mountPath: '/home/jenkins/dev',
      secretName: 'jenkins-deploy-dev-infra'
  )]
) {
  node(POD_LABEL) {
    try {
      // Options covers all other job properties or wrapper functions that apply to entire Pipeline.
      properties([disableConcurrentBuilds()])
      // Terraform reads credentials from this environment variable.
      env.GOOGLE_APPLICATION_CREDENTIALS = GOOGLE_APPLICATION_CREDENTIALS

      stage('Setup') {
        container(containerName) {
          // Checkout code from scm, i.e. commits related to the PR.
          checkout scm

          // Set up gcloud service account access.
          sh "gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS}"
          sh "gcloud config set compute/zone ${env.CLUSTER_ZONE}"
          sh "gcloud config set core/project ${env.PROJECT_ID}"
          sh "gcloud config set compute/region ${env.REGION}"

          // Build and push container image to pso_examples.
          sh "make build_container"
        }
      }
      stage('Lint') {
        container(containerName) {
          sh "make lint"
        }
      }
      stage('gke-to-gke-vpn-create') {
        container(containerName) {
          dir('gke-to-gke-vpn') {
            sh './install.sh'
          }
        }
      }

      stage('gke-to-gke-vpn-validate') {
        container(containerName) {
          dir('gke-to-gke-vpn') {
            sh './validate.sh'
          }
          sh './validate-pod-to-service-communication.sh'
        }
      }

      stage('gke-to-gke-vpn-cleanup') {
        container(containerName) {
          dir('gke-to-gke-vpn') {
            sh './cleanup.sh'
          }
        }
      }
    } catch (err) {
      // FIX: the previous version ran a second 'Teardown' stage here, which
      // duplicated the 'finally' teardown below and deleted resources twice
      // on failure (and produced two identically-named stages in the UI).
      // The 'finally' block always runs, so cleanup belongs there alone.
      //
      // If any exception occurs, mark the build as failed and display a
      // detailed message on the Jenkins console output.
      currentBuild.result = 'FAILURE'
      echo "FAILURE caught echo ${err}"
      throw err
    } finally {
      // Always tear down provisioned resources and revoke credentials,
      // whether the build succeeded or failed.
      stage('Teardown') {
        container(containerName) {
          dir('gke-to-gke-vpn') {
            sh './cleanup.sh'
          }
          sh "gcloud auth revoke"
        }
      }
    }
  }
}
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
# Make will use bash instead of sh
SHELL := /usr/bin/env bash

# 'lint' is the first target in the file, so it is what runs when you invoke
# 'make' with no arguments. It aggregates every check defined below.
# FIX: 'check_shebangs' was defined but never wired into 'lint', so the
# shebang check silently never ran; it is now part of the default lint run.
lint: check_shell check_python check_golang check_terraform check_docker check_base_files check_shebangs check_headers check_trailing_whitespace

# The .PHONY directive tells make that this isn't a real target and so
# the presence of a file named 'check_shell' won't cause this target to stop
# working
.PHONY: check_shell
check_shell:
	@source test/make.sh && check_shell

.PHONY: check_python
check_python:
	@source test/make.sh && check_python

.PHONY: check_golang
check_golang:
	@source test/make.sh && golang

.PHONY: check_terraform
check_terraform:
	@source test/make.sh && check_terraform

.PHONY: check_docker
check_docker:
	@source test/make.sh && docker

.PHONY: check_base_files
check_base_files:
	@source test/make.sh && basefiles

.PHONY: check_shebangs
check_shebangs:
	@source test/make.sh && check_bash

.PHONY: check_trailing_whitespace
check_trailing_whitespace:
	@source test/make.sh && check_trailing_whitespace

.PHONY: check_headers
check_headers:
	@echo "Checking file headers"
	@python test/verify_boilerplate.py

# Builds and pushes the nginx-curl test container via Cloud Build.
.PHONY: build_container
build_container:
	gcloud builds submit container --project=pso-examples \
		--config="container/cloudbuild.yaml"
-------------------------------------------------------------------------------- 1 | # Kubernetes Engine Networking 2 | 3 | 4 | ## Introduction 5 | 6 | Google cloud networking with Kubernetes Engine clusters can 7 | be complex. Assigning optimal CIDR ranges for the relevant VPC subnets 8 | and the Kubernetes Engine clusters' reserved IP ranges from the start is very important 9 | since VPC subnets are not always easy to resize and the cluster's reserved 10 | IP ranges are immutable. Using the correct method to expose the applications 11 | in the cluster is important as every method was designed for a different 12 | set of use cases. 13 | 14 | The demos in the project demonstrate the following best practices: 15 | 16 | 1. Connecting two GCP networks using VPC peering and Cloud VPN containing two Kubernetes Engine clusters each. 17 | 1. Deploying the nginx pods. 18 | 1. Exposing the pods using Kubernetes Engine services 19 | 1. Validating pod-to-service communication across the Kubernetes Engine clusters within the same region and the different regions. 20 | 21 | ## Prerequisites 22 | 23 | ### Tools 24 | 1. [Google Cloud SDK version >= 253.0.0](https://cloud.google.com/sdk/docs/downloads-versioned-archives) 25 | 2. [kubectl matching the latest GKE version](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 26 | 3. bash or bash compatible shell 27 | 4. [jq](https://stedolan.github.io/jq/) 28 | 29 | #### Install Cloud SDK 30 | The Google Cloud SDK is used to interact with your GCP resources. 31 | [Installation instructions](https://cloud.google.com/sdk/downloads) for multiple platforms are available online. 32 | 33 | #### Install kubectl CLI 34 | The kubectl CLI is used to interteract with both Kubernetes Engine and kubernetes in general. 35 | [Installation instructions](https://cloud.google.com/kubernetes-engine/docs/quickstart) 36 | for multiple platforms are available online. 37 | 38 | ## Directory Structure 39 | 1. 
def GenerateConfig(context):
    """Generates the Deployment Manager resource configuration for a GKE cluster.

    Args:
        context: Deployment Manager context object. `context.env` supplies the
            deployment and resource names; `context.properties` supplies the
            input values loaded from the cluster*.yaml configuration (zone,
            network, subnet, node count, CIDR ranges, etc.).

    Returns:
        dict with two keys:
            'resources': the container.v1.cluster resource plus one
                deploymentmanager.v2beta.typeProvider per Kubernetes API
                endpoint, so other DM configs can create k8s resources in
                this cluster.
            'outputs': the generated type-provider names.
    """
    name_prefix = context.env['deployment'] + '-' + context.env['name']
    cluster_name = name_prefix
    type_name = name_prefix + '-type'
    # Kubernetes API endpoints for which a DM type provider is generated.
    k8s_endpoints = {
        '': 'api/v1',
        '-v1beta1-extensions': 'apis/extensions/v1beta1'
    }

    # Input values below are loaded from the cluster*.yaml file. Change values
    # there for customization.
    resources = [{
        'name': cluster_name,
        'type': 'container.v1.cluster',
        'properties': {
            'zone': context.properties['zone'],
            'cluster': {
                'name': cluster_name,
                'network': context.properties['network'],
                'subnetwork': context.properties['subnet'],
                'initialNodeCount': context.properties['initialNodeCount'],
                'initialClusterVersion': context.properties['cluster-version'],
                'nodeConfig': {
                    'imageType':
                        context.properties['image-type'],
                    'tags': [context.properties['tags']],
                    'oauthScopes': [
                        'https://www.googleapis.com/auth/' + s for s in [
                            'compute', 'devstorage.read_only', 'logging.write',
                            'monitoring'
                        ]
                    ]
                },
                'ipAllocationPolicy': {
                    'useIpAliases': context.properties['enable-ip-alias'],
                    'clusterIpv4Cidr': context.properties['cluster-ipv4-cidr'],
                    'servicesIpv4Cidr':
                        context.properties['services-ipv4-cidr'],
                }
            }
        }
    }]
    outputs = []
    # FIX: use .items() instead of the Python-2-only .iteritems(), which
    # raises AttributeError under Python 3. .items() behaves identically here.
    for type_suffix, endpoint in k8s_endpoints.items():
        resources.append({
            'name': type_name + type_suffix,
            'type': 'deploymentmanager.v2beta.typeProvider',
            'properties': {
                'options': {
                    'validationOptions': {
                        # Kubernetes API accepts ints in fields they annotate
                        # with string. This validation will show as warning
                        # rather than failure for Deployment Manager.
                        # https://github.com/kubernetes/kubernetes/issues/2971
                        'schemaValidation': 'IGNORE_WITH_WARNINGS'
                    },
                    # According to the kubernetes spec, the path parameter
                    # 'name' should be the value inside the metadata field
                    # https://github.com/kubernetes/community/blob/master
                    # /contributors/devel/api-conventions.md
                    # This mapping specifies that.
                    'inputMappings': [{
                        'fieldName':
                            'name',
                        'location':
                            'PATH',
                        'methodMatch':
                            '^(GET|DELETE|PUT)$',
                        'value':
                            '$.ifNull('
                            '$.resource.properties.metadata.name, '
                            '$.resource.name)'
                    }, {
                        'fieldName':
                            'metadata.name',
                        'location':
                            'BODY',
                        'methodMatch':
                            '^(PUT|POST)$',
                        'value':
                            '$.ifNull('
                            '$.resource.properties.metadata.name, '
                            '$.resource.name)'
                    }, {
                        'fieldName':
                            'Authorization',
                        'location':
                            'HEADER',
                        'value':
                            '$.concat("Bearer ",'
                            '$.googleOauth2AccessToken())'
                    }]
                },
                'descriptorUrl':
                    ''.join([
                        'https://$(ref.', cluster_name, '.endpoint)/swaggerapi/',
                        endpoint
                    ])
            }
        })
        outputs.append({
            'name': 'clusterType' + type_suffix,
            'value': type_name + type_suffix
        })

    return {'resources': resources, 'outputs': outputs}
5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #GKE cluster schema 16 | info: 17 | title: GKE cluster 18 | author: Google LLC 19 | description: | 20 | Creates a GKE cluster and associated type for use in DM. The type can be 21 | used in other DM configurations in the following manner: 22 | 23 | "type: :/api/v1/namespaces/{namespace}/services" 24 | 25 | required: 26 | - zone 27 | 28 | properties: 29 | zone: 30 | type: string 31 | description: Zone in which the cluster should run. 32 | initialNodeCount: 33 | type: integer 34 | description: Initial number of nodes desired in the cluster. 35 | default: 4 36 | 37 | outputs: 38 | clusterType: 39 | description: The name of the type provider which can create resources from the Kubernetes v1 API in your cluster. 40 | type: string 41 | clusterType-v1beta1-extensions: 42 | description: The name of the type provider which can create resources from the Kubernetes v1beta1-extensions API in your cluster. 43 | type: string 44 | -------------------------------------------------------------------------------- /clusters/cluster.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ### cluster1 configuration 16 | 17 | imports: 18 | - path: cluster.py 19 | 20 | resources: 21 | - name: cluster1 22 | type: cluster.py 23 | properties: 24 | network: network1 25 | subnet: subnet1-us-east1 26 | zone: us-east1-d 27 | cluster-version: "1.12" 28 | initialNodeCount: 1 29 | image-type: cos 30 | tags: kc-node 31 | enable-ip-alias: true 32 | cluster-ipv4-cidr: 10.108.0.0/19 33 | services-ipv4-cidr: 10.208.0.0/20 34 | 35 | - name: cluster2 36 | type: cluster.py 37 | properties: 38 | network: network1 39 | subnet: subnet2-us-central1 40 | zone: us-central1-b 41 | cluster-version: "1.12" 42 | initialNodeCount: 1 43 | image-type: cos 44 | tags: kc-node 45 | enable-ip-alias: true 46 | cluster-ipv4-cidr: 10.118.0.0/19 47 | services-ipv4-cidr: 10.218.0.0/20 48 | 49 | - name: cluster3 50 | type: cluster.py 51 | properties: 52 | network: network2 53 | subnet: subnet3-us-east1 54 | zone: us-east1-c 55 | cluster-version: "1.12" 56 | initialNodeCount: 1 57 | image-type: cos 58 | tags: kc-node 59 | enable-ip-alias: true 60 | cluster-ipv4-cidr: 10.128.0.0/19 61 | services-ipv4-cidr: 10.228.0.0/20 62 | 63 | - name: cluster4 64 | type: cluster.py 65 | properties: 66 | network: network2 67 | subnet: subnet4-us-central1 68 | zone: us-central1-c 69 | cluster-version: "1.12" 70 | initialNodeCount: 1 71 | image-type: cos 72 | tags: kc-node 73 | enable-ip-alias: true 74 | cluster-ipv4-cidr: 10.138.0.0/19 75 | services-ipv4-cidr: 10.238.0.0/20 76 | -------------------------------------------------------------------------------- 
/container/Dockerfile:
--------------------------------------------------------------------------------
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Minimal nginx image used by the demos as both the served workload and the
# client for pod-to-service connectivity checks.
FROM nginx:alpine

# curl is needed by the validation scripts; pinning to the 7.x series keeps
# rebuilds reproducible across Alpine package updates.
RUN apk add --no-cache 'curl=~7'
--------------------------------------------------------------------------------
/container/README.md:
--------------------------------------------------------------------------------
# Nginx Container

This project provides an Alpine Nginx container with curl to test pod to service
communications.

The container is available via:

```console
docker pull gcr.io/pso-examples/nginx-curl:1.0.0
```
--------------------------------------------------------------------------------
/container/cloudbuild.yaml:
--------------------------------------------------------------------------------
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Cloud Build config: builds the nginx-curl image from container/Dockerfile.
# The tag below must stay in sync with the `docker pull` line in
# container/README.md.
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: [ 'build', '-t', 'gcr.io/pso-examples/nginx-curl:1.0.0', '.']
# Listing the image here makes Cloud Build push it to the registry on success.
images:
- 'gcr.io/pso-examples/nginx-curl:1.0.0'
--------------------------------------------------------------------------------
/gke-to-gke-peering/README-QWICKLABS.md:
--------------------------------------------------------------------------------
# Kubernetes Engine Networking

## Table of Contents


* [Kubernetes Engine Communication Through VPC Peering](#kubernetes-engine-communication-through-vpc-peering)
  * [Introduction](#introduction)
  * [Architecture](#architecture)
    * [GCP Network 1](#gcp-network-1)
      * [Kubernetes Engine Cluster 1](#kubernetes-engine-cluster-1)
      * [Kubernetes Engine Cluster 2](#kubernetes-engine-cluster-2)
      * [Other Resources](#other-resources)
    * [GCP Network 2](#gcp-network-2)
      * [Kubernetes Engine Cluster 3](#kubernetes-engine-cluster-3)
      * [Kubernetes Engine Cluster 4](#kubernetes-engine-cluster-4)
      * [Other Resources](#other-resources-1)
    * [Notes](#notes)
    * [Initialize GCP Authorization](#initialize-gcp-authorization)
    * [Directory Structure](#directory-structure)
  * [Deployment Steps](#deployment-steps)
  * [Validation](#validation)
  * [Verify the pod-to-service communication](#verify-the-pod-to-service-communication)
  * [Tear Down](#tear-down)
  * [Troubleshooting](#troubleshooting)
    * [Deleting Resources Manually](#deleting-resources-manually)
  * [Relevant Materials](#relevant-materials)


## Kubernetes Engine Communication Through VPC Peering

## Introduction

Google Cloud networking with Kubernetes Engine clusters can be
complex.
This demo strives to simplify the best practices for exposing cluster 36 | services to other clusters and establishing network links between Kubernetes Engine clusters 37 | running in separate projects. 38 | 39 | This project contains a set of Deployment Manager templates that allows a user to 40 | create networks, subnets, and Kubernetes Engine clusters. This project demonstrates the 41 | following best practices. 42 | 43 | 1. Network design of launching Kubernetes Engine clusters in custom networks. 44 | 1. Assigning node CIDR, container CIDR and service CIDR for Kubernetes Engine clusters. 45 | 1. IP range management. 46 | 1. Exposing pods of Kubernetes Engine clusters over peered networks 47 | 48 | This example also includes Kubernetes manifests for: 49 | 50 | 1. Deploying the Nginx pods in clusters. 51 | 1. Exposing the Nginx pods of the clusters with different types of services like cluster 52 | IP, nodeport, internal load balancer, Network Load Balancer and Ingress. 53 | 1. Validating the pod-to-service communication over the peered networks. 54 | 55 | ## Architecture 56 | 57 | The execution of this code in the GCP environment creates two custom GCP networks connected via VPC peering. Each network will have two subnets one in the us-west1 region and the other in the us-central1 region. Each of the subnets hosts a Kubernetes Engine cluster which has nginx pods and services to expose those pods across other clusters. 58 | 59 | ![Kubernetes Engine Communication Through VPC Peering](gke-to-gke-peering-architecture.png "Kubernetes Engine-Communication-Through-VPC-Peering") 60 | 61 | 62 | Below is the detailed overview of GCP resources which will be created. 63 | 64 | ### GCP Network 1 65 | #### Kubernetes Engine Cluster 1 66 | 1. 
Subnet: subnet1-us-west1 (10.1.0.0/28) 67 | 68 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 69 | |---|---|---|---|---| 70 | |10.108.0.0/19|10.208.0.0/20|us-west1-b|3|COS 71 | 72 | #### Kubernetes Engine Cluster 2 73 | 1. Subnet: subnet1-us-central1 (10.2.0.0/28) 74 | 75 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 76 | |---|---|---|---|---| 77 | |10.118.0.0/19|10.218.0.0/20|us-central1-b|3|COS 78 | 79 | #### Other Resources 80 | 1. Cluster IP, Nodeport, ILB, LB and Ingress services to expose pods in each of 81 | those clusters. 82 | 1. VPC Peering connection with network2. 83 | 84 | ### GCP Network 2 85 | #### Kubernetes Engine Cluster 3 86 | 1. Subnet: subnet3-us-west1 (10.11.0.0/28) 87 | 88 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 89 | |---|---|---|---|---| 90 | |10.128.0.0/19|10.228.0.0/20|us-west1-c|3|COS 91 | 92 | #### Kubernetes Engine Cluster 4 93 | 1. Subnet: subnet4-us-central1 (10.12.0.0/28) 94 | 95 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 96 | |---|---|---|---|---| 97 | |10.138.0.0/19|10.238.0.0/20|us-central1-c|3|COS 98 | 99 | #### Other Resources 100 | 1. Cluster IP, Nodeport, ILB, LB and Ingress services to expose pods in each of 101 | those clusters. 102 | 1. VPC Peering connection with network1. 103 | 104 | ### Notes 105 | 106 | > If you get an error while running the install script, run the cleanup script or manually delete the resources and attempt the install script again. Manual deletion is covered in a section below, but using the cleanup script is hightly recommended. 107 | 108 | For qwicklabs you do not have to make these changes. These changes can be made if you are more advanced. 109 | 110 | 1. Region for subnets and Node CIDR can be customized in /network/network.yaml. 111 | 1. Cluster attributes like zone, image, node count, cluster CIDR and service CIDR can be customized in clusters/cluster.yaml. 112 | 1. 
To add additional custom attributes to network or clusters yaml files, and deployment manager scripts, /network/\*.py or clusters/\*.py, need to be updated accordingly. 113 | 114 | ### Initialize GCP Authorization 115 | 116 | When using Cloud Shell execute the following command in order to setup gcloud cli. 117 | 118 | ```console 119 | gcloud init 120 | ``` 121 | 122 | ### Directory Structure 123 | 1. The root folder is the "gke-networking-demos" folder. 124 | 1. The "network" folder contains the manifest files and Deployment Manager templates to setup networks. 125 | 1. The "clusters" folder contains the manifest files and Deployment Manager templates to create Kubernetes Engine clusters. 126 | 1. The "manifests" folder contains the manifest files to create Kubernetes Engine services. 127 | 1. The "gke-to-gke-peering" folder contains scripts specific to this demo. 128 | 129 | ## Deployment Steps 130 | 131 | The following steps will allow a user to to run the demo: 132 | 133 | Execute the script `./install.sh` that is located in the `gke-to-gke-peering` directory. 134 | 135 | ## Validation 136 | 1. Make sure that there are no errors in the install script execution. If you have problems refer to the Troubleshooting section below. 137 | 1. Login to GCP console. 138 | 1. Verify that the CIDR ranges of subnet-us-west1 and subnet-us-central1 matches 139 | the specification. 140 | 1. Click on the VM instances in the Compute Engine and verify that the node IP addresses 141 | are drawn from the subnet's CIDR. 142 | 1. Verify the created clusters in Kubernetes Engine. Click on the cluster hyperlink 143 | and verify that "Container address range" matches the specified cluster-ipv4-cidr. 144 | 1. Click on workloads and verify that the status is OK for nginx pods. 145 | 1. Click on discovery & load balancing. Verify that the cluster ip, nodeport, ILB and LB are created for cluster1. 146 | 1. Click on discovery & load balancing. 
Verify that the cluster ip, nodeport, LB and ingress services are created for cluster2. 147 | 1. Verify that cluster IP address of all the services for a cluster are drawn 148 | from service-ipv4-cidr. 149 | 1. Access the endpoint URL for external load balancer to view the nginx pods. 150 | 1. Change directory to `gke-to-gke-peering` 151 | 1. Run `./validate.sh` 152 | 153 | 154 | ## Verify the pod-to-service communication 155 | 1. Clusters in the same region communicate through the internal load balancer. 156 | 1. Clusters across the different regions communicate through the global load balancer. 157 | 1. All the services created to expose pods in a cluster are accessible to pods within that cluster. 158 | 1. Refer to validate-pod-to-service-communication.sh script to view the commands to verify pod to service communication. 159 | 1. Run `./validate-pod-to-service-communication.sh` located in the project root directory 160 | 1. The above script demonstrates how the pods in cluster1 can access the local Kubernetes Engine services and the other Kubernetes Engine Internal/External load balancer services from the same or different regions. 161 | 162 | ## Tear Down 163 | 164 | 1. Change directory to `gke-to-gke-peering` 165 | 2. Run `./cleanup.sh` 166 | 3. Enter 'y' when prompted to delete the resources. 167 | 4. Verify that the script executed with no errors. 168 | 5. Verify that all the resources created are deleted. 169 | 170 | ## Troubleshooting 171 | 172 | 1. Remember to enable API's as mentioned in deployment steps in the project where the resources are to be created. Otherwise, API not enabled error is thrown. 173 | 1. Verify that the project is associated with a valid billing account. 174 | 1. Make sure to have the right permissions for the GCP account to create above GCP/Kubernetes Engine resources in project. Otherwise, permission denied error is thrown. 175 | 1. 
Make sure that the deployments created through install script are deleted before you try to re-install the resources. Otherwise, resources will not be installed properly. 176 | 1. If there are any errors in cleanup script execution, refer to the steps for deleting resources manually. 177 | 1. If you get an error while running the install script, run the cleanup script or manually delete the resources and attempt the install script again. 178 | 179 | ## Deleting Resources Manually 180 | 1. Go to Kubernetes Engine -> Services. Delete all the services created through the install script. 181 | 1. Go to Network Services -> Load Balancing and delete the load balancers along with their associated health checks. 182 | 1. Go to Compute Engine -> VM Instances and delete all the instances created through the install script. 183 | 1. Go to Compute Engine -> Instance Groups and delete all the instance groups created through the install script. 184 | 1. Go to VPC Networks -> Firewall Rules and delete the firewall rules created for network1. 185 | 1. Go to Deployment Manager -> Deployments and delete the cluster and network deployments. 186 | 1. Delete the dependent resources if the network deployment doesn't get deleted.
187 | 188 | ## Relevant Materials 189 | 190 | * [VPC Network Peering](https://cloud.google.com/vpc/docs/vpc-peering) 191 | * [Internal Load Balancing](https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing) 192 | * [Exposing an Application to External Traffic](https://cloud.google.com/kubernetes-engine/docs/how-to/exposing-apps) 193 | -------------------------------------------------------------------------------- /gke-to-gke-peering/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Engine Networking 2 | ## Table of Contents 3 | 4 | 5 | * [Introduction](#introduction) 6 | * [Architecture](#architecture) 7 | * [GCP Network1](#gcp-network-network1) 8 | * [Kubernetes Engine Cluster 1](#kubernetes-engine-cluster-1) 9 | * [Kubernetes Engine Cluster 2](#kubernetes-engine-cluster-2) 10 | * [Other Resources](#other-resources) 11 | * [GCP Network2](#gcp-network-network2) 12 | * [Kubernetes Engine Cluster 3](#kubernetes-engine-cluster-3) 13 | * [Kubernetes Engine Cluster 4](#kubernetes-engine-cluster-4) 14 | * [Other Resources](#other-resources-1) 15 | * [Notes](#notes) 16 | * [Prerequisites](#prerequisites) 17 | * [Run Demo in a Google Cloud Shell](#run-demo-in-a-google-cloud-shell) 18 | * [Supported Operating Systems](#supported-operating-systems) 19 | * [Tools](#tools) 20 | * [Versions](#versions) 21 | * [Setup](#setup) 22 | * [Directory Structure](#directory-structure) 23 | * [Deployment Steps](#deployment-steps) 24 | * [Validation](#validation) 25 | * [Verify the pod-to-service communication](#verify-the-pod-to-service-communication) 26 | * [Tear Down](#tear-down) 27 | * [Troubleshooting](#troubleshooting) 28 | * [Deleting Resources Manually](#deleting-resources-manually) 29 | * [Relevant Materials](#relevant-materials) 30 | 31 | 32 | 33 | ## Kubernetes Engine Communication Through VPC Peering 34 | 35 | ## Introduction 36 | 37 | Google Cloud networking with Kubernetes Engine clusters can 
be 38 | complex. This demo strives to simplify the best practices for exposing cluster 39 | services to other clusters and establishing network links between Kubernetes Engine clusters 40 | running in separate projects. 41 | 42 | This project contains a set of Deployment Manager templates that allows a user to 43 | create networks, subnets, and Kubernetes Engine clusters. This project demonstrates the 44 | following best practices. 45 | 46 | 1. Network design of launching Kubernetes Engine clusters in custom networks. 47 | 1. Assigning node CIDR, container CIDR and service CIDR for Kubernetes Engine clusters. 48 | 1. IP range management. 49 | 1. Exposing pods of Kubernetes Engine clusters over peered networks 50 | 51 | This example also includes Kubernetes manifests for: 52 | 53 | 1. Deploying the Nginx pods in clusters. 54 | 1. Exposing the Nginx pods of the clusters with different types of services like cluster 55 | IP, nodeport, internal load balancer, Network Load Balancer and Ingress. 56 | 1. Validating the pod-to-service communication over the peered networks. 57 | 58 | ## Architecture 59 | 60 | The execution of this code in the GCP environment creates two custom GCP networks connected via VPC peering. Each network will have two subnets one in the us-west1 region and the other in the us-central1 region. Each of the subnets hosts a Kubernetes Engine cluster which has nginx pods and services to expose those pods across other clusters. 61 | 62 | ![Kubernetes Engine Communication Through VPC Peering](gke-to-gke-peering-architecture.png "Kubernetes Engine-Communication-Through-VPC-Peering") 63 | 64 | 65 | Below is the detailed overview of GCP resources which will be created. 66 | 67 | ### GCP Network 1 68 | #### Kubernetes Engine Cluster 1 69 | 1. 
Subnet: subnet1-us-west1 (10.1.0.0/28) 70 | 71 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 72 | |---|---|---|---|---| 73 | |10.108.0.0/19|10.208.0.0/20|us-west1-b|3|COS 74 | 75 | #### Kubernetes Engine Cluster 2 76 | 1. Subnet: subnet1-us-central1 (10.2.0.0/28) 77 | 78 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 79 | |---|---|---|---|---| 80 | |10.118.0.0/19|10.218.0.0/20|us-central1-b|3|COS 81 | 82 | #### Other Resources 83 | 1. Cluster IP, Nodeport, ILB, LB and Ingress services to expose pods in each of 84 | those clusters. 85 | 1. VPC Peering connection with network2. 86 | 87 | ### GCP Network 2 88 | #### Kubernetes Engine Cluster 3 89 | 1. Subnet: subnet3-us-west1 (10.11.0.0/28) 90 | 91 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 92 | |---|---|---|---|---| 93 | |10.128.0.0/19|10.228.0.0/20|us-west1-c|3|COS 94 | 95 | #### Kubernetes Engine Cluster 4 96 | 1. Subnet: subnet4-us-central1 (10.12.0.0/28) 97 | 98 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 99 | |---|---|---|---|---| 100 | |10.138.0.0/19|10.238.0.0/20|us-central1-c|3|COS 101 | 102 | #### Other Resources 103 | 1. Cluster IP, Nodeport, ILB, LB and Ingress services to expose pods in each of 104 | those clusters. 105 | 1. VPC Peering connection with network1. 106 | 107 | ### Notes 108 | 1. Region for subnets and Node CIDR can be customized in /network/network.yaml. 109 | 1. Cluster attributes like zone, image, node count, cluster CIDR and service CIDR can be customized in clusters/cluster.yaml. 110 | 1. To add additional custom attributes to network or clusters yaml files and deployment manager scripts at /network/*.py or clusters/*.py needs to be updated accordingly. 111 | 112 | ## Prerequisites 113 | 114 | A Google Cloud account and project is required for this. 
115 | 116 | Access to an existing Google Cloud project with the Kubernetes Engine service enabled 117 | If you do not have a Google Cloud account please signup for a free trial 118 | [here](https://cloud.google.com). 119 | 120 | ### Run Demo in a Google Cloud Shell 121 | 122 | Click the button below to run the demo in a [Google Cloud Shell](https://cloud.google.com/shell/docs/). 123 | 124 | [![Open in Cloud Shell](http://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https://github.com/GoogleCloudPlatform/gke-networking-demos.git&cloudshell_git_branch=master&cloudshell_working_dir=gke-to-gke-peering&cloudshell_tutorial=README.md) 125 | 126 | All the tools for the demo are installed. When using Cloud Shell execute the following 127 | command in order to setup gcloud cli. 128 | 129 | ```console 130 | gcloud init 131 | ``` 132 | 133 | ### Supported Operating Systems 134 | 135 | This project will run on macOS, or in a [Google Cloud Shell](https://cloud.google.com/shell/docs/). 136 | 137 | ### Tools 138 | 139 | When not using Cloud Shell, the following tools are required. 140 | 141 | 1. gcloud cli ( >= Google Cloud SDK 200.0.0 ) 142 | 2. bash 143 | 3. kubectl - ( >= v1.11.0-gke.0 ) 144 | 4. jq 145 | 146 | ### Versions 147 | 1. Kubernetes Engine >= 1.11.0-gke.0 148 | 149 | ### Setup 150 | 151 | 1. Pull the code from git repo. 152 | 1. Optionally, customize the configuration in .yaml files under /network/ or /clusters/ or /manifests/, if needed. 153 | 154 | ### Directory Structure 155 | 1. The root folder is the "gke-networking-demos" folder. 156 | 1. The "network" folder contains the manifest files and Deployment Manager templates to setup networks. 157 | 1. The "clusters" folder contains the manifest files and Deployment Manager templates to create Kubernetes Engine clusters. 158 | 1. The "manifests" folder contains the manifest files to create Kubernetes Engine services. 159 | 1. 
The "gke-to-gke-peering" folder contains scripts specific to this demo. 160 | 161 | ## Deployment Steps 162 | 163 | The following steps will allow a user to 164 | 165 | 1. Change directory to `gke-to-gke-peering` 166 | 1. Run `./install.sh` 167 | 168 | ## Validation 169 | 1. Make sure that there are no errors in the install script execution. 170 | 1. Login to GCP console. 171 | 1. Verify that the CIDR ranges of subnet-us-west1 and subnet-us-central1 matches 172 | the specification. 173 | 1. Click on the VM instances in the Compute Engine and verify that the node IP addresses 174 | are drawn from the subnet's CIDR. 175 | 1. Verify the created clusters in Kubernetes Engine. Click on the cluster hyperlink 176 | and verify that "Container address range" matches the specified cluster-ipv4-cidr. 177 | 1. Click on workloads and verify that the status is OK for nginx pods. 178 | 1. Click on discovery & load balancing. Verify that the cluster ip, nodeport, ILB and LB are created for cluster1. 179 | 1. Click on discovery & load balancing. Verify that the cluster ip, nodeport, LB and ingress services are created for cluster2. 180 | 1. Verify that cluster IP address of all the services for a cluster are drawn 181 | from service-ipv4-cidr. 182 | 1. Access the endpoint for URL for external load balancer to view the nginx pods. 183 | 1. Change directory to `gke-to-gke-peering` 184 | 1. Run `./validate.sh` 185 | 186 | 187 | ## Verify the pod-to-service communication 188 | 1. Clusters in the same region communicate through the internal load balancer. 189 | 1. Clusters across the different regions communicate through the global load balancer. 190 | 1. All the services created to expose pods in a cluster are accessible to pods within that cluster. 191 | 1. Refer to validate-pod-to-service-communication.sh script to view the commands to verify pod to service communication. 192 | 1. Change directory back to project root. 
Run `./validate-pod-to-service-communication.sh` located in the project root directory 193 | 1. The above script demonstrates how the pods in cluster1 can access the local Kubernetes Engine services and the other Kubernetes Engine Internal/External load balancer services from the same or different regions. 194 | 195 | ## Tear Down 196 | 197 | 1. Change directory to `gke-to-gke-peering` 198 | 2. Run `./cleanup.sh` 199 | 3. Enter 'y' when prompted to delete the resources. 200 | 4. Verify that the script executed with no errors. 201 | 5. Verify that all the resources created are deleted. 202 | 203 | 204 | ## Troubleshooting 205 | 206 | 1. Remember to enable API's as mentioned in deployment steps in the project where the resources are to be created. Otherwise, API not enabled error is thrown. 207 | 1. Verify that the project is associated with a valid billing account. 208 | 1. Make sure to have the right permissions for the GCP account to create above GCP/Kubernetes Engine resources in project. Otherwise, permission denied error is thrown. 209 | 1. Make sure that the deployments created through install script are deleted before you try to re-install the resources. Otherwise, resources will not be installed properly. 210 | 1. If there are any errors in cleanup script execution, refer to steps for deleting resources manually. 211 | 212 | ## Deleting Resources Manually 213 | 1. Goto Kubernetes Engine -> services. Delete all the services created through install script. 214 | 1. Goto Network Services -> Load Balancing and delete the load balancers along with associated heathchecks. 215 | 1. Goto Compute Engine -> VM Instances and delete all the instances created through install script. 216 | 1. Goto Compute Engine -> Instance Groups and delete all the instance groups created through install script. 217 | 1. Goto VPC Networks -> Firewall Rules and delete the firewall rules created for network1. 218 | 1. 
#!/usr/bin/env bash
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

### Deletes all the resources created as part of gke-to-gke-peering POC.

dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
ROOT="$(dirname "${dir}")"

#shellcheck disable=SC1090
source "${ROOT}/verify-functions.sh"

command -v gcloud >/dev/null 2>&1 || \
  { echo >&2 "I require gcloud but it's not installed. Aborting.";exit 1; }

command -v kubectl >/dev/null 2>&1 || \
  { echo >&2 "I require kubectl but it's not installed. Aborting."; exit 1; }

### Obtain current active PROJECT_ID
PROJECT_ID=$(gcloud config get-value project)
if [ -z "${PROJECT_ID}" ]
then echo >&2 "I require default project is set but it's not. Aborting."; exit 1;
fi

# Delete the nginx workload and its services from one cluster, if running.
#   $1 = cluster name
#   $2 = zone the cluster was created in (see clusters/cluster.yaml)
#   $3 = cluster-specific service manifest: ilb-svc.yaml for cluster1/cluster3,
#        ingress-svc.yaml for cluster2/cluster4 (matches what install.sh applied)
# Load-balancer backed services are deleted with a short grace period so their
# GCP forwarding rules and backends are torn down before the deployments are.
delete_cluster_services() {
  local cluster="$1"
  local zone="$2"
  local extra_manifest="$3"
  if cluster_running "${PROJECT_ID}" "${cluster}"; then
    gcloud container clusters get-credentials "${cluster}" --zone "${zone}"
    kubectl config set-context "$(kubectl config current-context)" --namespace=default
    kubectl delete -f "${ROOT}"/manifests/lb-svc.yaml --cascade --grace-period 10
    kubectl delete -f "${ROOT}"/manifests/nodeport-svc.yaml
    kubectl delete -f "${ROOT}"/manifests/cluster-ip-svc.yaml
    kubectl delete -f "${ROOT}"/manifests/run-my-nginx.yaml
    kubectl delete -f "${ROOT}"/manifests/"${extra_manifest}" --cascade --grace-period 10
  fi
}

### Delete the services deployed in each of the four demo clusters.
delete_cluster_services cluster-deployment-cluster1 us-east1-d ilb-svc.yaml
delete_cluster_services cluster-deployment-cluster2 us-central1-b ingress-svc.yaml
delete_cluster_services cluster-deployment-cluster3 us-east1-c ilb-svc.yaml
delete_cluster_services cluster-deployment-cluster4 us-central1-c ingress-svc.yaml

### wait for all service related backends to get deleted.
### Otherwise, deletion of network deployments fails with dependent resources.
# NOTE(review): backends_exists (verify-functions.sh) appears to poll until
# the "k8s-ig" backends are gone — confirm its semantics there.
if backends_exists "${PROJECT_ID}" "k8s-ig"; then
  echo "Service related backends have been removed"
fi

### Delete clusters.
### deployment_exists exits with status 2 when there is no trace of the given
### deployment in Deployment Manager, in which case deletion is skipped.
### Capture $? immediately (shellcheck SC2181) so no later command can
### clobber it.
deployment_exists "${PROJECT_ID}" "cluster-deployment"
rc=$?
if [ "${rc}" -ne 2 ]; then
  deployment_deletes "${PROJECT_ID}" "cluster-deployment"
fi

### Delete VPC peering connections
if network_peering_exists "${PROJECT_ID}" "network1"; then
  network_peering_deletes "${PROJECT_ID}" "network1" "peer-network1-to-network2"
fi

if network_peering_exists "${PROJECT_ID}" "network2"; then
  network_peering_deletes "${PROJECT_ID}" "network2" "peer-network2-to-network1"
fi

### Delete network deployment (same tri-state check as for the clusters).
deployment_exists "${PROJECT_ID}" "network-deployment"
rc=$?
if [ "${rc}" -ne 2 ]; then
  deployment_deletes "${PROJECT_ID}" "network-deployment"
fi
#!/bin/bash -e

# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

### Creates GCP/GKE resources for GKE-to-GKE-communication-through-VPC-Peering
### demo.
### Refer to https://cloud.google.com/sdk/gcloud/ for usage of gcloud
### Deployment manager templates, gcloud and kubectl commands are used.

dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
ROOT="$(dirname "${dir}")"

#shellcheck disable=SC1090
source "${ROOT}/verify-functions.sh"

command -v gcloud >/dev/null 2>&1 || \
  { echo >&2 "I require gcloud but it's not installed. Aborting.";exit 1; }

command -v kubectl >/dev/null 2>&1 || \
  { echo >&2 "I require kubectl but it's not installed. Aborting."; exit 1; }

### Obtain current active PROJECT_ID
PROJECT_ID=$(gcloud config get-value project)
if [ -z "${PROJECT_ID}" ]
then echo >&2 "I require default project is set but it's not. Aborting."; exit 1;
fi

# Abort unless the project has room for the given Compute Engine quota metric
# (delegates to meets_quota from verify-functions.sh).
#   $1 = quota metric name, $2 = required amount
require_quota() {
  local metric="$1"
  local amount="$2"
  if ! meets_quota "${PROJECT_ID}" "${metric}" "${amount}"; then
    echo "Refer to https://cloud.google.com/compute/quotas"
    echo "Terminating..."
    exit 1
  fi
}

### Ensure that the required quotas are met before creating anything.
require_quota "FORWARDING_RULES" 8
require_quota "IN_USE_ADDRESSES" 6
require_quota "BACKEND_SERVICES" 4
require_quota "FIREWALLS" 42

### enable required service apis in the project
gcloud services enable \
  compute.googleapis.com \
  deploymentmanager.googleapis.com

### create networks and subnets
if ! deployment_exists "${PROJECT_ID}" "network-deployment"; then
  gcloud deployment-manager deployments create network-deployment \
    --config "${ROOT}"/network/network.yaml
fi

### create clusters
if ! deployment_exists "${PROJECT_ID}" "cluster-deployment"; then
  gcloud deployment-manager deployments create cluster-deployment \
    --config "${ROOT}"/clusters/cluster.yaml
fi

### create VPC peering connections between network1 & network2.
### Guarded so re-running the script does not abort: this script runs under
### `bash -e` and `peerings create` fails if the peering already exists.
# NOTE(review): network_peering_exists takes (project, network) — in this demo
# each network carries exactly one peering, so presence of any peering means
# ours exists; confirm in verify-functions.sh if that ever changes.
if ! network_peering_exists "${PROJECT_ID}" "network1"; then
  gcloud compute networks peerings create peer-network1-to-network2 \
    --network network1 --peer-network network2 --auto-create-routes
fi

if ! network_peering_exists "${PROJECT_ID}" "network2"; then
  gcloud compute networks peerings create peer-network2-to-network1 \
    --network network2 --peer-network network1 --auto-create-routes
fi

# Fetch credentials for one cluster, deploy the nginx pods, the common
# services, and one cluster-specific service manifest.
#   $1 = cluster name
#   $2 = zone (must match clusters/cluster.yaml)
#   $3 = extra manifest: ilb-svc.yaml (cluster1/cluster3) or
#        ingress-svc.yaml (cluster2/cluster4)
deploy_cluster_services() {
  local cluster="$1"
  local zone="$2"
  local extra_manifest="$3"
  gcloud container clusters get-credentials "${cluster}" --zone "${zone}"
  kubectl config set-context "$(kubectl config current-context)" --namespace=default
  kubectl apply -f "${ROOT}"/manifests/run-my-nginx.yaml
  kubectl apply -f "${ROOT}"/manifests/cluster-ip-svc.yaml
  kubectl apply -f "${ROOT}"/manifests/nodeport-svc.yaml
  kubectl apply -f "${ROOT}"/manifests/lb-svc.yaml
  kubectl apply -f "${ROOT}"/manifests/"${extra_manifest}"
}

### Deploy nginx pods and services into each of the four clusters.
deploy_cluster_services cluster-deployment-cluster1 us-east1-d ilb-svc.yaml
deploy_cluster_services cluster-deployment-cluster2 us-central1-b ingress-svc.yaml
deploy_cluster_services cluster-deployment-cluster3 us-east1-c ilb-svc.yaml
deploy_cluster_services cluster-deployment-cluster4 us-central1-c ingress-svc.yaml
15 | 16 | # bash "strict-mode", fail immediately if there is a problem 17 | set -o nounset 18 | set -o pipefail 19 | 20 | dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) 21 | ROOT="$(dirname "${dir}")" 22 | 23 | #shellcheck disable=SC1090 24 | source "${ROOT}/verify-functions.sh" 25 | 26 | PROJECT_ID=$(gcloud config get-value project) 27 | if [ -z "${PROJECT_ID}" ] 28 | then echo >&2 "I require default project is set but it's not. Aborting."; exit 1; 29 | fi 30 | 31 | ### Obtain Cluster Zone 32 | CLUSTER1_ZONE=$(gcloud container clusters list \ 33 | --filter="name=cluster-deployment-cluster1" --format "value(zone)") 34 | CLUSTER2_ZONE=$(gcloud container clusters list \ 35 | --filter="name=cluster-deployment-cluster2" --format "value(zone)") 36 | CLUSTER3_ZONE=$(gcloud container clusters list \ 37 | --filter="name=cluster-deployment-cluster3" --format "value(zone)") 38 | CLUSTER4_ZONE=$(gcloud container clusters list \ 39 | --filter="name=cluster-deployment-cluster4" --format "value(zone)") 40 | 41 | CLUSTER1_CONTEXT="gke_${PROJECT_ID}_${CLUSTER1_ZONE}_cluster-deployment-cluster1" 42 | CLUSTER2_CONTEXT="gke_${PROJECT_ID}_${CLUSTER2_ZONE}_cluster-deployment-cluster2" 43 | CLUSTER3_CONTEXT="gke_${PROJECT_ID}_${CLUSTER3_ZONE}_cluster-deployment-cluster3" 44 | CLUSTER4_CONTEXT="gke_${PROJECT_ID}_${CLUSTER4_ZONE}_cluster-deployment-cluster4" 45 | 46 | ### Ensure that the Networks exists 47 | if ! network_exists "${PROJECT_ID}" "network1" || \ 48 | ! network_exists "${PROJECT_ID}" "network2"; then 49 | echo "Network is missing" 50 | echo "Terminating..." 51 | exit 1 52 | fi 53 | 54 | ### Ensure that the Subnet range is correct 55 | if ! verify_cidr_range "${PROJECT_ID}" "subnet1-us-east1" "10.1.0.0/28"; then 56 | echo "Subnet ip range is incorrect" 57 | echo "Terminating..." 58 | exit 1 59 | fi 60 | 61 | ### Ensure that the Subnet range is correct 62 | if ! 
verify_cidr_range "${PROJECT_ID}" "subnet2-us-central1" "10.2.0.0/28"; then 63 | echo "Subnet ip range is incorrect" 64 | echo "Terminating..." 65 | exit 1 66 | fi 67 | 68 | ### Ensure that the Subnet range is correct 69 | if ! verify_cidr_range "${PROJECT_ID}" "subnet3-us-east1" "10.11.0.0/28"; then 70 | echo "Subnet ip range is incorrect" 71 | echo "Terminating..." 72 | exit 1 73 | fi 74 | 75 | ### Ensure that the Subnet range is correct 76 | if ! verify_cidr_range "${PROJECT_ID}" "subnet4-us-central1" "10.12.0.0/28"; then 77 | echo "Subnet ip range is incorrect" 78 | echo "Terminating..." 79 | exit 1 80 | fi 81 | 82 | ### Ensure that VPC peering exists 83 | if ! network_peering_exists "${PROJECT_ID}" "network1"; then 84 | echo "Peering does not exist" 85 | echo "Terminating..." 86 | exit 1 87 | fi 88 | 89 | if ! network_peering_exists "${PROJECT_ID}" "network2"; then 90 | echo "Peering does not exist" 91 | echo "Terminating..." 92 | exit 1 93 | fi 94 | 95 | ### Ensure that the clusters are running 96 | for (( c=1; c<=4; c++ )) 97 | do 98 | if ! cluster_running "${PROJECT_ID}" "cluster-deployment-cluster$c"; then 99 | echo "cluster$c is missing or is not running" 100 | echo "Terminating..." 101 | exit 1 102 | fi 103 | done 104 | 105 | ### Check external nginx service ips for cluster1 106 | if ! access_service "${PROJECT_ID}" "${CLUSTER1_CONTEXT}" "my-nginx-lb"; then 107 | echo "Service ip is not available" 108 | echo "Terminating..." 109 | exit 1 110 | fi 111 | 112 | ### Check internal nginx service ips for cluster1 113 | if ! access_service "${PROJECT_ID}" "${CLUSTER1_CONTEXT}" "my-nginx-ilb"; then 114 | echo "Service ip is not available" 115 | echo "Terminating..." 116 | exit 1 117 | fi 118 | 119 | ### Check external nginx service ips for cluster2 120 | if ! access_service "${PROJECT_ID}" "${CLUSTER2_CONTEXT}" "my-nginx-lb"; then 121 | echo "Service ip is not available" 122 | echo "Terminating..." 
123 | exit 1 124 | fi 125 | 126 | ### Check external nginx service ips for cluster3 127 | if ! access_service "${PROJECT_ID}" "${CLUSTER3_CONTEXT}" "my-nginx-lb"; then 128 | echo "Service ip is not available" 129 | echo "Terminating..." 130 | exit 1 131 | fi 132 | 133 | ### Check internal nginx service ips for cluster3 134 | if ! access_service "${PROJECT_ID}" "${CLUSTER3_CONTEXT}" "my-nginx-ilb"; then 135 | echo "Service ip is not available" 136 | echo "Terminating..." 137 | exit 1 138 | fi 139 | 140 | ### Check external nginx service ips for cluster4 141 | if ! access_service "${PROJECT_ID}" "${CLUSTER4_CONTEXT}" "my-nginx-lb"; then 142 | echo "Service ip is not available" 143 | echo "Terminating..." 144 | exit 1 145 | fi 146 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/README-QWIKLABS.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Engine Networking Demo 2 | ## Kubernetes Engine Communication Through VPN 3 | ## Table of Contents 4 | 5 | * [Introduction](#introduction) 6 | * [Architecture](#architecture) 7 | * [GCP Network network1](#gcp-network-network1) 8 | * [Kubernetes Engine Cluster 1](#gke-cluster-1) 9 | * [Kubernetes Engine Cluster 2](#gke-cluster-2) 10 | * [Other Resources](#other-resources) 11 | * [GCP Network network2](#gcp-network-network2) 12 | * [Kubernetes Engine Cluster 3](#gke-cluster-3) 13 | * [Kubernetes Engine Cluster 4](#gke-cluster-4) 14 | * [Other Resources](#other-resources-1) 15 | * [Notes](#notes) 16 | * [Deployment Steps](#deployment-steps) 17 | * [Validation](#validation) 18 | * [Verify the pod-to-service communication](#verify-the-pod-to-service-communication) 19 | * [Tear Down](#tear-down) 20 | * [Troubleshooting](#troubleshooting) 21 | * [Deleting Resources Manually](#deleting-resources-manually) 22 | * [Relevant Materials](#relevant-materials) 23 | 24 | 25 | ## Introduction 26 | 27 | Google Cloud networking with Kubernetes 
Engine clusters can be 28 | complex. This project strives to simplify the best practices for exposing cluster 29 | services to other clusters and establishing network links between Kubernetes Engine clusters 30 | running in separate projects or between a Kubernetes Engine cluster and a cluster running 31 | in an on-premises datacenter. 32 | 33 | The code contains a set of Deployment Manager templates that allows a user to 34 | create networks, subnets, and Kubernetes Engine clusters. This project demonstrates the 35 | following best practices. 36 | 37 | 1. Network design of launching Kubernetes Engine clusters in custom networks. 38 | 1. Assigning node CIDR, container CIDR and service CIDR for Kubernetes Engine clusters. 39 | 1. IP range management. 40 | 1. Exposing pods of Kubernetes Engine clusters over networks connected using VPN. 41 | 42 | This example also includes Kubernetes manifests for: 43 | 44 | 1. Deploying Nginx pods in clusters. 45 | 1. Exposing Nginx pods of clusters with different types of services like cluster 46 | IP, nodeport, internal load balancer, Network Load Balancer and Ingress. 47 | 1. Validating pod-to-service communication over networks connected using VPN. 48 | 49 | ## Architecture 50 | 51 | The execution of this demo in the GCP environment creates two custom GCP networks. Each network will have two subnets one in the us-west1 region and the other in the us-central1 region. Each of the subnets hosts a Kubernetes Engine cluster which has nginx pods and services to expose those pods across other clusters. Both networks are connected using VPN. Kubernetes Engine internal load balancers are regional services. VPN gateway per region is needed to reach ILB services in that region. Hence four VPN gateways are created in both projects. Please refer to https://cloud.google.com/compute/docs/load-balancing/internal/#global_routing_issueVPN for more details. 
52 | 53 | In this project, we are using route-based VPN over policy-based VPN to establish pod-to-service communication. In the VPN tunnel configuration, node CIDR, pod CIDR and service CIDR's from peer remote network need to be added so that nodes, pods and services can reach exposed services from other clusters. 54 | 55 | ![Kuberenetes Engine-Communication-Through-VPN](gke-to-gke-vpn-architecture.png) 56 | 57 | Below is the detailed overview of GCP resources which will be created. 58 | 59 | ### GCP Network 1 60 | #### Kubernetes Engine Cluster 1 61 | 1. Subnet: subnet1-us-west1 (10.1.0.0/28) 62 | 63 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 64 | |---|---|---|---|---| 65 | |10.108.0.0/19|10.208.0.0/20|us-west1-b|3|COS 66 | 67 | #### Kubernetes Engine Cluster 2 68 | 1. Subnet: subnet1-us-central1 (10.2.0.0/28) 69 | 70 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 71 | |---|---|---|---|---| 72 | |10.118.0.0/19|10.218.0.0/20|us-central1-b|3|COS 73 | 74 | #### Other Resources 75 | 1. Cluster IP, Nodeport, ILB, LB and Ingress services to expose pods in each of 76 | those clusters. 77 | 1. VPN gateways 78 | 79 | |Gateway name|Google IP address|Network|Region|Tunnels 80 | |---|---|---|---|---| 81 | |vpn1-deployment-gateway|x.x.x.x|network1|us-west1|vpn1-deployment-tunnel| 82 | |vpn2-deployment-gateway|x.x.x.x|network1|us-east1|vpn2-deployment-tunnel| 83 | 84 | 1. VPN Tunnels 85 | 86 | |Tunnel name|Status|Google gateway|Google IP address|Google network|Region|Peer IP address|Routing type 87 | |---|---|---|---|---|---|---|---| 88 | |vpn1-deployment-tunnel|Established|vpn1-deployment-gateway|x.x.x.x|network1|us-west1|vpn3-static-ip|Route-based| 89 | |vpn2-deployment-tunnel|Established|vpn2-deployment-gateway|x.x.x.x|network1|us-east1|vpn4-static-ip|Route-based| 90 | 91 | ### GCP Network 2 92 | #### Kubernetes Engine Cluster 3 93 | 94 | 1. 
Subnet: subnet3-us-west1 (10.11.0.0/28) 95 | 96 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 97 | |---|---|---|---|---| 98 | |10.128.0.0/19|10.228.0.0/20|us-west1-c|3|COS| 99 | 100 | #### Kubernetes Engine Cluster 4 101 | 1. Subnet: subnet4-us-central1 (10.12.0.0/28) 102 | 103 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 104 | |---|---|---|---|---| 105 | |10.138.0.0/19|10.238.0.0/20|us-central1-c|3|COS 106 | 107 | #### Other Resources 108 | 1. Cluster IP, Nodeport, ILB, LB and Ingress services to expose pods in each of 109 | those clusters. 110 | 1. VPN gateways 111 | 112 | |Gateway name|Google IP address|Network|Region|Tunnels| 113 | |---|---|---|---|---| 114 | |vpn3-deployment-gateway|x.x.x.x|network2|us-west1|vpn3-deployment-tunnel| 115 | |vpn4-deployment-gateway|x.x.x.x|network2|us-east1|vpn4-deployment-tunnel| 116 | 117 | 1. VPN Tunnels 118 | 119 | |Tunnel name|Status|Google gateway|Google IP address|Google network|Region|Peer IP address|Routing type| 120 | |---|---|---|---|---|---|---|---| 121 | |vpn3-deployment-tunnel|Established|vpn3-deployment-gateway|x.x.x.x|network2|us-west1|vpn1-static-ip|Route-based| 122 | |vpn4-deployment-tunnel|Established|vpn4-deployment-gateway|x.x.x.x|network2|us-east1|vpn2-static-ip|Route-based| 123 | 124 | ### Notes 125 | 1. Region for subnets and Node CIDR can be customized in /network/network.yaml. 126 | 1. Cluster attributes like zone, image, node count, cluster CIDR and service CIDR can be customized in clusters/cluster.yaml. 127 | 1. To add additional custom attributes to network or clusters, yaml files (*.yaml) and deployment manager scripts (*.py) at "/network/" or "/clusters/" need to be updated accordingly. 128 | 129 | ## Deployment Steps 130 | 131 | The following steps will allow a user to run this demo. 132 | 133 | 1. Change directory to `gke-to-gke-vpn` 134 | 1. Run `./install.sh` 135 | 136 | ## Validation 137 | 1. 
Make sure that there are no errors in the install script execution. 138 | 1. Login to GCP console. 139 | 1. Use the navigation menu, accessible at the top-left of the console, to select services in the following steps. 140 | ![Navigation Menu](../images/nav_menu_demo.png) 141 | 1. Select "VPC networks" and confirm that CIDR ranges of subnet1-us-west1 is 10.1.0.0/28 and subnet2-us-central1 is 10.2.0.0/28, matching the specification. 142 | 143 | 144 | ![VPC Networks](../images/vpc_networks.png) 145 | 1. Select "Compute Engine"-> VM instances and see that the cluster VM instances are drawn from the subnet's CIDR ranges. 146 | ![VM IPs](../images/vm_internal_ips.png) 147 | 1. Select "Kubernetes Engine"->"cluster1" and see that "Container address range" matches the diagram (10.108.0.0/19). Repeat for the other three clusters: 148 | ![VM IPs](../images/cluster_details.png) 149 | * Repeat for the other three clusters: 150 | * cluster2: 10.118.0.0/19 151 | * cluster3: 10.128.0.0/19 152 | * cluster4: 10.138.0.0/19 153 | 1. Select "Kubernetes Engine"-> "Workloads" and verify that the status is OK for nginx pods. 154 | ![Workloads](../images/workloads.png) 155 | 1. Select "Kubernetes Engine" -> "Services" and see that the cluster ip, nodeport, ILB and LB are created for cluster1 and that cluster IP address of all the services for a cluster are drawn from the service ipv4 CIDR range 156 | ![Services](../images/services.png) 157 | 1. Try to access the IP of the external load balancer to view the nginx pods. The external IP 158 | will be displayed in the "my-nginx-lb" row: 159 | ![Nginx External IP](../images/nginx_external_ip.png) 160 | ![Nginx Default Page](../images/nginx.png) 161 | 1. Change directory to `gke-to-gke-vpn` 162 | 1. Run `./validate.sh` 163 | 164 | ## Verify the pod-to-service communication 165 | 1. Clusters in the same region communicate through the internal load balancer. 166 | 1. 
Clusters across the different regions communicate through the global load balancer, unless they are peered via VPN. When peered via VPN, clusters can still communicate via internal load balancers. 167 | 1. All the services created to expose pods in a cluster are accessible to pods within that cluster. 168 | 1. Refer to validate-pod-to-service-communication.sh script to view the commands to verify pod to service communication. 169 | 1. Change directory back to project root. Run `./validate-pod-to-service-communication.sh` located in the project root directory 170 | 1. The above script demonstrates how the pods in cluster1 can access the local Kubernetes Engine services and the other Kubernetes Engine Internal/External load balancer services from the same or different regions. 171 | 172 | ## Tear Down 173 | 174 | 1. Change directory to `gke-to-gke-vpn` 175 | 1. Run `./cleanup.sh` 176 | 1. Verify that the script executed with no errors. 177 | 1. Verify that all the resources created are deleted. 178 | 179 | 180 | ## Troubleshooting 181 | 182 | 1. Remember to enable API's as mentioned in deployment steps where the resources are to be created. Otherwise, API not enabled error is thrown. 183 | 1. Make sure to have the right permissions for the GCP account to create above GCP/Kubernetes Engine resources. Otherwise, permission denied error is thrown. 184 | 1. Make sure that the deployments created through install script are deleted before you try to re-install the resources. Otherwise, resources will not be installed properly. 185 | 1. If there are any errors in cleanup script execution, refer to steps for deleting resources manually. 186 | 187 | ## Deleting Resources Manually 188 | 1. Select the project in GCP cloud console. 189 | 1. Goto Kubernetes Engine -> services. Delete all the services created through install script. 190 | 1. Goto Network Services -> Load Balancing and delete the load balancers along with associated health checks. 191 | 1. 
Goto Compute Engine -> VM Instances and delete all the instances created through install script. 192 | 1. Goto Compute Engine -> Instance Groups and delete all the instance groups created through install script. 193 | 1. Goto VPC Networks -> Firewall Rules and delete the firewall rules created for network1. 194 | 1. Goto Deployment Manager -> Deployments and delete vpn, static-ip, cluster and network deployments in the same order. 195 | 1. Delete the dependent resources if network deployment doesn't get deleted. 196 | 197 | ## Relevant Materials 198 | 199 | * [Cloud VPN Overview](https://cloud.google.com/vpn/docs/concepts/overview) 200 | * [Internal Load Balancing](https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing) 201 | * [Exposing an Application to External Traffic](https://cloud.google.com/kubernetes-engine/docs/how-to/exposing-apps) 202 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Engine Networking Demo 2 | ## Kubernetes Engine Communication Through VPN 3 | ## Table of Contents 4 | 5 | * [Introduction](#introduction) 6 | * [Architecture](#architecture) 7 | * [GCP Network network1](#gcp-network-network1) 8 | * [Kubernetes Engine Cluster 1](#gke-cluster-1) 9 | * [Kubernetes Engine Cluster 2](#gke-cluster-2) 10 | * [Other Resources](#other-resources) 11 | * [GCP Network network2](#gcp-network-network2) 12 | * [Kubernetes Engine Cluster 3](#gke-cluster-3) 13 | * [Kubernetes Engine Cluster 4](#gke-cluster-4) 14 | * [Other Resources](#other-resources-1) 15 | * [Notes](#notes) 16 | * [Prerequisites](#prerequisites) 17 | * [Run Demo in a Google Cloud Shell](#run-demo-in-a-google-cloud-shell) 18 | * [Supported Operating Systems](#supported-operating-systems) 19 | * [Tools](#tools) 20 | * [Versions](#versions) 21 | * [Setup](#setup) 22 | * [Directory Structure](#directory-structure) 
23 | * [Deployment Steps](#deployment-steps) 24 | * [Validation](#validation) 25 | * [Verify the pod-to-service communication](#verify-the-pod-to-service-communication) 26 | * [Tear Down](#tear-down) 27 | * [Troubleshooting](#troubleshooting) 28 | * [Deleting Resources Manually](#deleting-resources-manually) 29 | * [Relevant Materials](#relevant-materials) 30 | 31 | 32 | ## Introduction 33 | 34 | Google Cloud networking with Kubernetes Engine clusters can be 35 | complex. This project strives to simplify the best practices for exposing cluster 36 | services to other clusters and establishing network links between Kubernetes Engine clusters 37 | running in separate projects or between a Kubernetes Engine cluster and a cluster running 38 | in an on-premises datacenter. 39 | 40 | The code contains a set of Deployment Manager templates that allows a user to 41 | create networks, subnets, and Kubernetes Engine clusters. This project demonstrates the 42 | following best practices. 43 | 44 | 1. Network design of launching Kubernetes Engine clusters in custom networks. 45 | 1. Assigning node CIDR, container CIDR and service CIDR for Kubernetes Engine clusters. 46 | 1. IP range management. 47 | 1. Exposing pods of Kubernetes Engine clusters over networks connected using VPN. 48 | 49 | This example also includes Kubernetes manifests for: 50 | 51 | 1. Deploying Nginx pods in clusters. 52 | 1. Exposing Nginx pods of clusters with different types of services like cluster 53 | IP, nodeport, internal load balancer, Network Load Balancer and Ingress. 54 | 1. Validating pod-to-service communication over networks connected using VPN. 55 | 56 | ## Architecture 57 | 58 | The execution of this demo in the GCP environment creates two custom GCP networks. Each network will have two subnets one in the us-west1 region and the other in the us-central1 region. Each of the subnets hosts a Kubernetes Engine cluster which has nginx pods and services to expose those pods across other clusters. 
Both networks are connected using VPN. Kubernetes Engine internal load balancers are regional services. VPN gateway per region is needed to reach ILB services in that region. Hence four VPN gateways are created in both projects. Please refer to https://cloud.google.com/compute/docs/load-balancing/internal/#global_routing_issueVPN for more details. 59 | 60 | In this project, we are using route-based VPN over policy-based VPN to establish pod-to-service communication. In the VPN tunnel configuration, node CIDR, pod CIDR and service CIDR's from peer remote network need to be added so that nodes, pods and services can reach exposed services from other clusters. 61 | 62 | ![Kuberenetes Engine-Communication-Through-VPN](gke-to-gke-vpn-architecture.png) 63 | 64 | Below is the detailed overview of GCP resources which will be created. 65 | 66 | ### GCP Network 1 67 | #### Kubernetes Engine Cluster 1 68 | 1. Subnet: subnet1-us-west1 (10.1.0.0/28) 69 | 70 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 71 | |---|---|---|---|---| 72 | |10.108.0.0/19|10.208.0.0/20|us-west1-b|3|COS 73 | 74 | #### Kubernetes Engine Cluster 2 75 | 1. Subnet: subnet1-us-central1 (10.2.0.0/28) 76 | 77 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 78 | |---|---|---|---|---| 79 | |10.118.0.0/19|10.218.0.0/20|us-central1-b|3|COS 80 | 81 | #### Other Resources 82 | 1. Cluster IP, Nodeport, ILB, LB and Ingress services to expose pods in each of 83 | those clusters. 84 | 1. VPN gateways 85 | 86 | |Gateway name|Google IP address|Network|Region|Tunnels 87 | |---|---|---|---|---| 88 | |vpn1-deployment-gateway|x.x.x.x|network1|us-west1|vpn1-deployment-tunnel| 89 | |vpn2-deployment-gateway|x.x.x.x|network1|us-central1|vpn2-deployment-tunnel| 90 | 91 | 1. 
VPN Tunnels 92 | 93 | |Tunnel name|Status|Google gateway|Google IP address|Google network|Region|Peer IP address|Routing type 94 | |---|---|---|---|---|---|---|---| 95 | |vpn1-deployment-tunnel|Established|vpn1-deployment-gateway|x.x.x.x|network1|us-west1|vpn3-static-ip|Route-based| 96 | |vpn2-deployment-tunnel|Established|vpn2-deployment-gateway|x.x.x.x|network1|us-central1|vpn4-static-ip|Route-based| 97 | 98 | ### GCP Network 2 99 | #### Kubernetes Engine Cluster 3 100 | 101 | 1. Subnet: subnet3-us-west1 (10.11.0.0/28) 102 | 103 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 104 | |---|---|---|---|---| 105 | |10.128.0.0/19|10.228.0.0/20|us-west1-c|3|COS| 106 | 107 | #### Kubernetes Engine Cluster 4 108 | 1. Subnet: subnet4-us-central1 (10.12.0.0/28) 109 | 110 | |cluster-ipv4-cidr|service-ipv4-cidr|zone|Initial Node count|Node Image 111 | |---|---|---|---|---| 112 | |10.138.0.0/19|10.238.0.0/20|us-central1-c|3|COS| 113 | 114 | #### Other Resources 115 | 1. Cluster IP, Nodeport, ILB, LB and Ingress services to expose pods in each of 116 | those clusters. 117 | 1. VPN gateways 118 | 119 | |Gateway name|Google IP address|Network|Region|Tunnels| 120 | |---|---|---|---|---| 121 | |vpn3-deployment-gateway|x.x.x.x|network2|us-west1|vpn3-deployment-tunnel| 122 | |vpn4-deployment-gateway|x.x.x.x|network2|us-central1|vpn4-deployment-tunnel| 123 | 124 | 1. VPN Tunnels 125 | 126 | |Tunnel name|Status|Google gateway|Google IP address|Google network|Region|Peer IP address|Routing type| 127 | |---|---|---|---|---|---|---|---| 128 | |vpn3-deployment-tunnel|Established|vpn3-deployment-gateway|x.x.x.x|network2|us-west1|vpn1-static-ip|Route-based| 129 | |vpn4-deployment-tunnel|Established|vpn4-deployment-gateway|x.x.x.x|network2|us-central1|vpn2-static-ip|Route-based| 130 | 131 | ### Notes 132 | 1. Region for subnets and Node CIDR can be customized in /network/network.yaml. 133 | 1. 
Cluster attributes like zone, image, node count, cluster CIDR and service CIDR can be customized in clusters/cluster.yaml. 134 | 1. To add additional custom attributes to network or clusters, yaml files (*.yaml) and deployment manager scripts (*.py) at "/network/" or "/clusters/" need to be updated accordingly. 135 | 136 | ## Prerequisites 137 | 138 | A Google Cloud account and project is required for this. 139 | 140 | Access to an existing Google Cloud project with the Kubernetes Engine service enabled 141 | If you do not have a Google Cloud account please signup for a free trial 142 | [here](https://cloud.google.com). 143 | 144 | ### Run Demo in a Google Cloud Shell 145 | 146 | Click the button below to run the demo in a [Google Cloud Shell](https://cloud.google.com/shell/docs/). 147 | 148 | [![Open in Cloud Shell](http://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https://github.com/GoogleCloudPlatform/gke-networking-demos.git&cloudshell_git_branch=master&cloudshell_working_dir=gke-to-gke-vpn&cloudshell_tutorial=README.md) 149 | 150 | All the tools for the demo are installed. When using Cloud Shell execute the following 151 | command in order to setup gcloud cli. 152 | 153 | ```console 154 | gcloud init 155 | ``` 156 | 157 | ### Supported Operating Systems 158 | 159 | This project will run on macOS, or in a [Google Cloud Shell](https://cloud.google.com/shell/docs/). 160 | 161 | ### Tools 162 | 163 | When not using Cloud Shell, the following tools are required. 164 | 165 | 1. [Terraform >= 0.12.3](https://www.terraform.io/downloads.html) 166 | 2. gcloud cli ( >= Google Cloud SDK 253.0.0 ) 167 | 3. bash 168 | 4. kubectl - ( >= v1.10.0-gke.0 ) 169 | 5. jq 170 | 171 | ### Versions 172 | 1. Kubernetes Engine >= 1.10.0-gke.0 173 | 174 | ### Setup 175 | 176 | 1. Pull the code from git repo. 177 | 1. 
Optionally, customize the configuration in .yaml files under /network/ or /clusters/ or /manifests/, if needed. 178 | 179 | ### Directory Structure 180 | 1. The root folder is the "Kubernetes Engine-networking-demos" folder. 181 | 1. The "network" folder contains the manifest files and Deployment Manager templates to setup networks. 182 | 1. The "clusters" folder contains the manifest files and Deployment Manager templates to create Kubernetes Engine clusters. 183 | 1. The "manifests" folder contains the manifest files to create Kubernetes Engine services. 184 | 185 | ## Deployment Steps 186 | 187 | The following steps will allow a user to run this demo. 188 | 189 | 1. Change directory to `gke-to-gke-vpn` 190 | 1. Run `./install.sh` 191 | 192 | ## Validation 193 | 1. Make sure that there are no errors in the install script execution. 194 | 1. Login to GCP console. 195 | 1. Use the navigation menu, accessible at the top-left of the console, to select services in the following steps. 196 | ![Navigation Menu](../images/nav_menu_demo.png) 197 | 1. Select "VPC networks" and confirm that CIDR ranges of subnet1-us-west1 is 10.1.0.0/28 and subnet2-us-central1 is 10.2.0.0/28 matching 198 | the specification. 199 | ![VPC Networks](../images/vpc_networks.png) 200 | 1. Select "Compute Engine"-> VM instances and see that the cluster VM instances are drawn from the subnet's CIDR ranges. 201 | ![VM IPs](../images/vm_internal_ips.png) 202 | 1. Select "Kubernetes Engine"->"cluster1" and see that "Container address range" matches the diagram (10.108.0.0/19). Repeat for the other three clusters: 203 | ![VM IPs](../images/cluster_details.png) 204 | * Repeat for the other three clusters: 205 | * cluster2: 10.118.0.0/19 206 | * cluster3: 10.128.0.0/19 207 | * cluster4: 10.138.0.0/19 208 | 1. Select "Kubernetes Engine"-> "Workloads" and verify that the status is OK for nginx pods. 209 | ![Workloads](../images/workloads.png) 210 | 1. 
Select "Kubernetes Engine" -> "Services" and see that the cluster ip, nodeport, ILB and LB are created for cluster1 and that cluster IP address of all the services for a cluster are drawn from the service ipv4 CIDR range 211 | ![Services](../images/services.png) 212 | 1. Try to access the IP of the external load balancer to view the nginx pods. The external IP 213 | will be displayed in the "my-nginx-lb" row: 214 | ![Nginx External IP](../images/nginx_external_ip.png) 215 | ![Nginx Default Page](../images/nginx.png) 216 | 1. Change directory to `gke-to-gke-vpn` 217 | 1. Run `./validate.sh` 218 | 219 | ## Verify the pod-to-service communication 220 | 1. Clusters in the same region communicate through the internal load balancer. 221 | 1. Clusters across the different regions communicate through the global load balancer, unless they are peered via VPN. When peered via VPN, clusters can still communicate via internal load balancers. 222 | 1. All the services created to expose pods in a cluster are accessible to pods within that cluster. 223 | 1. Refer to validate-pod-to-service-communication.sh script to view the commands to verify pod to service communication. 224 | 1. Change directory back to project root. Run `./validate-pod-to-service-communication.sh` located in the project root directory 225 | 1. The above script demonstrates how the pods in cluster1 can access the local Kubernetes Engine services and the other Kubernetes Engine Internal/External load balancer services from the same or different regions. 226 | 227 | ## Tear Down 228 | 229 | 1. Change directory to `gke-to-gke-vpn` 230 | 1. Run `./cleanup.sh` 231 | 1. Verify that the script executed with no errors. 232 | 1. Verify that all the resources created are deleted. 233 | 234 | 235 | ## Troubleshooting 236 | 237 | 1. Remember to enable API's as mentioned in deployment steps where the resources are to be created. Otherwise, API not enabled error is thrown. 238 | 1. 
Make sure to have the right permissions for the GCP account to create above GCP/Kubernetes Engine resources. Otherwise, permission denied error is thrown. 239 | 1. Make sure that the deployments created through install script are deleted before you try to re-install the resources. Otherwise, resources will not be installed properly. 240 | 1. If there are any errors in cleanup script execution, refer to steps for deleting resources manually. 241 | 242 | ## Deleting Resources Manually 243 | 1. Select the project in GCP cloud console. 244 | 1. Goto Kubernetes Engine -> services. Delete all the services created through install script. 245 | 1. Goto Network Services -> Load Balancing and delete the load balancers along with associated heathchecks. 246 | 1. Goto Compute Engine -> VM Instances and delete all the instances created through install script. 247 | 1. Goto Compute Engine -> Instance Groups and delete all the instance groups created through install script. 248 | 1. Goto VPC Networks -> Firewall Rules and delete the firewall rules created for network1. 249 | 1. Goto Deployment Manager -> Deployments and delete vpn, static-ip, cluster and network deployments in the same order. 250 | 1. Delete the dependent resources if network deployment doesn't get deleted. 
251 | 252 | ## Relevant Materials 253 | 254 | * [Cloud VPN Overview](https://cloud.google.com/vpn/docs/concepts/overview) 255 | * [Internal Load Balancing](https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing) 256 | * [Exposing an Application to External Traffic](https://cloud.google.com/kubernetes-engine/docs/how-to/exposing-apps) 257 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2018 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # https://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | ### Deletes all the resources created as part of gke-to-gke-vpn POC. 17 | 18 | dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) 19 | ROOT="$(dirname "${dir}")" 20 | 21 | #shellcheck disable=SC1090 22 | source "${ROOT}/verify-functions.sh" 23 | 24 | command -v gcloud >/dev/null 2>&1 || \ 25 | { echo >&2 "I require gcloud but it's not installed. Aborting.";exit 1; } 26 | 27 | command -v kubectl >/dev/null 2>&1 || \ 28 | { echo >&2 "I require kubectl but it's not installed. Aborting."; exit 1; } 29 | 30 | ### Obtain current active PROJECT_ID 31 | PROJECT_ID=$(gcloud config get-value project) 32 | if [ -z "${PROJECT_ID}" ] 33 | then echo >&2 "I require default project is set but it's not. 
Aborting."; exit 1; 34 | fi 35 | 36 | ### Delete cluster1 services 37 | if cluster_running "${PROJECT_ID}" "cluster-deployment-cluster1"; then 38 | gcloud container clusters get-credentials cluster-deployment-cluster1 \ 39 | --zone us-east1-d 40 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 41 | kubectl delete -f "${ROOT}"/manifests/lb-svc.yaml --cascade --grace-period 10 42 | kubectl delete -f "${ROOT}"/manifests/nodeport-svc.yaml 43 | kubectl delete -f "${ROOT}"/manifests/cluster-ip-svc.yaml 44 | kubectl delete -f "${ROOT}"/manifests/run-my-nginx.yaml 45 | kubectl delete -f "${ROOT}"/manifests/ilb-svc.yaml --cascade --grace-period 10 46 | fi 47 | 48 | ### Delete cluster2 services 49 | if cluster_running "${PROJECT_ID}" "cluster-deployment-cluster2"; then 50 | gcloud container clusters get-credentials cluster-deployment-cluster2 \ 51 | --zone us-central1-b 52 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 53 | kubectl delete -f "${ROOT}"/manifests/lb-svc.yaml --cascade --grace-period 10 54 | kubectl delete -f "${ROOT}"/manifests/nodeport-svc.yaml 55 | kubectl delete -f "${ROOT}"/manifests/cluster-ip-svc.yaml 56 | kubectl delete -f "${ROOT}"/manifests/run-my-nginx.yaml 57 | kubectl delete -f "${ROOT}"/manifests/ingress-svc.yaml --cascade --grace-period 10 58 | fi 59 | 60 | ### Delete cluster3 services 61 | if cluster_running "${PROJECT_ID}" "cluster-deployment-cluster3"; then 62 | gcloud container clusters get-credentials cluster-deployment-cluster3 \ 63 | --zone us-east1-c 64 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 65 | kubectl delete -f "${ROOT}"/manifests/lb-svc.yaml --cascade --grace-period 10 66 | kubectl delete -f "${ROOT}"/manifests/nodeport-svc.yaml 67 | kubectl delete -f "${ROOT}"/manifests/cluster-ip-svc.yaml 68 | kubectl delete -f "${ROOT}"/manifests/run-my-nginx.yaml 69 | kubectl delete -f "${ROOT}"/manifests/ilb-svc.yaml --cascade 
--grace-period 10 70 | fi 71 | 72 | ### Delete cluster4 services 73 | if cluster_running "${PROJECT_ID}" "cluster-deployment-cluster4"; then 74 | gcloud container clusters get-credentials cluster-deployment-cluster4 \ 75 | --zone us-central1-c 76 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 77 | kubectl delete -f "${ROOT}"/manifests/lb-svc.yaml --cascade --grace-period 10 78 | kubectl delete -f "${ROOT}"/manifests/nodeport-svc.yaml 79 | kubectl delete -f "${ROOT}"/manifests/cluster-ip-svc.yaml 80 | kubectl delete -f "${ROOT}"/manifests/run-my-nginx.yaml 81 | kubectl delete -f "${ROOT}"/manifests/ingress-svc.yaml --cascade --grace-period 10 82 | fi 83 | 84 | ### wait for all service related backends to get deleted. 85 | ### Otherwise, deletion of network deployments fails with dependent resources. 86 | if backends_exists "${PROJECT_ID}" "k8s-ig"; then 87 | echo "Service related backends have been removed" 88 | fi 89 | 90 | (cd "$ROOT/gke-to-gke-vpn/terraform" && terraform destroy -auto-approve) 91 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/gke-to-gke-vpn-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GoogleCloudPlatform/gke-networking-demos/a2f141350ec649fe3c1da79407206f1f6b8c18e0/gke-to-gke-vpn/gke-to-gke-vpn-architecture.png -------------------------------------------------------------------------------- /gke-to-gke-vpn/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Copyright 2018 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | ### Creates GCP/GKE resources for GKE-to-GKE-communication-through-VPN 18 | ### Refer to https://cloud.google.com/sdk/gcloud/ for usage of gcloud 19 | ### Deployment manager templates, gcloud and kubectl commands are used. 20 | 21 | dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) 22 | ROOT="$(dirname "${dir}")" 23 | 24 | #shellcheck disable=SC1090 25 | source "${ROOT}/verify-functions.sh" 26 | 27 | command -v gcloud >/dev/null 2>&1 || \ 28 | { echo >&2 "I require gcloud but it's not installed. Aborting.";exit 1; } 29 | 30 | command -v kubectl >/dev/null 2>&1 || \ 31 | { echo >&2 "I require kubectl but it's not installed. Aborting."; exit 1; } 32 | 33 | ### Obtain current active PROJECT_ID 34 | PROJECT_ID=$(gcloud config get-value project) 35 | if [ -z "${PROJECT_ID}" ] 36 | then echo >&2 "I require default project is set but it's not. Aborting."; exit 1; 37 | fi 38 | 39 | echo "project=\"$PROJECT_ID\"" > "$ROOT"/gke-to-gke-vpn/terraform/terraform.tfvars 40 | 41 | ### Ensure that the Forwarding rules quota is met 42 | if ! meets_quota "${PROJECT_ID}" "FORWARDING_RULES" 8; then 43 | echo "Refer to https://cloud.google.com/compute/quotas" 44 | echo "Terminating..." 45 | exit 1 46 | fi 47 | 48 | ### Ensure that the In-use IP addresses global quota is met 49 | if ! meets_quota "${PROJECT_ID}" "IN_USE_ADDRESSES" 6; then 50 | echo "Refer to https://cloud.google.com/compute/quotas" 51 | echo "Terminating..." 52 | exit 1 53 | fi 54 | 55 | ### Ensure that the Backend services quota is met 56 | if ! 
meets_quota "${PROJECT_ID}" "BACKEND_SERVICES" 4; then 57 | echo "Refer to https://cloud.google.com/compute/quotas" 58 | echo "Terminating..." 59 | exit 1 60 | fi 61 | 62 | ### Ensure that the Firewall rules quota is met 63 | if ! meets_quota "${PROJECT_ID}" "FIREWALLS" 42; then 64 | echo "Refer to https://cloud.google.com/compute/quotas" 65 | echo "Terminating..." 66 | exit 1 67 | fi 68 | 69 | ### enable required service apis in the project 70 | gcloud services enable \ 71 | compute.googleapis.com 72 | 73 | (cd "$ROOT/gke-to-gke-vpn/terraform"; terraform init -input=false) 74 | (cd "$ROOT/gke-to-gke-vpn/terraform"; terraform apply -input=false -auto-approve) 75 | 76 | ### Fetch cluster1 credentials, deploy nginx pods in cluster1 and create services 77 | gcloud container clusters get-credentials cluster-deployment-cluster1 \ 78 | --zone us-east1-d 79 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 80 | kubectl apply -f "${ROOT}"/manifests/run-my-nginx.yaml 81 | kubectl apply -f "${ROOT}"/manifests/cluster-ip-svc.yaml 82 | kubectl apply -f "${ROOT}"/manifests/nodeport-svc.yaml 83 | kubectl apply -f "${ROOT}"/manifests/lb-svc.yaml 84 | kubectl apply -f "${ROOT}"/manifests/ilb-svc.yaml 85 | 86 | ### Fetch cluster2 credentials, deploy nginx pods in cluster2 and create services 87 | gcloud container clusters get-credentials cluster-deployment-cluster2 \ 88 | --zone us-central1-b 89 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 90 | kubectl apply -f "${ROOT}"/manifests/run-my-nginx.yaml 91 | kubectl apply -f "${ROOT}"/manifests/cluster-ip-svc.yaml 92 | kubectl apply -f "${ROOT}"/manifests/nodeport-svc.yaml 93 | kubectl apply -f "${ROOT}"/manifests/lb-svc.yaml 94 | kubectl apply -f "${ROOT}"/manifests/ingress-svc.yaml 95 | 96 | ### Fetch cluster3 credentials, deploy nginx pods in cluster3 and create services 97 | gcloud container clusters get-credentials cluster-deployment-cluster3 \ 98 | --zone 
us-east1-c 99 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 100 | kubectl apply -f "${ROOT}"/manifests/run-my-nginx.yaml 101 | kubectl apply -f "${ROOT}"/manifests/cluster-ip-svc.yaml 102 | kubectl apply -f "${ROOT}"/manifests/nodeport-svc.yaml 103 | kubectl apply -f "${ROOT}"/manifests/lb-svc.yaml 104 | kubectl apply -f "${ROOT}"/manifests/ilb-svc.yaml 105 | 106 | ### Fetch cluster4 credentials, deploy nginx pods in cluster4 and create services 107 | gcloud container clusters get-credentials cluster-deployment-cluster4 \ 108 | --zone us-central1-c 109 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 110 | kubectl apply -f "${ROOT}"/manifests/run-my-nginx.yaml 111 | kubectl apply -f "${ROOT}"/manifests/cluster-ip-svc.yaml 112 | kubectl apply -f "${ROOT}"/manifests/nodeport-svc.yaml 113 | kubectl apply -f "${ROOT}"/manifests/lb-svc.yaml 114 | kubectl apply -f "${ROOT}"/manifests/ingress-svc.yaml 115 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/terraform/main.tf: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | /* 18 | Creating Clusters and their Node Pools 19 | */ 20 | 21 | # Gets the current version of Kubernetes engine 22 | data "google_container_engine_versions" "gke_version" { 23 | location = "us-east1-b" 24 | project = var.project 25 | } 26 | 27 | // Install the first cluster 28 | resource "google_container_cluster" "cluster-deployment-cluster1" { 29 | name = "cluster-deployment-cluster1" 30 | project = var.project 31 | location = var.cluster1-location 32 | network = google_compute_network.network1.self_link 33 | subnetwork = google_compute_subnetwork.subnet1-us-east1.self_link 34 | initial_node_count = "1" 35 | min_master_version = data.google_container_engine_versions.gke_version.latest_master_version 36 | 37 | ip_allocation_policy { 38 | use_ip_aliases = "true" 39 | cluster_ipv4_cidr_block = var.cluster1-cidr 40 | services_ipv4_cidr_block = var.cluster1-srv-cidr 41 | } 42 | } 43 | 44 | // Install node-pool for the first cluster. It's recommended by Terraform to be in a seperate block than main cluster 45 | resource "google_container_node_pool" "cluster1_nodes" { 46 | name = "cluster1-nodes" 47 | location = var.cluster1-location 48 | project = var.project 49 | cluster = google_container_cluster.cluster-deployment-cluster1.name 50 | 51 | node_config { 52 | oauth_scopes = [ 53 | "https://www.googleapis.com/auth/devstorage.read_only", 54 | "https://www.googleapis.com/auth/logging.write", 55 | "https://www.googleapis.com/auth/monitoring", 56 | "https://www.googleapis.com/auth/servicecontrol", 57 | "https://www.googleapis.com/auth/service.management.readonly", 58 | "https://www.googleapis.com/auth/trace.append", 59 | ] 60 | 61 | image_type = "COS" 62 | tags = ["kc-node"] 63 | } 64 | } 65 | 66 | // Install the second cluster 67 | resource "google_container_cluster" "cluster-deployment-cluster2" { 68 | name = "cluster-deployment-cluster2" 69 | project = var.project 70 | location = var.cluster2-location 71 | network = 
google_compute_network.network1.self_link 72 | subnetwork = google_compute_subnetwork.subnet2-us-central1.self_link 73 | initial_node_count = "1" 74 | min_master_version = data.google_container_engine_versions.gke_version.latest_master_version 75 | 76 | ip_allocation_policy { 77 | use_ip_aliases = "true" 78 | cluster_ipv4_cidr_block = var.cluster2-cidr 79 | services_ipv4_cidr_block = var.cluster2-srv-cidr 80 | } 81 | } 82 | 83 | // Install node-pool for the second cluster. 84 | resource "google_container_node_pool" "cluster2_nodes" { 85 | name = "cluster2-nodes" 86 | location = var.cluster2-location 87 | project = var.project 88 | cluster = google_container_cluster.cluster-deployment-cluster2.name 89 | 90 | node_config { 91 | oauth_scopes = [ 92 | "https://www.googleapis.com/auth/devstorage.read_only", 93 | "https://www.googleapis.com/auth/logging.write", 94 | "https://www.googleapis.com/auth/monitoring", 95 | "https://www.googleapis.com/auth/servicecontrol", 96 | "https://www.googleapis.com/auth/service.management.readonly", 97 | "https://www.googleapis.com/auth/trace.append", 98 | ] 99 | 100 | image_type = "COS" 101 | tags = ["kc-node"] 102 | } 103 | } 104 | 105 | // Install the third cluster 106 | resource "google_container_cluster" "cluster-deployment-cluster3" { 107 | name = "cluster-deployment-cluster3" 108 | project = var.project 109 | location = var.cluster3-location 110 | network = google_compute_network.network2.self_link 111 | subnetwork = google_compute_subnetwork.subnet3-us-east1.self_link 112 | initial_node_count = "1" 113 | min_master_version = data.google_container_engine_versions.gke_version.latest_master_version 114 | 115 | ip_allocation_policy { 116 | use_ip_aliases = "true" 117 | cluster_ipv4_cidr_block = var.cluster3-cidr 118 | services_ipv4_cidr_block = var.cluster3-srv-cidr 119 | } 120 | } 121 | 122 | // Install node-pool for the third cluster. 
// Node pool for the third cluster. Kept as a separate resource (rather than
// inline in the cluster block) so the pool can be modified or replaced
// without recreating the cluster itself.
resource "google_container_node_pool" "cluster3_nodes" {
  name     = "cluster3-nodes"
  location = var.cluster3-location
  project  = var.project
  cluster  = google_container_cluster.cluster-deployment-cluster3.name

  node_config {
    // OAuth scopes granted to the node VMs: read-only object storage,
    // logging/monitoring write, service control/management, and trace.
    oauth_scopes = [
      "https://www.googleapis.com/auth/devstorage.read_only",
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
      "https://www.googleapis.com/auth/servicecontrol",
      "https://www.googleapis.com/auth/service.management.readonly",
      "https://www.googleapis.com/auth/trace.append",
    ]

    image_type = "COS"       // Container-Optimized OS node image
    tags       = ["kc-node"] // network tag shared by all cluster nodes
  }
}

// Install the fourth cluster: lives in network2 / subnet4-us-central1, with
// VPC-native (alias IP) ranges for pods and services taken from variables.
resource "google_container_cluster" "cluster-deployment-cluster4" {
  name               = "cluster-deployment-cluster4"
  project            = var.project
  location           = var.cluster4-location
  network            = google_compute_network.network2.self_link
  subnetwork         = google_compute_subnetwork.subnet4-us-central1.self_link
  initial_node_count = "1"
  // Pin the master to the latest version reported for the region
  // by the google_container_engine_versions data source.
  min_master_version = data.google_container_engine_versions.gke_version.latest_master_version

  ip_allocation_policy {
    use_ip_aliases           = "true"
    cluster_ipv4_cidr_block  = var.cluster4-cidr     // pod range
    services_ipv4_cidr_block = var.cluster4-srv-cidr // service range
  }
}

// Install node-pool for the fourth cluster.
162 | resource "google_container_node_pool" "cluster4_nodes" { 163 | name = "cluster4-nodes" 164 | location = var.cluster4-location 165 | project = var.project 166 | cluster = google_container_cluster.cluster-deployment-cluster4.name 167 | 168 | node_config { 169 | oauth_scopes = [ 170 | "https://www.googleapis.com/auth/devstorage.read_only", 171 | "https://www.googleapis.com/auth/logging.write", 172 | "https://www.googleapis.com/auth/monitoring", 173 | "https://www.googleapis.com/auth/servicecontrol", 174 | "https://www.googleapis.com/auth/service.management.readonly", 175 | "https://www.googleapis.com/auth/trace.append", 176 | ] 177 | 178 | image_type = "COS" 179 | tags = ["kc-node"] 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/terraform/network.tf: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | /* 18 | This will setup basic underlying network vpn.tf 19 | It will setup Networks, IPs and Subnetworks 20 | */ 21 | 22 | // Setting up Static IP Addresses 23 | resource "google_compute_address" "vpn1-ip-address" { 24 | name = "vpn1-ip-address" 25 | project = var.project 26 | region = var.region1 27 | } 28 | 29 | resource "google_compute_address" "vpn2-ip-address" { 30 | name = "vpn2-ip-address" 31 | project = var.project 32 | region = var.region2 33 | } 34 | 35 | resource "google_compute_address" "vpn3-ip-address" { 36 | name = "vpn3-ip-address" 37 | project = var.project 38 | region = var.region1 39 | } 40 | 41 | resource "google_compute_address" "vpn4-ip-address" { 42 | name = "vpn4-ip-address" 43 | project = var.project 44 | region = var.region2 45 | } 46 | 47 | // Setting up 4 subnets for our 2 networks 48 | resource "google_compute_subnetwork" "subnet1-us-east1" { 49 | name = "subnet1-us-east1" 50 | project = var.project 51 | ip_cidr_range = var.node1-cidr 52 | network = google_compute_network.network1.self_link 53 | region = var.region1 54 | } 55 | 56 | resource "google_compute_subnetwork" "subnet2-us-central1" { 57 | name = "subnet2-us-central1" 58 | project = var.project 59 | ip_cidr_range = var.node2-cidr 60 | network = google_compute_network.network1.self_link 61 | region = var.region2 62 | } 63 | 64 | resource "google_compute_subnetwork" "subnet3-us-east1" { 65 | name = "subnet3-us-east1" 66 | project = var.project 67 | ip_cidr_range = var.node3-cidr 68 | network = google_compute_network.network2.self_link 69 | region = var.region1 70 | } 71 | 72 | resource "google_compute_subnetwork" "subnet4-us-central1" { 73 | name = "subnet4-us-central1" 74 | project = var.project 75 | ip_cidr_range = var.node4-cidr 76 | network = google_compute_network.network2.self_link 77 | region = var.region2 78 | } 79 | 80 | // Setting 2 networks 81 | resource "google_compute_network" "network1" { 82 | name = var.network1 83 | project = var.project 84 | 
auto_create_subnetworks = false 85 | } 86 | 87 | resource "google_compute_network" "network2" { 88 | name = var.network2 89 | project = var.project 90 | auto_create_subnetworks = false 91 | } 92 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/terraform/provider.tf: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | provider "google" { 18 | version = "2.11.0" 19 | region = var.region1 20 | } 21 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | /* 18 | Variables for the creation of the cluster, bastion, subnet and network. 19 | See https://www.terraform.io/docs/configuration/variables.html 20 | */ 21 | 22 | variable "project" { 23 | description = "The project in which to hold the components" 24 | type = string 25 | } 26 | 27 | variable "vpn-deployments" { 28 | description = "Name of all VPN Deployments" 29 | type = list(string) 30 | default = ["vpn1-deployment", "vpn2-deployment", "vpn3-deployment", "vpn4-deployment"] 31 | } 32 | 33 | variable "vpn-regions" { 34 | description = "Regions for VPNs" 35 | type = list(string) 36 | default = ["us-east1", "us-central1", "us-east1", "us-central1"] 37 | } 38 | 39 | variable "network1" { 40 | description = "Name of VPN Network 1" 41 | type = string 42 | default = "network1" 43 | } 44 | 45 | variable "network2" { 46 | description = "Name of VPN Network 2" 47 | type = string 48 | default = "network2" 49 | } 50 | 51 | // The region in which to deploy first regionally-scoped resources 52 | variable "region1" { 53 | description = "Name of Region1" 54 | type = string 55 | default = "us-east1" 56 | } 57 | 58 | // The region in which to deploy second regionally-scoped resources 59 | variable "region2" { 60 | description = "Name of Region2" 61 | type = string 62 | default = "us-central1" 63 | } 64 | 65 | // Cluster variables 66 | variable "cluster1-location" { 67 | description = "Location of Cluster1" 68 | type = string 69 | default = "us-east1-d" 70 | } 71 | 72 | variable "cluster1-cidr" { 73 | description = "CIDR block for Cluster1" 74 | type = string 75 | default = "10.108.0.0/19" 76 | } 77 | 78 | variable "cluster1-srv-cidr" { 79 | description = "Service CIDR block for Cluster1" 80 | type = string 81 | default = "10.208.0.0/20" 82 | } 83 | 84 | variable "cluster2-location" { 85 | description = "Location of Cluster2" 86 | type = string 87 | default = "us-central1-b" 88 | } 89 | 90 | variable "cluster2-cidr" { 91 | description = "CIDR block for Cluster2" 
92 | type = string 93 | default = "10.118.0.0/19" 94 | } 95 | 96 | variable "cluster2-srv-cidr" { 97 | description = "Service CIDR block for Cluster2" 98 | type = string 99 | default = "10.218.0.0/20" 100 | } 101 | 102 | variable "cluster3-location" { 103 | description = "Location of Cluster3" 104 | type = string 105 | default = "us-east1-c" 106 | } 107 | 108 | variable "cluster3-cidr" { 109 | description = "CIDR block for Cluster3" 110 | type = string 111 | default = "10.128.0.0/19" 112 | } 113 | 114 | variable "cluster3-srv-cidr" { 115 | description = "Service CIDR block for Cluster3" 116 | type = string 117 | default = "10.228.0.0/20" 118 | } 119 | 120 | variable "cluster4-location" { 121 | description = "Location of Cluster4" 122 | type = string 123 | default = "us-central1-c" 124 | } 125 | 126 | variable "cluster4-cidr" { 127 | description = "CIDR block for Cluster4" 128 | type = string 129 | default = "10.138.0.0/19" 130 | } 131 | 132 | variable "cluster4-srv-cidr" { 133 | description = "Service CIDR block for Cluster4" 134 | type = string 135 | default = "10.238.0.0/20" 136 | } 137 | 138 | // Network variables 139 | variable "node1-cidr" { 140 | description = "CIDR block for Subnet1" 141 | type = string 142 | default = "10.1.0.0/28" 143 | } 144 | 145 | variable "node2-cidr" { 146 | description = "CIDR block for Subnet2" 147 | type = string 148 | default = "10.2.0.0/28" 149 | } 150 | 151 | variable "node3-cidr" { 152 | description = "CIDR block for Subnet3" 153 | type = string 154 | default = "10.11.0.0/28" 155 | } 156 | 157 | variable "node4-cidr" { 158 | description = "CIDR block for Subnet4" 159 | type = string 160 | default = "10.12.0.0/28" 161 | } 162 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/terraform/versions.tf: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the 
"License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | terraform { 18 | required_version = ">= 0.12" 19 | } 20 | -------------------------------------------------------------------------------- /gke-to-gke-vpn/terraform/vpn.tf: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
/*
All needed pieces for setting up the VPN including Gateways, Tunnels,
Firewalls, Forwarding Rules and Routes.
Network, Subnetwork and Static IPs have already been set up (in network.tf).

Topology: gateways vpn1/vpn2 sit on network1 (region1/region2) and gateways
vpn3/vpn4 sit on network2 (region1/region2); tunnels pair vpn1<->vpn3 in
region1 and vpn2<->vpn4 in region2.
*/

// Setting up Gateways first -- one classic (target) VPN gateway per
// network per region.
resource "google_compute_vpn_gateway" "vpn1-gateway" {
  name    = "vpn1-gateway"
  project = var.project
  network = google_compute_network.network1.self_link
  region  = var.region1
}

resource "google_compute_vpn_gateway" "vpn2-gateway" {
  name    = "vpn2-gateway"
  project = var.project
  network = google_compute_network.network1.self_link
  region  = var.region2
}

resource "google_compute_vpn_gateway" "vpn3-gateway" {
  name    = "vpn3-gateway"
  project = var.project
  network = google_compute_network.network2.self_link
  region  = var.region1
}

resource "google_compute_vpn_gateway" "vpn4-gateway" {
  name    = "vpn4-gateway"
  project = var.project
  network = google_compute_network.network2.self_link
  region  = var.region2
}

// Setting Forwarding Rules. A classic VPN gateway needs three rules on its
// static IP: ESP for the IPsec payload, UDP/500 for IKE key exchange and
// UDP/4500 for IKE NAT traversal.
resource "google_compute_forwarding_rule" "vpn1-deployment-fr-esp" {
  name        = "vpn1-deployment-fr-esp"
  region      = var.region1
  ip_protocol = "ESP"
  ip_address  = google_compute_address.vpn1-ip-address.address
  target      = google_compute_vpn_gateway.vpn1-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn1-deployment-fr-udp500" {
  name        = "vpn1-deployment-fr-udp500"
  region      = var.region1
  ip_protocol = "UDP"
  port_range  = "500"
  ip_address  = google_compute_address.vpn1-ip-address.address
  target      = google_compute_vpn_gateway.vpn1-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn1-deployment-fr-udp4500" {
  name        = "vpn1-deployment-fr-udp4500"
  region      = var.region1
  ip_protocol = "UDP"
  port_range  = "4500"
  ip_address  = google_compute_address.vpn1-ip-address.address
  target      = google_compute_vpn_gateway.vpn1-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn2-deployment-fr-esp" {
  name        = "vpn2-deployment-fr-esp"
  region      = var.region2
  ip_protocol = "ESP"
  ip_address  = google_compute_address.vpn2-ip-address.address
  target      = google_compute_vpn_gateway.vpn2-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn2-deployment-fr-udp500" {
  name        = "vpn2-deployment-fr-udp500"
  region      = var.region2
  ip_protocol = "UDP"
  port_range  = "500"
  ip_address  = google_compute_address.vpn2-ip-address.address
  target      = google_compute_vpn_gateway.vpn2-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn2-deployment-fr-udp4500" {
  name        = "vpn2-deployment-fr-udp4500"
  region      = var.region2
  ip_protocol = "UDP"
  port_range  = "4500"
  ip_address  = google_compute_address.vpn2-ip-address.address
  target      = google_compute_vpn_gateway.vpn2-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn3-deployment-fr-esp" {
  name        = "vpn3-deployment-fr-esp"
  region      = var.region1
  ip_protocol = "ESP"
  ip_address  = google_compute_address.vpn3-ip-address.address
  target      = google_compute_vpn_gateway.vpn3-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn3-deployment-fr-udp500" {
  name        = "vpn3-deployment-fr-udp500"
  region      = var.region1
  ip_protocol = "UDP"
  port_range  = "500"
  ip_address  = google_compute_address.vpn3-ip-address.address
  target      = google_compute_vpn_gateway.vpn3-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn3-deployment-fr-udp4500" {
  name        = "vpn3-deployment-fr-udp4500"
  region      = var.region1
  ip_protocol = "UDP"
  port_range  = "4500"
  ip_address  = google_compute_address.vpn3-ip-address.address
  target      = google_compute_vpn_gateway.vpn3-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn4-deployment-fr-esp" {
  name        = "vpn4-deployment-fr-esp"
  region      = var.region2
  ip_protocol = "ESP"
  ip_address  = google_compute_address.vpn4-ip-address.address
  target      = google_compute_vpn_gateway.vpn4-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn4-deployment-fr-udp500" {
  name        = "vpn4-deployment-fr-udp500"
  region      = var.region2
  ip_protocol = "UDP"
  port_range  = "500"
  ip_address  = google_compute_address.vpn4-ip-address.address
  target      = google_compute_vpn_gateway.vpn4-gateway.self_link
  project     = var.project
}

resource "google_compute_forwarding_rule" "vpn4-deployment-fr-udp4500" {
  name        = "vpn4-deployment-fr-udp4500"
  region      = var.region2
  ip_protocol = "UDP"
  port_range  = "4500"
  ip_address  = google_compute_address.vpn4-ip-address.address
  target      = google_compute_vpn_gateway.vpn4-gateway.self_link
  project     = var.project
}

// Setting VPN Tunnels. Each tunnel peers with the static IP of the opposite
// network's gateway in the same region.
//
// SECURITY NOTE: the shared secret below is a hardcoded demo value committed
// to source control. For anything beyond this demo, move it into a variable
// marked `sensitive = true` or fetch it from Secret Manager.
resource "google_compute_vpn_tunnel" "vpn1-deployment-tunnel" {
  region                  = var.region1
  name                    = "vpn1-deployment-tunnel"
  project                 = var.project
  peer_ip                 = google_compute_address.vpn3-ip-address.address
  shared_secret           = "gke-to-gke-vpn"
  local_traffic_selector  = ["0.0.0.0/0"]
  remote_traffic_selector = ["0.0.0.0/0"]
  ike_version             = 2

  target_vpn_gateway = google_compute_vpn_gateway.vpn1-gateway.self_link

  // The tunnel requires all three of the gateway's forwarding rules
  // (ESP, UDP/500, UDP/4500) to exist before it can be created.
  depends_on = [
    google_compute_forwarding_rule.vpn1-deployment-fr-esp,
    google_compute_forwarding_rule.vpn1-deployment-fr-udp500,
    google_compute_forwarding_rule.vpn1-deployment-fr-udp4500,
  ]
}

resource "google_compute_vpn_tunnel" "vpn2-deployment-tunnel" {
  region                  = var.region2
  name                    = "vpn2-deployment-tunnel"
  project                 = var.project
  peer_ip                 = google_compute_address.vpn4-ip-address.address
  shared_secret           = "gke-to-gke-vpn"
  local_traffic_selector  = ["0.0.0.0/0"]
  remote_traffic_selector = ["0.0.0.0/0"]
  ike_version             = 2

  target_vpn_gateway = google_compute_vpn_gateway.vpn2-gateway.self_link

  depends_on = [
    google_compute_forwarding_rule.vpn2-deployment-fr-esp,
    google_compute_forwarding_rule.vpn2-deployment-fr-udp500,
    google_compute_forwarding_rule.vpn2-deployment-fr-udp4500,
  ]
}

resource "google_compute_vpn_tunnel" "vpn3-deployment-tunnel" {
  region                  = var.region1
  name                    = "vpn3-deployment-tunnel"
  project                 = var.project
  peer_ip                 = google_compute_address.vpn1-ip-address.address
  shared_secret           = "gke-to-gke-vpn"
  local_traffic_selector  = ["0.0.0.0/0"]
  remote_traffic_selector = ["0.0.0.0/0"]
  ike_version             = 2

  target_vpn_gateway = google_compute_vpn_gateway.vpn3-gateway.self_link

  depends_on = [
    google_compute_forwarding_rule.vpn3-deployment-fr-esp,
    google_compute_forwarding_rule.vpn3-deployment-fr-udp500,
    google_compute_forwarding_rule.vpn3-deployment-fr-udp4500,
  ]
}

resource "google_compute_vpn_tunnel" "vpn4-deployment-tunnel" {
  region                  = var.region2
  name                    = "vpn4-deployment-tunnel"
  project                 = var.project
  peer_ip                 = google_compute_address.vpn2-ip-address.address
  shared_secret           = "gke-to-gke-vpn"
  local_traffic_selector  = ["0.0.0.0/0"]
  remote_traffic_selector = ["0.0.0.0/0"]
  ike_version             = 2

  target_vpn_gateway = google_compute_vpn_gateway.vpn4-gateway.self_link

  depends_on = [
    google_compute_forwarding_rule.vpn4-deployment-fr-esp,
    google_compute_forwarding_rule.vpn4-deployment-fr-udp500,
    google_compute_forwarding_rule.vpn4-deployment-fr-udp4500,
  ]
}

// Setting up firewalls: each network admits tcp/udp/icmp traffic originating
// from the peer network's node, pod and service CIDRs.
resource "google_compute_firewall" "vpn1-firewall" {
  name          = "vpn1-firewall"
  project       = var.project
  network       = google_compute_network.network1.self_link
  source_ranges = [var.node3-cidr, var.cluster3-cidr, var.cluster3-srv-cidr]

  allow {
    protocol = "tcp"
  }

  allow {
    protocol = "udp"
  }

  allow {
    protocol = "icmp"
  }
}

resource "google_compute_firewall" "vpn2-firewall" {
  name          = "vpn2-firewall"
  project       = var.project
  network       = google_compute_network.network1.self_link
  source_ranges = [var.node4-cidr, var.cluster4-cidr, var.cluster4-srv-cidr]

  allow {
    protocol = "tcp"
  }

  allow {
    protocol = "udp"
  }

  allow {
    protocol = "icmp"
  }
}

resource "google_compute_firewall" "vpn3-firewall" {
  name          = "vpn3-firewall"
  project       = var.project
  network       = google_compute_network.network2.self_link
  source_ranges = [var.node1-cidr, var.cluster1-cidr, var.cluster1-srv-cidr]

  allow {
    protocol = "tcp"
  }

  allow {
    protocol = "udp"
  }

  allow {
    protocol = "icmp"
  }
}

resource "google_compute_firewall" "vpn4-firewall" {
  name          = "vpn4-firewall"
  project       = var.project
  network       = google_compute_network.network2.self_link
  source_ranges = [var.node2-cidr, var.cluster2-cidr, var.cluster2-srv-cidr]

  allow {
    protocol = "tcp"
  }

  allow {
    protocol = "udp"
  }

  allow {
    protocol = "icmp"
  }
}

// Setting up Routes: three per tunnel, steering traffic destined for the peer
// network's node, pod and service CIDRs into the tunnel.
resource "google_compute_route" "vpn1-route1" {
  name                = "vpn1-route1"
  project             = var.project
  network             = google_compute_network.network1.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn1-deployment-tunnel.self_link
  dest_range          = var.node3-cidr
  priority            = 100
}

resource "google_compute_route" "vpn1-route2" {
  name                = "vpn1-route2"
  project             = var.project
  network             = google_compute_network.network1.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn1-deployment-tunnel.self_link
  dest_range          = var.cluster3-cidr
  priority            = 100
}

resource "google_compute_route" "vpn1-route3" {
  name                = "vpn1-route3"
  project             = var.project
  network             = google_compute_network.network1.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn1-deployment-tunnel.self_link
  dest_range          = var.cluster3-srv-cidr
  priority            = 100
}

resource "google_compute_route" "vpn2-route1" {
  name                = "vpn2-route1"
  project             = var.project
  network             = google_compute_network.network1.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn2-deployment-tunnel.self_link
  dest_range          = var.node4-cidr
  priority            = 100
}

resource "google_compute_route" "vpn2-route2" {
  name                = "vpn2-route2"
  project             = var.project
  network             = google_compute_network.network1.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn2-deployment-tunnel.self_link
  dest_range          = var.cluster4-cidr
  priority            = 100
}

resource "google_compute_route" "vpn2-route3" {
  name                = "vpn2-route3"
  project             = var.project
  network             = google_compute_network.network1.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn2-deployment-tunnel.self_link
  dest_range          = var.cluster4-srv-cidr
  priority            = 100
}

resource "google_compute_route" "vpn3-route1" {
  name                = "vpn3-route1"
  project             = var.project
  network             = google_compute_network.network2.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn3-deployment-tunnel.self_link
  dest_range          = var.node1-cidr
  priority            = 100
}

resource "google_compute_route" "vpn3-route2" {
  name                = "vpn3-route2"
  project             = var.project
  network             = google_compute_network.network2.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn3-deployment-tunnel.self_link
  dest_range          = var.cluster1-cidr
  priority            = 100
}

resource "google_compute_route" "vpn3-route3" {
  name                = "vpn3-route3"
  project             = var.project
  network             = google_compute_network.network2.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn3-deployment-tunnel.self_link
  dest_range          = var.cluster1-srv-cidr
  priority            = 100
}

resource "google_compute_route" "vpn4-route1" {
  name                = "vpn4-route1"
  project             = var.project
  network             = google_compute_network.network2.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn4-deployment-tunnel.self_link
  dest_range          = var.node2-cidr
  priority            = 100
}

resource "google_compute_route" "vpn4-route2" {
  name                = "vpn4-route2"
  project             = var.project
  network             = google_compute_network.network2.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn4-deployment-tunnel.self_link
  dest_range          = var.cluster2-cidr
  priority            = 100
}

resource "google_compute_route" "vpn4-route3" {
  name                = "vpn4-route3"
  project             = var.project
  network             = google_compute_network.network2.self_link
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.vpn4-deployment-tunnel.self_link
  dest_range          = var.cluster2-srv-cidr
  priority            = 100
}
# You may obtain a copy of the License at
#
#   https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# bash "strict-mode", fail immediately if there is a problem
# (errexit is already enabled by the `-e` on the shebang line)
set -o nounset
set -o pipefail

dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
ROOT="$(dirname "${dir}")"

# Helper predicates: network_exists, verify_cidr_range, vpn_exists,
# cluster_running, access_service.
#shellcheck disable=SC1090
source "${ROOT}/verify-functions.sh"

PROJECT_ID=$(gcloud config get-value project)
if [ -z "${PROJECT_ID}" ]
  then echo >&2 "I require default project is set but it's not. Aborting."; exit 1;
fi

### Obtain Cluster Zone
CLUSTER1_ZONE=$(gcloud container clusters list \
  --filter="name=cluster-deployment-cluster1" --format "value(zone)")
CLUSTER2_ZONE=$(gcloud container clusters list \
  --filter="name=cluster-deployment-cluster2" --format "value(zone)")
CLUSTER3_ZONE=$(gcloud container clusters list \
  --filter="name=cluster-deployment-cluster3" --format "value(zone)")
CLUSTER4_ZONE=$(gcloud container clusters list \
  --filter="name=cluster-deployment-cluster4" --format "value(zone)")

# kubectl context names follow GKE's gke_<project>_<zone>_<cluster> scheme.
CLUSTER1_CONTEXT="gke_${PROJECT_ID}_${CLUSTER1_ZONE}_cluster-deployment-cluster1"
CLUSTER2_CONTEXT="gke_${PROJECT_ID}_${CLUSTER2_ZONE}_cluster-deployment-cluster2"
CLUSTER3_CONTEXT="gke_${PROJECT_ID}_${CLUSTER3_ZONE}_cluster-deployment-cluster3"
CLUSTER4_CONTEXT="gke_${PROJECT_ID}_${CLUSTER4_ZONE}_cluster-deployment-cluster4"

### Ensure that the Networks exist
for network in "network1" "network2"; do
  if ! network_exists "${PROJECT_ID}" "${network}"; then
    echo "Network is missing"
    echo "Terminating..."
    exit 1
  fi
done

### Ensure that each Subnet's range is correct.
### Entries are "subnet-name:expected-cidr" pairs; the CIDRs must match the
### values deployed from network.tf.
for subnet_spec in \
  "subnet1-us-east1:10.1.0.0/28" \
  "subnet2-us-central1:10.2.0.0/28" \
  "subnet3-us-east1:10.11.0.0/28" \
  "subnet4-us-central1:10.12.0.0/28"
do
  subnet="${subnet_spec%%:*}"
  cidr="${subnet_spec#*:}"
  if ! verify_cidr_range "${PROJECT_ID}" "${subnet}" "${cidr}"; then
    echo "Subnet ip range is incorrect"
    echo "Terminating..."
    exit 1
  fi
done

### Ensure that the VPN tunnels exist
for (( c=1; c<=4; c++ ))
do
  if ! vpn_exists "${PROJECT_ID}" "vpn$c-deployment-tunnel"; then
    echo "vpn$c-deployment-tunnel is missing or is not running"
    echo "Terminating..."
    exit 1
  fi
done

### Ensure that the clusters are running
for (( c=1; c<=4; c++ ))
do
  if ! cluster_running "${PROJECT_ID}" "cluster-deployment-cluster$c"; then
    echo "cluster$c is missing or is not running"
    echo "Terminating..."
    exit 1
  fi
done

### Check the external nginx service ip on every cluster
for context in "${CLUSTER1_CONTEXT}" "${CLUSTER2_CONTEXT}" \
               "${CLUSTER3_CONTEXT}" "${CLUSTER4_CONTEXT}"; do
  if ! access_service "${PROJECT_ID}" "${context}" "my-nginx-lb"; then
    echo "Service ip is not available"
    echo "Terminating..."
    exit 1
  fi
done

### Check the internal nginx service ip (deployed on clusters 1 and 3 only)
for context in "${CLUSTER1_CONTEXT}" "${CLUSTER3_CONTEXT}"; do
  if ! access_service "${PROJECT_ID}" "${context}" "my-nginx-ilb"; then
    echo "Service ip is not available"
    echo "Terminating..."
    exit 1
  fi
done
https://raw.githubusercontent.com/GoogleCloudPlatform/gke-networking-demos/a2f141350ec649fe3c1da79407206f1f6b8c18e0/images/nginx_external_ip.png -------------------------------------------------------------------------------- /images/services.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GoogleCloudPlatform/gke-networking-demos/a2f141350ec649fe3c1da79407206f1f6b8c18e0/images/services.png -------------------------------------------------------------------------------- /images/vm_internal_ips.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GoogleCloudPlatform/gke-networking-demos/a2f141350ec649fe3c1da79407206f1f6b8c18e0/images/vm_internal_ips.png -------------------------------------------------------------------------------- /images/vpc_networks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GoogleCloudPlatform/gke-networking-demos/a2f141350ec649fe3c1da79407206f1f6b8c18e0/images/vpc_networks.png -------------------------------------------------------------------------------- /images/workloads.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GoogleCloudPlatform/gke-networking-demos/a2f141350ec649fe3c1da79407206f1f6b8c18e0/images/workloads.png -------------------------------------------------------------------------------- /manifests/cluster-ip-svc.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # cluster ip service configuration 16 | 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: my-nginx 21 | labels: 22 | run: my-nginx 23 | spec: 24 | ports: 25 | - port: 80 26 | protocol: TCP 27 | selector: 28 | run: my-nginx 29 | -------------------------------------------------------------------------------- /manifests/ilb-svc.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | # Internal LoadBalancer service configuration 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: my-nginx-ilb 20 | annotations: 21 | cloud.google.com/load-balancer-type: "Internal" 22 | labels: 23 | run: my-nginx 24 | spec: 25 | type: LoadBalancer 26 | ports: 27 | - port: 8080 28 | targetPort: 80 29 | protocol: TCP 30 | name: http 31 | - port: 443 32 | protocol: TCP 33 | name: https 34 | selector: 35 | run: my-nginx 36 | -------------------------------------------------------------------------------- /manifests/ingress-svc.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ### Ingress service configuration 16 | apiVersion: extensions/v1beta1 17 | kind: Ingress 18 | metadata: 19 | name: my-nginx-ingress 20 | annotations: 21 | ingress.kubernetes.io/rewrite-target: / 22 | spec: 23 | backend: 24 | serviceName: my-nginx-nodeport 25 | servicePort: 8080 26 | -------------------------------------------------------------------------------- /manifests/lb-svc.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Load Balancer service configuration 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: my-nginx-lb 20 | labels: 21 | run: my-nginx 22 | spec: 23 | type: LoadBalancer 24 | ports: 25 | - port: 8080 26 | targetPort: 80 27 | protocol: TCP 28 | name: http 29 | - port: 443 30 | protocol: TCP 31 | name: https 32 | selector: 33 | run: my-nginx 34 | -------------------------------------------------------------------------------- /manifests/nodeport-svc.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | ### Nodeport service configuration 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: my-nginx-nodeport 20 | labels: 21 | run: my-nginx 22 | spec: 23 | type: NodePort 24 | ports: 25 | - port: 8080 26 | targetPort: 80 27 | protocol: TCP 28 | name: http 29 | - port: 443 30 | protocol: TCP 31 | name: https 32 | selector: 33 | run: my-nginx 34 | -------------------------------------------------------------------------------- /manifests/run-my-nginx.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ### nginx app deployment configuration 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: my-nginx 20 | spec: 21 | selector: 22 | matchLabels: 23 | run: my-nginx 24 | replicas: 2 25 | template: 26 | metadata: 27 | labels: 28 | run: my-nginx 29 | spec: 30 | containers: 31 | - name: my-nginx 32 | image: gcr.io/pso-examples/nginx-curl:1.0.0 33 | ports: 34 | - containerPort: 80 35 | 36 | -------------------------------------------------------------------------------- /network/network.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
"""
Below code creates a custom network and its subnetworks based on input values
from network*.yaml file.
Please refer to
https://github.com/GoogleCloudPlatform/deploymentmanager-samples/tree/master/examples/v2
for deployment manager samples.
"""


def GenerateConfig(context):
    """Generates the YAML resource configuration for a GCP network.

    Args:
        context: Deployment Manager context; `context.env['name']` supplies
            the network name and `context.properties` holds the values loaded
            from the network*.yaml file (change that file to customize).

    Returns:
        dict: ``{'resources': [...]}`` containing the custom network followed
        by one subnetwork resource per entry in properties['subnetworks'].
    """
    network_name = context.env['name']

    def _subnetwork(spec):
        # Each subnetwork is named "<name>-<region>" and declares an explicit
        # dependency so it is only created after the network exists.
        subnet_name = '%s-%s' % (spec['name'], spec['region'])
        return {
            'name': subnet_name,
            'type': 'compute.v1.subnetwork',
            'properties': {
                'name': subnet_name,
                'description': 'Subnetwork of %s in %s' % (network_name,
                                                           spec['region']),
                'ipCidrRange': spec['cidr'],
                'region': spec['region'],
                # Reference the network resource created above.
                'network': '$(ref.%s.selfLink)' % network_name,
            },
            'metadata': {
                'dependsOn': [
                    network_name,
                ]
            },
        }

    # Custom-mode network: subnetworks are declared explicitly below rather
    # than auto-created per region.
    network = {
        'name': network_name,
        'type': 'compute.v1.network',
        'properties': {
            'name': network_name,
            'autoCreateSubnetworks': False,
        },
    }

    subnetworks = [_subnetwork(s) for s in context.properties['subnetworks']]

    return {'resources': [network] + subnetworks}
29 | items: 30 | description: > 31 | A subnetwork, defined by its region and its CIDR. 32 | type: object 33 | properties: 34 | name: 35 | type: string 36 | region: 37 | type: string 38 | cidr: 39 | type: string 40 | pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$ 41 | -------------------------------------------------------------------------------- /network/network.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | ### Network1 configuration 16 | 17 | imports: 18 | - path: network.py 19 | 20 | resources: 21 | # The "name" property below will be the name of the new network 22 | - name: network1 23 | type: network.py 24 | properties: 25 | subnetworks: 26 | - name: subnet1 27 | region: us-east1 28 | cidr: 10.1.0.0/28 29 | - name: subnet2 30 | region: us-central1 31 | cidr: 10.2.0.0/28 32 | 33 | # The "name" property below will be the name of the new network 34 | - name: network2 35 | type: network.py 36 | properties: 37 | subnetworks: 38 | - name: subnet3 39 | region: us-east1 40 | cidr: 10.11.0.0/28 41 | - name: subnet4 42 | region: us-central1 43 | cidr: 10.12.0.0/28 44 | 45 | -------------------------------------------------------------------------------- /network/static-ip.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | # Template for creating static ip 16 | # Refer to 17 | # https://github.com/GoogleCloudPlatform/deploymentmanager-samples/tree/master/examples/v2/vpn_auto_subnet 18 | # Input values are provided using --properties flag 19 | 20 | resources: 21 | 22 | ## STATIC IP 23 | - type: compute.v1.address 24 | name: vpn1-ip-address 25 | properties: 26 | region: us-east1 27 | 28 | - type: compute.v1.address 29 | name: vpn2-ip-address 30 | properties: 31 | region: us-central1 32 | 33 | - type: compute.v1.address 34 | name: vpn3-ip-address 35 | properties: 36 | region: us-east1 37 | 38 | - type: compute.v1.address 39 | name: vpn4-ip-address 40 | properties: 41 | region: us-central1 42 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:base" 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /test/boilerplate/boilerplate.Dockerfile.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | -------------------------------------------------------------------------------- /test/boilerplate/boilerplate.Makefile.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /test/boilerplate/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | -------------------------------------------------------------------------------- /test/boilerplate/boilerplate.py.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /test/boilerplate/boilerplate.sh.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | -------------------------------------------------------------------------------- /test/boilerplate/boilerplate.tf.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | -------------------------------------------------------------------------------- /test/boilerplate/boilerplate.xml.txt: -------------------------------------------------------------------------------- 1 | 16 | -------------------------------------------------------------------------------- /test/boilerplate/boilerplate.yaml.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | -------------------------------------------------------------------------------- /test/make.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2018 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # This function checks to make sure that every 18 | # shebang has a '- e' flag, which causes it 19 | # to exit on error 20 | function check_bash() { 21 | find . -name "*.sh" | while IFS= read -d '' -r file; 22 | do 23 | if [[ "$file" != *"bash -e"* ]]; 24 | then 25 | echo "$file is missing shebang with -e"; 26 | exit 1; 27 | fi; 28 | done; 29 | } 30 | 31 | # This function makes sure that the required files for 32 | # releasing to OSS are present 33 | function basefiles() { 34 | echo "Checking for required files" 35 | test -f CONTRIBUTING.md || echo "Missing CONTRIBUTING.md" 36 | test -f LICENSE || echo "Missing LICENSE" 37 | test -f README.md || echo "Missing README.md" 38 | } 39 | 40 | # This function runs the hadolint linter on 41 | # every file named 'Dockerfile' 42 | function docker() { 43 | echo "Running hadolint on Dockerfiles" 44 | find . -name "Dockerfile" -exec hadolint {} \; 45 | } 46 | 47 | # This function runs 'terraform validate' against all 48 | # files ending in '.tf' 49 | function check_terraform() { 50 | echo "Running terraform validate" 51 | #shellcheck disable=SC2156 52 | find . 
-name "*.tf" -exec bash -c 'terraform validate --check-variables=false $(dirname "{}")' \; 53 | } 54 | 55 | # This function runs 'go fmt' and 'go vet' on every file 56 | # that ends in '.go' 57 | function golang() { 58 | echo "Running go fmt and go vet" 59 | find . -name "*.go" -exec go fmt {} \; 60 | find . -name "*.go" -exec go vet {} \; 61 | } 62 | 63 | # This function runs the flake8 linter on every file 64 | # ending in '.py' 65 | function check_python() { 66 | echo "Running flake8" 67 | find . -name "*.py" -exec flake8 {} \; 68 | } 69 | 70 | # This function runs the shellcheck linter on every 71 | # file ending in '.sh' 72 | function check_shell() { 73 | echo "Running shellcheck" 74 | find . -name "*.sh" -exec shellcheck -x {} \; 75 | } 76 | 77 | # This function makes sure that there is no trailing whitespace 78 | # in any files in the project. 79 | # There are some exclusions 80 | function check_trailing_whitespace() { 81 | echo "The following lines have trailing whitespace" 82 | grep -r '[[:blank:]]$' --exclude-dir=".terraform" --exclude="*.png" --exclude-dir=".git" --exclude="*.pyc" . 83 | rc=$? 84 | if [ $rc = 0 ]; then 85 | exit 1 86 | fi 87 | } 88 | -------------------------------------------------------------------------------- /test/verify_boilerplate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2018 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # Verifies that all source files contain the necessary copyright boilerplate 17 | # snippet. 18 | # This is based on existing work 19 | # https://github.com/kubernetes/test-infra/blob/master/hack 20 | # /verify_boilerplate.py 21 | from __future__ import print_function 22 | import argparse 23 | import glob 24 | import os 25 | import re 26 | import sys 27 | 28 | 29 | def get_args(): 30 | """Parses command line arguments. 31 | 32 | Configures and runs argparse.ArgumentParser to extract command line 33 | arguments. 34 | 35 | Returns: 36 | An argparse.Namespace containing the arguments parsed from the 37 | command line 38 | """ 39 | parser = argparse.ArgumentParser() 40 | parser.add_argument("filenames", 41 | help="list of files to check, " 42 | "all files if unspecified", 43 | nargs='*') 44 | rootdir = os.path.dirname(__file__) + "/../" 45 | rootdir = os.path.abspath(rootdir) 46 | parser.add_argument( 47 | "--rootdir", 48 | default=rootdir, 49 | help="root directory to examine") 50 | 51 | default_boilerplate_dir = os.path.join(rootdir, "test/boilerplate") 52 | parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir) 53 | return parser.parse_args() 54 | 55 | 56 | def get_refs(ARGS): 57 | """Converts the directory of boilerplate files into a map keyed by file 58 | extension. 59 | 60 | Reads each boilerplate file's contents into an array, then adds that array 61 | to a map keyed by the file extension. 62 | 63 | Returns: 64 | A map of boilerplate lines, keyed by file extension. For example, 65 | boilerplate.py.txt would result in the k,v pair {".py": py_lines} where 66 | py_lines is an array containing each line of the file. 
67 | """ 68 | refs = {} 69 | 70 | # Find and iterate over the absolute path for each boilerplate template 71 | for path in glob.glob(os.path.join( 72 | ARGS.boilerplate_dir, 73 | "boilerplate.*.txt")): 74 | extension = os.path.basename(path).split(".")[1] 75 | ref_file = open(path, 'r') 76 | ref = ref_file.read().splitlines() 77 | ref_file.close() 78 | refs[extension] = ref 79 | return refs 80 | 81 | 82 | # pylint: disable=too-many-locals 83 | def has_valid_header(filename, refs, regexs): 84 | """Test whether a file has the correct boilerplate header. 85 | 86 | Tests each file against the boilerplate stored in refs for that file type 87 | (based on extension), or by the entire filename (eg Dockerfile, Makefile). 88 | Some heuristics are applied to remove build tags and shebangs, but little 89 | variance in header formatting is tolerated. 90 | 91 | Args: 92 | filename: A string containing the name of the file to test 93 | refs: A map of boilerplate headers, keyed by file extension 94 | regexs: a map of compiled regex objects used in verifying boilerplate 95 | 96 | Returns: 97 | True if the file has the correct boilerplate header, otherwise returns 98 | False. 99 | """ 100 | try: 101 | with open(filename, 'r') as fp: # pylint: disable=invalid-name 102 | data = fp.read() 103 | except IOError: 104 | return False 105 | basename = os.path.basename(filename) 106 | extension = get_file_extension(filename) 107 | if extension: 108 | ref = refs[extension] 109 | else: 110 | ref = refs[basename] 111 | # remove build tags from the top of Go files 112 | if extension == "go": 113 | con = regexs["go_build_constraints"] 114 | (data, found) = con.subn("", data, 1) 115 | # remove shebang 116 | elif extension == "sh" or extension == "py": 117 | she = regexs["shebang"] 118 | (data, found) = she.subn("", data, 1) 119 | data = data.splitlines() 120 | # if our test file is smaller than the reference it surely fails! 
121 | if len(ref) > len(data): 122 | return False 123 | # trim our file to the same number of lines as the reference file 124 | data = data[:len(ref)] 125 | year = regexs["year"] 126 | for datum in data: 127 | if year.search(datum): 128 | return False 129 | 130 | # if we don't match the reference at this point, fail 131 | if ref != data: 132 | return False 133 | return True 134 | 135 | 136 | def get_file_extension(filename): 137 | """Extracts the extension part of a filename. 138 | 139 | Identifies the extension as everything after the last period in filename. 140 | 141 | Args: 142 | filename: string containing the filename 143 | 144 | Returns: 145 | A string containing the extension in lowercase 146 | """ 147 | return os.path.splitext(filename)[1].split(".")[-1].lower() 148 | 149 | 150 | # These directories will be omitted from header checks 151 | SKIPPED_DIRS = [ 152 | 'Godeps', 'third_party', '_gopath', '_output', 153 | '.git', 'vendor', '__init__.py', 'node_modules' 154 | ] 155 | 156 | 157 | def normalize_files(files): 158 | """Extracts the files that require boilerplate checking from the files 159 | argument. 160 | 161 | A new list will be built. Each path from the original files argument will 162 | be added unless it is within one of SKIPPED_DIRS. All relative paths will 163 | be converted to absolute paths by prepending the root_dir path parsed from 164 | the command line, or its default value. 165 | 166 | Args: 167 | files: a list of file path strings 168 | 169 | Returns: 170 | A modified copy of the files list where any any path in a skipped 171 | directory is removed, and all paths have been made absolute. 
172 | """ 173 | newfiles = [] 174 | for pathname in files: 175 | if any(x in pathname for x in SKIPPED_DIRS): 176 | continue 177 | newfiles.append(pathname) 178 | for idx, pathname in enumerate(newfiles): 179 | if not os.path.isabs(pathname): 180 | newfiles[idx] = os.path.join(ARGS.rootdir, pathname) 181 | return newfiles 182 | 183 | 184 | def get_files(extensions, ARGS): 185 | """Generates a list of paths whose boilerplate should be verified. 186 | 187 | If a list of file names has been provided on the command line, it will be 188 | treated as the initial set to search. Otherwise, all paths within rootdir 189 | will be discovered and used as the initial set. 190 | 191 | Once the initial set of files is identified, it is normalized via 192 | normalize_files() and further stripped of any file name whose extension is 193 | not in extensions. 194 | 195 | Args: 196 | extensions: a list of file extensions indicating which file types 197 | should have their boilerplate verified 198 | 199 | Returns: 200 | A list of absolute file paths 201 | """ 202 | files = [] 203 | if ARGS.filenames: 204 | files = ARGS.filenames 205 | else: 206 | for root, dirs, walkfiles in os.walk(ARGS.rootdir): 207 | # don't visit certain dirs. This is just a performance improvement 208 | # as we would prune these later in normalize_files(). 
But doing it 209 | # cuts down the amount of filesystem walking we do and cuts down 210 | # the size of the file list 211 | for dpath in SKIPPED_DIRS: 212 | if dpath in dirs: 213 | dirs.remove(dpath) 214 | for name in walkfiles: 215 | pathname = os.path.join(root, name) 216 | files.append(pathname) 217 | files = normalize_files(files) 218 | outfiles = [] 219 | for pathname in files: 220 | basename = os.path.basename(pathname) 221 | extension = get_file_extension(pathname) 222 | if extension in extensions or basename in extensions: 223 | outfiles.append(pathname) 224 | return outfiles 225 | 226 | 227 | def get_regexs(): 228 | """Builds a map of regular expressions used in boilerplate validation. 229 | 230 | There are two scenarios where these regexes are used. The first is in 231 | validating the date referenced in the boilerplate, by ensuring it is an 232 | acceptable year. The second is in identifying non-boilerplate elements, 233 | like shebangs and compiler hints that should be ignored when validating 234 | headers. 235 | 236 | Returns: 237 | A map of compiled regular expression objects, keyed by mnemonic. 238 | """ 239 | regexs = {} 240 | # Search for "YEAR" which exists in the boilerplate, but shouldn't in the 241 | # real thing 242 | regexs["year"] = re.compile('YEAR') 243 | # dates can be 2014, 2015, 2016 or 2017, company holder names can be 244 | # anything 245 | regexs["date"] = re.compile('(2014|2015|2016|2017|2018)') 246 | # strip // +build \n\n build constraints 247 | regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", 248 | re.MULTILINE) 249 | # strip #!.* from shell/python scripts 250 | regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE) 251 | return regexs 252 | 253 | 254 | def main(args): 255 | """Identifies and verifies files that should have the desired boilerplate. 256 | 257 | Retrieves the lists of files to be validated and tests each one in turn.
258 | If all files contain correct boilerplate, this function terminates 259 | normally. Otherwise it prints the name of each non-conforming file and 260 | exits with a non-zero status code. 261 | """ 262 | regexs = get_regexs() 263 | refs = get_refs(args) 264 | filenames = get_files(refs.keys(), args) 265 | nonconforming_files = [] 266 | for filename in filenames: 267 | if not has_valid_header(filename, refs, regexs): 268 | nonconforming_files.append(filename) 269 | if nonconforming_files: 270 | print('%d files have incorrect boilerplate headers:' % len( 271 | nonconforming_files)) 272 | for filename in sorted(nonconforming_files): 273 | print(os.path.relpath(filename, args.rootdir)) 274 | sys.exit(1) 275 | 276 | 277 | if __name__ == "__main__": 278 | ARGS = get_args() 279 | main(ARGS) 280 | -------------------------------------------------------------------------------- /validate-pod-to-service-communication.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Copyright 2018 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | ### Executes commands to verify pod-to-service communication from test container 18 | ### of the pods in cluster1. 19 | ### cluster zones can be modified as needed.
20 | 21 | ### Obtain current active PROJECT_ID 22 | PROJECT_ID=$(gcloud config get-value project) 23 | 24 | ### Obtain Cluster Zone 25 | CLUSTER1_ZONE=$(gcloud container clusters list \ 26 | --filter="name=cluster-deployment-cluster1" --format "value(zone)") 27 | CLUSTER2_ZONE=$(gcloud container clusters list \ 28 | --filter="name=cluster-deployment-cluster2" --format "value(zone)") 29 | CLUSTER3_ZONE=$(gcloud container clusters list \ 30 | --filter="name=cluster-deployment-cluster3" --format "value(zone)") 31 | CLUSTER4_ZONE=$(gcloud container clusters list \ 32 | --filter="name=cluster-deployment-cluster4" --format "value(zone)") 33 | 34 | CLUSTER1_CONTEXT="gke_${PROJECT_ID}_${CLUSTER1_ZONE}_cluster-deployment-cluster1" 35 | CLUSTER2_CONTEXT="gke_${PROJECT_ID}_${CLUSTER2_ZONE}_cluster-deployment-cluster2" 36 | CLUSTER3_CONTEXT="gke_${PROJECT_ID}_${CLUSTER3_ZONE}_cluster-deployment-cluster3" 37 | CLUSTER4_CONTEXT="gke_${PROJECT_ID}_${CLUSTER4_ZONE}_cluster-deployment-cluster4" 38 | 39 | ### use cluster1 context 40 | kubectl config use-context "${CLUSTER1_CONTEXT}" 41 | kubectl config set-context "$(kubectl config current-context)" --namespace=default 42 | 43 | POD_NAME=$(kubectl get pods -l run=my-nginx -o jsonpath='{.items[].metadata.name}') 44 | 45 | ### Within cluster tests 46 | echo "----------------------------------------" 47 | echo "Testing: cluster1 -> cluster1 clusterIP service" 48 | SERVICE_IP=$(kubectl get services --field-selector metadata.name=my-nginx \ 49 | -o jsonpath='{.items[].spec.clusterIP}') 50 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}" 51 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}" 52 | echo "----------------------------------------" 53 | 54 | echo "----------------------------------------" 55 | echo "Testing: cluster1 -> cluster1 nodeport service" 56 | SERVICE_IP=$(kubectl get services --field-selector metadata.name=my-nginx-nodeport \ 57 | -o jsonpath='{.items[].spec.clusterIP}') 58 
| echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}:8080" 59 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}":8080 60 | echo "----------------------------------------" 61 | 62 | ### Internal Load Balancer tests 63 | echo "----------------------------------------" 64 | echo "Testing: cluster1 -> cluster1 ILB service" 65 | SERVICE_IP=$(kubectl get services --field-selector metadata.name=my-nginx-ilb \ 66 | -o jsonpath='{.items[].status.loadBalancer.ingress[].ip}') 67 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}:8080" 68 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}":8080 69 | echo "----------------------------------------" 70 | 71 | echo "----------------------------------------" 72 | echo "Testing: cluster1 -> cluster3 ILB (same region)" 73 | SERVICE_IP=$( kubectl get services --cluster "${CLUSTER3_CONTEXT}" \ 74 | --field-selector metadata.name=my-nginx-ilb \ 75 | -o jsonpath='{.items[].status.loadBalancer.ingress[].ip}') 76 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}:8080" 77 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}":8080 78 | echo "----------------------------------------" 79 | 80 | #### Ingress tests 81 | echo "----------------------------------------" 82 | echo "Testing: cluster1 -> cluster2 ingress service" 83 | SERVICE_IP=$(kubectl get ingress --cluster "${CLUSTER2_CONTEXT}" \ 84 | --field-selector metadata.name=my-nginx-ingress \ 85 | -o jsonpath='{.items[].status.loadBalancer.ingress[].ip}') 86 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}" 87 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}" 88 | echo "----------------------------------------" 89 | 90 | echo "----------------------------------------" 91 | echo "Testing: cluster1 -> cluster4 ingress service" 92 | SERVICE_IP=$(kubectl get ingress --cluster "${CLUSTER4_CONTEXT}" \ 93 | --field-selector 
metadata.name=my-nginx-ingress \ 94 | -o jsonpath='{.items[].status.loadBalancer.ingress[].ip}') 95 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}" 96 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}" 97 | echo "----------------------------------------" 98 | 99 | #### Load Balancer tests 100 | echo "----------------------------------------" 101 | echo "Testing: cluster1 -> cluster1 LB service" 102 | SERVICE_IP=$(kubectl get services --field-selector metadata.name=my-nginx-lb \ 103 | -o jsonpath='{.items[].status.loadBalancer.ingress[].ip}') 104 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}:8080" 105 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}":8080 106 | echo "----------------------------------------" 107 | 108 | echo "----------------------------------------" 109 | echo "Testing: cluster1 -> cluster2 LB service (cross region)" 110 | SERVICE_IP=$( kubectl get services --cluster "${CLUSTER2_CONTEXT}" \ 111 | --field-selector metadata.name=my-nginx-lb \ 112 | -o jsonpath='{.items[].status.loadBalancer.ingress[].ip}') 113 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}:8080" 114 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}":8080 115 | echo "----------------------------------------" 116 | 117 | echo "----------------------------------------" 118 | echo "Testing: cluster1 -> cluster3 LB service" 119 | SERVICE_IP=$( kubectl get services --cluster "${CLUSTER3_CONTEXT}" \ 120 | --field-selector metadata.name=my-nginx-lb \ 121 | -o jsonpath='{.items[].status.loadBalancer.ingress[].ip}') 122 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}:8080" 123 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}":8080 124 | echo "----------------------------------------" 125 | 126 | echo "----------------------------------------" 127 | echo "Testing: cluster1 -> cluster4 LB service (cross region)" 
128 | SERVICE_IP=$( kubectl get services --cluster "${CLUSTER4_CONTEXT}" \ 129 | --field-selector metadata.name=my-nginx-lb \ 130 | -o jsonpath='{.items[].status.loadBalancer.ingress[].ip}') 131 | echo "kubectl exec ${POD_NAME} -c my-nginx -- curl -s -I ${SERVICE_IP}:8080" 132 | kubectl exec "${POD_NAME}" -c my-nginx -- curl -s -I "${SERVICE_IP}":8080 133 | echo "----------------------------------------" 134 | -------------------------------------------------------------------------------- /verify-functions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Copyright 2018 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | # Library of functions used by the validate the project 18 | 19 | # Check if a resource quota is met 20 | # Globals: 21 | # None 22 | # Arguments: 23 | # PROJECT 24 | # METRICS 25 | # QUOTA 26 | # Returns: 27 | # status code 28 | function meets_quota() { 29 | local PROJECT="$1" 30 | local METRIC="$2" 31 | local QUOTA="$3" 32 | local LIMIT 33 | LIMIT=$(gcloud compute project-info describe --project "$PROJECT" \ 34 | --format=json | jq --arg METRIC "$METRIC" '.quotas[] | select(.metric==$METRIC).limit') 35 | if [[ "${LIMIT}" -ge "$QUOTA" ]]; then 36 | return 0 37 | fi 38 | echo "" 39 | echo "$METRIC quota of $QUOTA is not met" 40 | echo "" 41 | return 1 42 | } 43 | 44 | # Check if a given deployment exists 45 | # Globals: 46 | # None 47 | # Arguments: 48 | # PROJECT 49 | # DEPLOY 50 | # Returns: 51 | # status code 52 | # 0: the deployment should exist, except that it does not 53 | # 1: the deployment exists 54 | # 2: no trace of the deployment left, it can be an indicator of successful clean up earlier 55 | function deployment_exists() { 56 | local PROJECT="${1}" 57 | local DEPLOY="${2}" 58 | local EXISTS 59 | EXISTS=$(gcloud deployment-manager deployments list --project "${PROJECT}" \ 60 | --filter="name=${DEPLOY} AND operation.status=DONE" --format "value(operation.error.errors)") 61 | if [[ "${EXISTS}" != "" ]]; then 62 | if [[ "${EXISTS}" != "[]" ]]; then 63 | echo "ERROR ${DEPLOY}: ${EXISTS}" 64 | return 0 65 | fi 66 | else ## EXISTS = "" the resource cannot be found at all 67 | return 2 68 | fi 69 | # default exit code, where the resource can be found and there are no operation.error.errors 70 | return 1 71 | } 72 | 73 | # Delete a deployment, including retry logic where necessary 74 | # Globals: 75 | # None 76 | # Arguments: 77 | # PROJECT 78 | # DEPLOY 79 | # RETRY - number of retrys, default to 3 80 | # Returns: 81 | # status code 82 | function deployment_deletes() { 83 | local PROJECT="${1:-}" 84 | local DEPLOY="${2:-}" 85 | local RETRY="${3:-3}" 
86 | 87 | while [ "${RETRY}" -gt 0 ]; do 88 | echo "Trying to delete ${DEPLOY}" 89 | gcloud deployment-manager deployments delete "${DEPLOY}" --quiet --project "${PROJECT}" 90 | deployment_exists "${PROJECT}" "${DEPLOY}" 91 | if [[ "$?" != "2" ]]; then 92 | echo "failed deleting ${DEPLOY}. Retrying ..." 93 | sleep 20 94 | RETRY=$((RETRY-1)) 95 | else 96 | break 97 | fi 98 | done 99 | 100 | # Fail the deletion if there are still traces of deployment left after multiple attempts of deletion 101 | deployment_exists "${PROJECT}" "${DEPLOY}" 102 | if [[ "$?" != "2" ]]; then 103 | return 1 104 | fi 105 | 106 | return 0 107 | } 108 | 109 | # Check if a given network exists 110 | # Globals: 111 | # None 112 | # Arguments: 113 | # PROJECT 114 | # NETWORK 115 | # Returns: 116 | # status code 117 | function network_exists() { 118 | local PROJECT="${1}" 119 | local NETWORK="${2}" 120 | local EXISTS 121 | EXISTS=$(gcloud compute networks list --project "${PROJECT}" \ 122 | --filter="name=${NETWORK}" --format "value(name)") 123 | if [[ "${EXISTS}" != "" ]]; then 124 | echo "${NETWORK} network exists" 125 | return 0 126 | fi 127 | return 1 128 | } 129 | 130 | # Check if a given vpn exists 131 | # Globals: 132 | # None 133 | # Arguments: 134 | # PROJECT 135 | # VPN 136 | # Returns: 137 | # status code 138 | function vpn_exists() { 139 | local PROJECT="${1}" 140 | local VPN="${2}" 141 | local EXISTS 142 | EXISTS=$(gcloud compute vpn-tunnels list --project "${PROJECT}" \ 143 | --filter="name=${VPN} and status=ESTABLISHED" --format "value(name)") 144 | if [[ "${EXISTS}" != "" ]]; then 145 | echo "${VPN} vpn exists" 146 | return 0 147 | fi 148 | return 1 149 | } 150 | 151 | # Check if a given network peering exists 152 | # Globals: 153 | # None 154 | # Arguments: 155 | # PROJECT 156 | # NETWORK 157 | # Returns: 158 | # status code 159 | # 0: the network peering exists 160 | # 1: the network peering does not exist 161 | function network_peering_exists() { 162 | local PROJECT="${1}" 163 
| local NETWORK="${2}" 164 | local EXISTS 165 | EXISTS=$(gcloud compute networks peerings list --project "${PROJECT}" \ 166 | --filter="name=${NETWORK}" --format "value(name)") 167 | if [[ "${EXISTS}" != "" ]]; then 168 | echo "${NETWORK} peering exists" 169 | return 0 170 | fi 171 | return 1 172 | } 173 | 174 | # Delete a network peering, including retry logic where necessary 175 | # Globals: 176 | # None 177 | # Arguments: 178 | # PROJECT 179 | # NETWORK 180 | # PEERING 181 | # RETRY - number of retrys, default to 3 182 | # Returns: 183 | # status code 184 | function network_peering_deletes() { 185 | local PROJECT="${1:-}" 186 | local NETWORK="${2:-}" 187 | local PEERING="${3:-}" 188 | local RETRY="${4:-3}" 189 | 190 | while [ "${RETRY}" -gt 0 ]; do 191 | gcloud compute networks peerings delete "${PEERING}" --network "${NETWORK}" --project "${PROJECT}" --quiet 192 | network_peering_exists "${PROJECT}" "${NETWORK}" 193 | if [[ "$?" != "1" ]]; then 194 | sleep 10 195 | RETRY=$((RETRY-1)) 196 | else 197 | break 198 | fi 199 | done 200 | 201 | # Fail the deletion if there are still traces of deployment left after multiple attempts of deletion 202 | if ! 
network_peering_exists "${PROJECT}" "${NETWORK}" 203 | then 204 | return 1 205 | fi 206 | 207 | return 0 208 | } 209 | 210 | 211 | # Verify cidr range 212 | # Globals: 213 | # None 214 | # Arguments: 215 | # PROJECT 216 | # SUBNET 217 | # RANGE 218 | # Returns: 219 | # status code 220 | function verify_cidr_range() { 221 | local PROJECT="${1}" 222 | local SUBNET="${2}" 223 | local CIDR="${3}" 224 | local RANGE 225 | RANGE=$(gcloud compute networks subnets list --project "${PROJECT}" \ 226 | --filter="name=${SUBNET}" --format "value(RANGE)") 227 | if [[ "${RANGE}" == "${CIDR}" ]]; then 228 | echo "Subnet ${SUBNET} has the ip range ${RANGE}" 229 | return 0 230 | fi 231 | return 1 232 | } 233 | 234 | # Check if a cluster exists 235 | # Globals: 236 | # None 237 | # Arguments: 238 | # PROJECT 239 | # CLUSTER 240 | # Returns: 241 | # status code 242 | function cluster_running() { 243 | local PROJECT="${1}" 244 | local CLUSTER="${2}" 245 | local RUNNING 246 | RUNNING=$(gcloud container clusters list --project "${PROJECT}" \ 247 | --filter="name=${CLUSTER} AND status:RUNNING" --format "value(name)") 248 | if [[ "${RUNNING}" == "${CLUSTER}" ]]; then 249 | echo "Cluster ${CLUSTER} is running" 250 | return 0 251 | fi 252 | return 1 253 | } 254 | 255 | # Check if service ip is available 256 | # Globals: 257 | # None 258 | # Arguments: 259 | # PROJECT 260 | # CLUSTER 261 | # SERVICE 262 | # RETRY_COUNT - Number of times to retry 263 | # INTERVAL - Amount of time to sleep between retries 264 | # NAMESPACE - k8s namespace the service lives in 265 | # Returns: 266 | # status code 267 | function access_service () { 268 | local PROJECT="${1}" 269 | local CLUSTER="${2}" 270 | local SERVICE="${3}" 271 | local RETRY_COUNT="15" 272 | local SLEEP="15" 273 | local NAMESPACE="default" 274 | local SERVICE_IP 275 | 276 | for ((i=0; i<"${RETRY_COUNT}"; i++)); do 277 | SERVICE_IP=$(kubectl get -n "${NAMESPACE}" --cluster "${CLUSTER}" \ 278 | service "${SERVICE}" -o 
jsonpath='{.status.loadBalancer.ingress[0].ip}') 279 | if [ "${SERVICE_IP}" == "" ] ; then 280 | echo "Attempt $((i + 1)): IP not yet allocated for service ${SERVICE}" >&1 281 | else 282 | echo "$SERVICE_IP has been allocated for service ${SERVICE} in ${CLUSTER}" >&1 283 | return 0 284 | fi 285 | sleep "${SLEEP}" 286 | done 287 | echo "Timed out waiting for service ${SERVICE} to be allocated an IP address." >&1 288 | return 1 289 | } 290 | 291 | # Check if service backends exist 292 | # Globals: 293 | # None 294 | # Arguments: 295 | # PROJECT 296 | # NAME 297 | # RETRY_COUNT - Number of times to retry 298 | # INTERVAL - Amount of time to sleep between retries 299 | # NAMESPACE - k8s namespace the service lives in 300 | # Returns: 301 | # status code 302 | function backends_exists () { 303 | local PROJECT="${1}" 304 | local NAME="${2}" 305 | local RETRY_COUNT="50" 306 | local SLEEP="10" 307 | local BACKEND 308 | 309 | for ((i=0; i<"${RETRY_COUNT}"; i++)); do 310 | BACKEND=$(gcloud compute backend-services list --project "$PROJECT" \ 311 | --format "value(backends.group)" | grep "${NAME}") 312 | if [ "${BACKEND}" == "" ] ; then 313 | return 0 314 | else 315 | echo "Attempt $((i + 1)): Checking if service backends are removed" >&1 316 | fi 317 | sleep "${SLEEP}" 318 | done 319 | echo "Timed out waiting for service backends to be removed." >&1 320 | return 1 321 | } 322 | --------------------------------------------------------------------------------