├── .gitignore ├── CHANGELOG.md ├── CODE-OF-CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── MAINTAINERS_GUIDE.md ├── README.md ├── aws ├── README.md ├── ops-manager-buckets.tf ├── ops-manager-dns.tf ├── ops-manager-nat.tf ├── ops-manager-outputs.tf ├── ops-manager-route-tables.tf ├── ops-manager-security-groups.tf ├── ops-manager-subnets.tf ├── ops-manager-vpc.tf ├── ops-manager.tf ├── pas-buckets.tf ├── pas-dns.tf ├── pas-iam.tf ├── pas-lbs.tf ├── pas-outputs.tf ├── pas-route-tables.tf ├── pas-security-groups.tf ├── pas-subnets.tf ├── pks-dns.tf ├── pks-iam.tf ├── pks-lbs.tf ├── pks-outputs.tf ├── pks-route-tables.tf ├── pks-security-group.tf ├── pks-subnets.tf ├── provider.tf ├── terraform.tfvars.example ├── variables.tf └── versions.tf ├── azure ├── .gitignore ├── README.md ├── ops-manager-dns.tf ├── ops-manager-networks.tf ├── ops-manager-outputs.tf ├── ops-manager-resource-groups.tf ├── ops-manager-security-groups.tf ├── ops-manager-storage.tf ├── ops-manager-subnets.tf ├── ops-manager.tf ├── pas-dns.tf ├── pas-lbs.tf ├── pas-outputs.tf ├── pas-storage.tf ├── pas-subnets.tf ├── pks-as.tf ├── pks-dns.tf ├── pks-iam.tf ├── pks-lbs.tf ├── pks-outputs.tf ├── pks-security-groups.tf ├── pks-subnets.tf ├── provider.tf ├── terraform.tfvars.example ├── variables.tf └── versions.tf ├── ci ├── configuration │ ├── auth.yml │ ├── aws │ │ ├── director.yml │ │ ├── director │ │ │ ├── pas.yml │ │ │ └── pks.yml │ │ ├── ops-manager.yml │ │ ├── pks.yml │ │ └── srt.yml │ ├── azure │ │ ├── director.yml │ │ ├── director │ │ │ ├── pas.yml │ │ │ └── pks.yml │ │ ├── ops-manager.yml │ │ ├── pks.yml │ │ └── srt.yml │ ├── env.yml │ ├── gcp │ │ ├── director.yml │ │ ├── director │ │ │ ├── pas.yml │ │ │ └── pks.yml │ │ ├── ops-manager.yml │ │ ├── pks.yml │ │ └── srt.yml │ └── nsxt │ │ ├── director.yml │ │ └── ops-manager.yml ├── pipelines │ └── pipeline.yml ├── scripts │ └── update-ci.sh └── tasks │ ├── get-ops-manager-config │ ├── task │ └── task.yml │ ├── leftovers │ ├── task │ └── task.yml │ └── write-om-files │ ├── task │ └── task.yml ├── example-extensions ├── README.md └── http2-lb-gcp │ ├── README.md │ └── terraform │ └── http2-lb_override.tf ├── gcp ├── README.md ├── ops-manager-buckets.tf ├── ops-manager-certs.tf ├── ops-manager-dns.tf ├── ops-manager-firewalls.tf ├── ops-manager-iam.tf ├── ops-manager-nat.tf ├── ops-manager-network.tf ├── ops-manager-outputs.tf ├── ops-manager-subnets.tf ├── ops-manager.tf ├── pas-buckets.tf ├── pas-dns.tf ├── pas-firewalls.tf ├── pas-lbs.tf ├── pas-outputs.tf ├── pas-subnets.tf ├── pks-dns.tf ├── pks-firewalls.tf ├── pks-iam.tf ├── pks-lbs.tf ├── pks-outputs.tf ├── provider.tf ├── terraform.tfvars.example ├── variables.tf └── versions.tf └── nsxt ├── README.md ├── data.tf ├── ops-manager-network.tf ├── ops-manager-outputs.tf ├── ops-manager.tf ├── pas-lbs.tf ├── pas-outputs.tf ├── provider.tf ├── terraform.tfvars.example ├── variables.tf └── versions.tf /.gitignore: -------------------------------------------------------------------------------- 1 | *.tfvars 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | **/*.tfstate 6 | **/*.tfstate.* 7 | 8 | **/*.tfplan 9 | 10 | **/*.orig 11 | 12 | crash.log 13 | 14 | override.tf 15 | override.tf.json 16 | *_override.tf 17 | *_override.tf.json 18 | 19 | **/*.pem 20 | **/*.pem.pub 21 | 22 | # Ignore platform automation's default output 23 | **/output.json 24 | 25 | .idea 26 | -------------------------------------------------------------------------------- /CHANGELOG.md: 
-------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 3.0.0 4 | 5 | ### Breaking changes 6 | 7 | - `GCP`: Added a new variable, `location`, which sets the location of the `ops-manager` bucket. 8 | 9 | - Updated the templates to use `terraform` version `1.0.11`. 10 | - Terraform providers updated and templates adjusted to remove deprecation warnings and errors. 11 | 12 | |Provider| version| 13 | |-|-| 14 | | GCP | 4.1.0 | 15 | | AWS | 3.65.0 | 16 | | Azure | 2.85.0 | 17 | 18 | ### Features 19 | - #70 : `GCP` Add NAT router configuration for Ops Manager as well. 20 | - #64 : `Azure` Fix subnet reserved ranges. 21 | - #63 : `All Providers` Fix inconsistent DNS A record entries for PKS API endpoints. 22 | - `All Providers` : All the version constraints for the providers were moved to the `versions.tf` files. 23 | - `Azure`: Updated the `azurerm_subnet` resources to use the new `address_prefixes` property. 24 | - `Azure`: Fixed an interpolation warning on `azure/ops-manager-resource-groups.tf`. 25 | - `Azure` : Removed the property `enable_advanced_threat_protection` from the `azurerm_storage_account` resource, which was removed in the latest version of the provider. 26 | - `Azure` : Added the resource `azurerm_advanced_threat_protection`, which replaces the `enable_advanced_threat_protection` property on the `azurerm_storage_account` resources. 27 | - `Azure` : Added the property `allow_blob_public_access` with value `true` on the `azurerm_storage_account.bosh` resource; this mimics the previous behavior, which was throwing errors after the provider upgrade. 28 | - `Azure` : Changed the value of `role_definition_id` on the `azurerm_role_assignment` resources to use the `azurerm_role_definition.resource_name.resource` property instead of `azurerm_role_definition.pks-worker.id`; this fixes a problem introduced after upgrading the provider, caused by a change in the format of the `id` property of `azurerm_role_definition`. 29 | - `Azure` : Removed the use of the property `resource_group_name` on the `azurerm_lb_backend_address_pool` resources. 30 | - `Azure` : Updated the `azurerm_lb_probe` resources to use the property `backend_address_pool_ids` instead of `backend_address_pool_id`; this removes a deprecation warning. 31 | - `GCP`: Added the `source_ranges` property to firewall rules that didn't have it; this setting can be changed using the `ingress_source_ranges` variable in the `terraform.tfvars` file. 32 | 33 | ### Bug Fixes 34 | - Fixed an issue introduced by [fc36573](https://github.com/pivotal/paving/fc36573) which caused terraform to fail under certain circumstances.
35 | 36 | ## 2.1.0 37 | 38 | ### Features 39 | - [47539b2](https://github.com/pivotal/paving/47539b2) - Add an override to use http2 in gcp load balancer 40 | - [0e5e31c](https://github.com/pivotal/paving/0e5e31c) - Add note about the environment FQDN 41 | - [c8d8eb4](https://github.com/pivotal/paving/c8d8eb4) - Update example.tfvars for GCP to be more clear 42 | - [5cb3ce3](https://github.com/pivotal/paving/5cb3ce3) - Add a prerequisite to paving GCP 43 | - [fc36573](https://github.com/pivotal/paving/fc36573) - Generate list of backends dynamically in the PAS lb 44 | - [1691aea](https://github.com/pivotal/paving/1691aea) - adding api prefix to the configuration value 45 | - [ff21c1c](https://github.com/pivotal/paving/ff21c1c) - Create ssl certificate before destroying it so certs can be rotated 46 | 47 | ### Bug Fixes 48 | - [8bd00d7](https://github.com/pivotal/paving/8bd00d7) - Restrict the Google Platform provider to v3.90 patch releases; this fixes a breaking change present in `v4.0.0` of the `gcp` provider. 49 | 50 | ## 2.0.0 51 | 52 | ### Features 53 | * Updated to terraform v0.13.0 HCL formatting. 54 | 55 | ### Breaking Changes 56 | * When using `paving` to create a foundation, 57 | there may be resources provisioned that are not used. 58 | For example, you might deploy only Ops Manager and the PAS tile, 59 | but still have networking resources created for PKS. 60 | 61 | This update allows control over that. 62 | The terraform resources have been organized into namespaced files. 63 | The namespacing identifies the resources and what they are for. 64 | For example, `pas-iam.tf` creates IAM resources for the PAS tile. 65 | There are corresponding namespaces for `ops-manager-*.tf` and `pks-*.tf`. 66 | 67 | If you don't require PAS, run `rm pas-*.tf`. 68 | If you don't require PKS, run `rm pks-*.tf`. 69 | 70 | Ops Manager resources cannot be removed, as Ops Manager is required. 71 | 72 | This change also affects the `stable_config` pattern of outputs. 73 | Because there is no way to test the existence of a resource in `terraform`, 74 | `stable_config` has been separated into `stable_config_opsmanager` for Ops Manager, 75 | `stable_config_pas` for PAS, and `stable_config_pks` for PKS. 76 | 77 | These changes apply to all IaaSes. 78 | NOTE: `nsxt` does not have paving resources for PKS, which is why the prefixed files are not there. 79 | 80 | ## 1.0.0 81 | 82 | ### Features 83 | * AWS, GCP, and Azure include resources to pave Ops Manager, PAS, and PKS. 84 | * NSX-T includes resources to pave Ops Manager and PAS. 85 | -------------------------------------------------------------------------------- /CODE-OF-CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in the Paving project and our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community.
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at oss-coc@vmware.com. 63 | All complaints will be reviewed and investigated promptly and fairly. 64 | 65 | All community leaders are obligated to respect the privacy and security of the 66 | reporter of any incident. 67 | 68 | ## Enforcement Guidelines 69 | 70 | Community leaders will follow these Community Impact Guidelines in determining 71 | the consequences for any action they deem in violation of this Code of Conduct: 72 | 73 | ### 1. Correction 74 | 75 | **Community Impact**: Use of inappropriate language or other behavior deemed 76 | unprofessional or unwelcome in the community. 77 | 78 | **Consequence**: A private, written warning from community leaders, providing 79 | clarity around the nature of the violation and an explanation of why the 80 | behavior was inappropriate. A public apology may be requested. 81 | 82 | ### 2. Warning 83 | 84 | **Community Impact**: A violation through a single incident or series 85 | of actions. 86 | 87 | **Consequence**: A warning with consequences for continued behavior. No 88 | interaction with the people involved, including unsolicited interaction with 89 | those enforcing the Code of Conduct, for a specified period of time. This 90 | includes avoiding interactions in community spaces as well as external channels 91 | like social media. 
Violating these terms may lead to a temporary or 92 | permanent ban. 93 | 94 | ### 3. Temporary Ban 95 | 96 | **Community Impact**: A serious violation of community standards, including 97 | sustained inappropriate behavior. 98 | 99 | **Consequence**: A temporary ban from any sort of interaction or public 100 | communication with the community for a specified period of time. No public or 101 | private interaction with the people involved, including unsolicited interaction 102 | with those enforcing the Code of Conduct, is allowed during this period. 103 | Violating these terms may lead to a permanent ban. 104 | 105 | ### 4. Permanent Ban 106 | 107 | **Community Impact**: Demonstrating a pattern of violation of community 108 | standards, including sustained inappropriate behavior, harassment of an 109 | individual, or aggression toward or disparagement of classes of individuals. 110 | 111 | **Consequence**: A permanent ban from any sort of public interaction within 112 | the community. 113 | 114 | ## Attribution 115 | 116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 117 | version 2.0, available at 118 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 119 | 120 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 121 | enforcement ladder](https://github.com/mozilla/diversity). 122 | 123 | [homepage]: https://www.contributor-covenant.org 124 | 125 | For answers to common questions about this code of conduct, see the FAQ at 126 | https://www.contributor-covenant.org/faq. Translations are available at 127 | https://www.contributor-covenant.org/translations. -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to the docs 2 | 3 | The Paving project team welcomes contributions from the community. If you wish to contribute code 4 | and you have not signed our [contributor license agreement](https://cla.vmware.com/cla/1/preview), our bot will update the issue when 5 | you open a Pull Request. For any questions about the CLA process, please refer to our [FAQ](https://cla.vmware.com/faq). 6 | 7 | ## Start with a github issue 8 | 9 | In all cases, following this workflow will help all contributors to docs to 10 | participate more equitably: 11 | 12 | 1. Search existing github issues that may already describe the idea you have. 13 | If you find one, consider adding a comment that adds additional context 14 | about your use case, 15 | the exact problem you need solved and why, 16 | and/or your interest in helping to contribute to that effort. 17 | 2. If there is no existing issue that covers your idea, 18 | open a new issue to describe the change you would like to see in docs. 19 | Please provide as much context as you can about your use case, 20 | the exact problem you need solved and why, 21 | and the reason why you would like to see this change. 22 | If you are reporting a bug, please include steps 23 | to reproduce the issue if possible. 24 | 3. Any number of folks from the community 25 | may comment on your issue and ask additional questions. 26 | A maintainer will add the `pr welcome` label to the issue 27 | when it has been determined that the change will be welcome. 28 | Anyone from the community may step in to make that change. 29 | 4. 
If you intend to make the changes, comment on the issue 30 | to indicate your interest in working on it to reduce the likelihood that 31 | more than one person starts to work on it independently. 32 | 33 | ## Contributing your changes 34 | 35 | 1. When you have a set of changes to contribute back to docs, 36 | create a pull request (PR) 37 | and reference the issue that the changes in the PR are addressing. 38 | Ensure the PR is made against the correct version of the docs, 39 | which are branched by the release version on Tanzu Network, 40 | or contribute new docs directly to `develop`. 41 | 1. Your pull request will be reviewed by one or more maintainers. 42 | You may also receive feedback from others in the community. 43 | The feedback may come in the form of requests for additional changes 44 | to meet expectations for code quality or consistency. 45 | Or it could be clarifying questions 46 | to better understand the decisions you made in your implementation. 47 | 1. When a maintainer accepts your changes, 48 | they will merge your pull request. 49 | If there are outstanding requests for changes 50 | or other small changes they feel can be made to improve the changed code, 51 | they may make additional changes or merge the changes manually. 52 | It's always nice to have changes come in just as the team would like to see them, 53 | but we'll try not to hold up a pull request for a long period of time 54 | due to minor changes. 55 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MAINTAINERS_GUIDE.md: -------------------------------------------------------------------------------- 1 | # Maintainer/Contributor Guidelines for this repository 2 | 3 | This guide is intended for maintainers. 4 | 5 | ## CI 6 | 7 | - The pipelines run on an internal deployment of Concourse. 8 | 9 | - The script to update the pipelines is called `ci/scripts/update-ci.sh` and its called using a preconfigured concourse target. 
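For reference, a minimal invocation sketch, assuming the `platform-automation` fly target referenced by the script is already configured and logged in (the Concourse URL below is only a placeholder):

```console
# Log in to the preconfigured Concourse target, then set the paving-ci pipeline.
fly -t platform-automation login -c https://your-concourse.example.com
./ci/scripts/update-ci.sh
```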
10 | 11 | ## PRs 12 | 13 | - We recommend that feature requests and bug fixes be submitted as PRs. Before submitting a PR, please 14 | read the [Decisions](README.md#decisions) section of the readme to determine whether 15 | the functionality you are requesting is generic and minimal enough to be added. If not, it is recommended 16 | that this functionality live in a separate repository. 17 | 18 | ## Cutting a release 19 | 20 | - Update the [CHANGELOG.md](CHANGELOG.md) file. 21 | - Create a new tag with the version number, pointing at the commit that introduced the latest change. 22 | - Create a release. 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Paving 2 | 3 | This repository contains Terraform templates for paving the necessary 4 | infrastructure to deploy Tanzu Application Service (TAS) and Pivotal Container Service (PKS) to a single foundation. 5 | The templates support AWS, vSphere, Azure, and GCP. 6 | 7 | 8 | ## Requirements 9 | As of `v3.0.0` of this repository, the supported versions of the Terraform CLI and providers are: 10 | 11 | - Terraform CLI `v1.0.11` or higher 12 | - Provider versions 13 | |Provider| version| 14 | |-|-| 15 | | GCP | 4.1.0 | 16 | | AWS | 3.65.0 | 17 | | Azure | 2.85.0 | 18 | 19 | ## Usage 20 | 21 | ### Configuration 22 | 23 | In each IaaS directory, there is a `terraform.tfvars.example` you can copy 24 | and modify with your configuration choices and credentials. 25 | 26 | 1. `terraform init` 27 | 1. `terraform plan -var-file terraform.tfvars` 28 | 1. `terraform apply -var-file terraform.tfvars` 29 | 1. `terraform output stable_config_opsmanager` (likewise `stable_config_pas` and `stable_config_pks`) 30 | 1. `terraform destroy -var-file terraform.tfvars` 31 | 32 | ### Removing unnecessary resources 33 | 34 | The Terraform templates are namespaced by the resources that consume them. 35 | In each IaaS directory, the file names carry the prefixes `ops-manager-`, `pks-`, and `pas-`. 36 | 37 | In some cases, not all resources are required in a foundation. 38 | For example, you may deploy just PKS and not PAS. 39 | To remove the PAS resources, run `rm pas-*.tf` in the IaaS directory. 40 | 41 | Please note that the `ops-manager-*.tf` files cannot be removed. 42 | Every foundation requires an Ops Manager. 43 | 44 | ## Decisions 45 | 46 | - These templates support deploying Tanzu Application Service (TAS) 47 | and Pivotal Container Service (PKS) to the same foundation. 48 | 49 | - The templates **do not** create an Ops Manager VM but **do** 50 | create the necessary infrastructure for the VM (security groups, keys, etc). 51 | 52 | - These templates demonstrate a modest production deployment in three (3) AZs on each IaaS. 53 | 54 | - These templates contain minimal interdependence and cleverness, so that they can easily be incorporated into your own automation. 55 | 56 | ## Versioning 57 | 58 | The versioning of paving's releases is based on the contents 59 | of `terraform output stable_config_(opsmanager|pas|pks)`. `stable_config` should always represent 60 | the minimum necessary to install Pivotal Platform. Any other output may be 61 | added or removed without a change in version.
However, MAJOR.MINOR.PATCH should 62 | change according to the following: 63 | - If an output is removed or a major breaking change is introduced, the MAJOR version should be incremented 64 | - If an output is added, the MINOR version should be incremented 65 | - Otherwise, the patch version should be incremented 66 | 67 | ## Customization 68 | 69 | ### Jumpbox 70 | 71 | In our current configuration, we are using the Ops Manager VM as the 72 | jumpbox. The Ops Manager VM is deployed in the public subnet with a 73 | configuration (`var.ops_manager_allowed_ips`) to restrict it by IP. If you want to use a 74 | jumpbox instead, you may deploy ops manager in the management subnet. 75 | -------------------------------------------------------------------------------- /aws/README.md: -------------------------------------------------------------------------------- 1 | # AWS 2 | 3 | Follow [these instructions](https://docs.pivotal.io/platform/ops-manager/2-8/aws/prepare-env-terraform.html) 4 | to create an IAM user that is needed to run the terraform templates. 5 | 6 | The above instructions specify manual steps for creating the IAM user. If you have the `aws` cli, 7 | you can follow these steps: 8 | 9 | ```console 10 | $ export AWS_IAM_POLICY_DOCUMENT=/tmp/policy-document.json 11 | 12 | $ echo '{ 13 | "Version": "2012-10-17", 14 | "Statement": [ 15 | { 16 | "Sid": "VisualEditor0", 17 | "Effect": "Allow", 18 | "Action": [ 19 | "ec2:*", 20 | "elasticloadbalancing:*", 21 | "iam:*", 22 | "route53:*", 23 | "s3:*" 24 | ], 25 | "Resource": "*" 26 | } 27 | ] 28 | }' > $AWS_IAM_POLICY_DOCUMENT 29 | 30 | $ export AWS_IAM_USER_NAME="REPLACE-ME" 31 | 32 | $ aws iam create-user --user-name $AWS_IAM_USER_NAME 33 | 34 | $ aws iam put-user-policy --user-name $AWS_IAM_USER_NAME \ 35 | --policy-name "policy" \ 36 | --policy-document file://$AWS_IAM_POLICY_DOCUMENT 37 | 38 | $ aws iam create-access-key --user-name $AWS_IAM_USER_NAME 39 | ``` 40 | -------------------------------------------------------------------------------- /aws/ops-manager-buckets.tf: -------------------------------------------------------------------------------- 1 | resource "random_integer" "ops_manager_bucket_suffix" { 2 | min = 1 3 | max = 100000 4 | } 5 | 6 | resource "aws_s3_bucket" "ops-manager-bucket" { 7 | bucket = "${var.environment_name}-ops-manager-bucket-${random_integer.ops_manager_bucket_suffix.result}" 8 | 9 | versioning { 10 | enabled = true 11 | } 12 | 13 | tags = merge( 14 | var.tags, 15 | { "Name" = "${var.environment_name}-ops-manager-bucket-${random_integer.ops_manager_bucket_suffix.result}" }, 16 | ) 17 | } 18 | -------------------------------------------------------------------------------- /aws/ops-manager-dns.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "hosted" { 2 | name = var.hosted_zone 3 | } 4 | 5 | resource "aws_route53_record" "ops-manager" { 6 | name = "opsmanager.${var.environment_name}.${data.aws_route53_zone.hosted.name}" 7 | 8 | zone_id = data.aws_route53_zone.hosted.zone_id 9 | type = "A" 10 | ttl = 300 11 | 12 | records = [aws_eip.ops-manager.public_ip] 13 | } 14 | -------------------------------------------------------------------------------- /aws/ops-manager-nat.tf: -------------------------------------------------------------------------------- 1 | resource "aws_internet_gateway" "gw" { 2 | vpc_id = aws_vpc.vpc.id 3 | } 4 | 5 | resource "aws_eip" "nat" { 6 | count = length(var.availability_zones) 7 | 8 | vpc = true 9 | 10 | tags = 
merge( 11 | var.tags, 12 | { "Name" = "${var.environment_name}-nat-eip" }, 13 | ) 14 | } 15 | 16 | resource "aws_nat_gateway" "nat" { 17 | count = length(var.availability_zones) 18 | 19 | allocation_id = element(aws_eip.nat.*.id, count.index) 20 | subnet_id = element(aws_subnet.public-subnet.*.id, count.index) 21 | 22 | tags = merge( 23 | var.tags, 24 | { "Name" = "${var.environment_name}-nat-gateway" }, 25 | ) 26 | 27 | depends_on = [aws_internet_gateway.gw] 28 | } 29 | -------------------------------------------------------------------------------- /aws/ops-manager-outputs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | stable_config_opsmanager = { 3 | access_key = var.access_key 4 | secret_key = var.secret_key 5 | environment_name = var.environment_name 6 | availability_zones = var.availability_zones 7 | region = var.region 8 | 9 | vpc_id = aws_vpc.vpc.id 10 | 11 | public_subnet_ids = aws_subnet.public-subnet[*].id 12 | public_subnet_cidrs = aws_subnet.public-subnet[*].cidr_block 13 | 14 | management_subnet_ids = aws_subnet.management-subnet[*].id 15 | management_subnet_cidrs = aws_subnet.management-subnet[*].cidr_block 16 | management_subnet_gateways = [ 17 | for i in range(length(var.availability_zones)) : 18 | cidrhost(aws_subnet.management-subnet[i].cidr_block, 1) 19 | ] 20 | management_subnet_reserved_ip_ranges = [ 21 | for i in range(length(var.availability_zones)) : 22 | "${cidrhost(aws_subnet.management-subnet[i].cidr_block, 1)}-${cidrhost(aws_subnet.management-subnet[i].cidr_block, 9)}" 23 | ] 24 | 25 | ops_manager_subnet_id = aws_subnet.public-subnet[0].id 26 | ops_manager_public_ip = aws_eip.ops-manager.public_ip 27 | ops_manager_dns = aws_route53_record.ops-manager.name 28 | ops_manager_iam_user_access_key = aws_iam_access_key.ops-manager.id 29 | ops_manager_iam_user_secret_key = aws_iam_access_key.ops-manager.secret 30 | ops_manager_iam_instance_profile_name = aws_iam_instance_profile.ops-manager.name 31 | ops_manager_key_pair_name = aws_key_pair.ops-manager.key_name 32 | ops_manager_ssh_public_key = tls_private_key.ops-manager.public_key_openssh 33 | ops_manager_ssh_private_key = tls_private_key.ops-manager.private_key_pem 34 | ops_manager_bucket = aws_s3_bucket.ops-manager-bucket.bucket 35 | ops_manager_security_group_id = aws_security_group.ops-manager.id 36 | ops_manager_security_group_name = aws_security_group.ops-manager.name 37 | 38 | platform_vms_security_group_id = aws_security_group.platform.id 39 | platform_vms_security_group_name = aws_security_group.platform.name 40 | 41 | nat_security_group_id = aws_security_group.nat.id 42 | nat_security_group_name = aws_security_group.nat.name 43 | 44 | services_subnet_ids = aws_subnet.services-subnet[*].id 45 | services_subnet_cidrs = aws_subnet.services-subnet[*].cidr_block 46 | services_subnet_gateways = [ 47 | for i in range(length(var.availability_zones)) : 48 | cidrhost(aws_subnet.services-subnet[i].cidr_block, 1) 49 | ] 50 | services_subnet_reserved_ip_ranges = [ 51 | for i in range(length(var.availability_zones)) : 52 | "${cidrhost(aws_subnet.services-subnet[i].cidr_block, 1)}-${cidrhost(aws_subnet.services-subnet[i].cidr_block, 9)}" 53 | ] 54 | 55 | ssl_certificate = var.ssl_certificate 56 | ssl_private_key = var.ssl_private_key 57 | } 58 | } 59 | 60 | output "stable_config_opsmanager" { 61 | value = jsonencode(local.stable_config_opsmanager) 62 | sensitive = true 63 | } 64 | -------------------------------------------------------------------------------- 
/aws/ops-manager-route-tables.tf: -------------------------------------------------------------------------------- 1 | resource "aws_route_table" "deployment" { 2 | count = length(var.availability_zones) 3 | vpc_id = aws_vpc.vpc.id 4 | } 5 | 6 | resource "aws_route" "nat-gateway-route" { 7 | count = length(var.availability_zones) 8 | 9 | route_table_id = element(aws_route_table.deployment[*].id, count.index) 10 | nat_gateway_id = aws_nat_gateway.nat[0].id 11 | destination_cidr_block = "0.0.0.0/0" 12 | } 13 | 14 | resource "aws_route_table_association" "route-management-subnet" { 15 | count = length(var.availability_zones) 16 | subnet_id = element(aws_subnet.management-subnet[*].id, count.index) 17 | route_table_id = element(aws_route_table.deployment[*].id, count.index) 18 | } 19 | 20 | resource "aws_route_table_association" "route-services-subnet" { 21 | count = length(var.availability_zones) 22 | subnet_id = element(aws_subnet.services-subnet[*].id, count.index) 23 | route_table_id = element(aws_route_table.deployment[*].id, count.index) 24 | } 25 | 26 | resource "aws_route_table" "public-route-table" { 27 | vpc_id = aws_vpc.vpc.id 28 | 29 | route { 30 | cidr_block = "0.0.0.0/0" 31 | gateway_id = aws_internet_gateway.gw.id 32 | } 33 | } 34 | 35 | resource "aws_route_table_association" "route-public-subnet" { 36 | count = length(var.availability_zones) 37 | subnet_id = element(aws_subnet.public-subnet[*].id, count.index) 38 | route_table_id = aws_route_table.public-route-table.id 39 | } 40 | -------------------------------------------------------------------------------- /aws/ops-manager-security-groups.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "nat" { 2 | name = "${var.environment_name}-nat-sg" 3 | vpc_id = aws_vpc.vpc.id 4 | 5 | ingress { 6 | cidr_blocks = [aws_vpc.vpc.cidr_block] 7 | protocol = "-1" 8 | from_port = 0 9 | to_port = 0 10 | } 11 | 12 | egress { 13 | cidr_blocks = ["0.0.0.0/0"] 14 | protocol = "-1" 15 | from_port = 0 16 | to_port = 0 17 | } 18 | 19 | tags = merge( 20 | var.tags, 21 | { "Name" = "${var.environment_name}-nat-sg" }, 22 | ) 23 | } 24 | 25 | resource "aws_security_group" "ops-manager" { 26 | name = "${var.environment_name}-ops-manager-sg" 27 | vpc_id = aws_vpc.vpc.id 28 | 29 | ingress { 30 | cidr_blocks = var.ops_manager_allowed_ips 31 | protocol = "tcp" 32 | from_port = 22 33 | to_port = 22 34 | } 35 | 36 | ingress { 37 | cidr_blocks = var.ops_manager_allowed_ips 38 | protocol = "tcp" 39 | from_port = 80 40 | to_port = 80 41 | } 42 | 43 | ingress { 44 | cidr_blocks = var.ops_manager_allowed_ips 45 | protocol = "tcp" 46 | from_port = 443 47 | to_port = 443 48 | } 49 | 50 | egress { 51 | cidr_blocks = ["0.0.0.0/0"] 52 | protocol = "-1" 53 | from_port = 0 54 | to_port = 0 55 | } 56 | 57 | tags = merge( 58 | var.tags, 59 | { "Name" = "${var.environment_name}-ops-manager-sg" }, 60 | ) 61 | } 62 | 63 | resource "aws_security_group" "platform" { 64 | name = "${var.environment_name}-platform-vms-sg" 65 | vpc_id = aws_vpc.vpc.id 66 | 67 | ingress { 68 | cidr_blocks = [aws_vpc.vpc.cidr_block] 69 | protocol = "-1" 70 | from_port = 0 71 | to_port = 0 72 | } 73 | 74 | egress { 75 | cidr_blocks = ["0.0.0.0/0"] 76 | protocol = "-1" 77 | from_port = 0 78 | to_port = 0 79 | } 80 | 81 | tags = merge( 82 | var.tags, 83 | { "Name" = "${var.environment_name}-platform-vms-sg" }, 84 | ) 85 | } 86 | -------------------------------------------------------------------------------- /aws/ops-manager-subnets.tf: 
-------------------------------------------------------------------------------- 1 | resource "aws_subnet" "public-subnet" { 2 | count = length(var.public_subnet_cidrs) 3 | 4 | vpc_id = aws_vpc.vpc.id 5 | cidr_block = element(var.public_subnet_cidrs, count.index) 6 | availability_zone = element(var.availability_zones, count.index) 7 | 8 | tags = merge( 9 | var.tags, 10 | { Name = "${var.environment_name}-public-subnet-${count.index}" }, 11 | { "kubernetes.io/role/elb" = "1"} 12 | ) 13 | } 14 | 15 | resource "aws_subnet" "management-subnet" { 16 | count = length(var.management_subnet_cidrs) 17 | 18 | vpc_id = aws_vpc.vpc.id 19 | cidr_block = element(var.management_subnet_cidrs, count.index) 20 | availability_zone = element(var.availability_zones, count.index) 21 | 22 | tags = merge( 23 | var.tags, 24 | { Name = "${var.environment_name}-management-subnet-${count.index}" } 25 | ) 26 | } 27 | 28 | resource "aws_subnet" "services-subnet" { 29 | count = length(var.services_subnet_cidrs) 30 | 31 | vpc_id = aws_vpc.vpc.id 32 | cidr_block = element(var.services_subnet_cidrs, count.index) 33 | availability_zone = element(var.availability_zones, count.index) 34 | 35 | tags = merge( 36 | var.tags, 37 | { Name = "${var.environment_name}-services-subnet-${count.index}" }, 38 | ) 39 | } 40 | -------------------------------------------------------------------------------- /aws/ops-manager-vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "vpc" { 2 | cidr_block = "10.0.0.0/16" 3 | instance_tenancy = "default" 4 | enable_dns_hostnames = true 5 | 6 | tags = merge( 7 | var.tags, 8 | { Name = "${var.environment_name}-vpc" }, 9 | ) 10 | } 11 | -------------------------------------------------------------------------------- /aws/ops-manager.tf: -------------------------------------------------------------------------------- 1 | resource "aws_eip" "ops-manager" { 2 | vpc = true 3 | 4 | tags = merge( 5 | var.tags, 6 | { "Name" = "${var.environment_name}-ops-manager-eip" }, 7 | ) 8 | } 9 | 10 | resource "aws_key_pair" "ops-manager" { 11 | key_name = "${var.environment_name}-ops-manager-key" 12 | public_key = tls_private_key.ops-manager.public_key_openssh 13 | } 14 | 15 | resource "tls_private_key" "ops-manager" { 16 | algorithm = "RSA" 17 | rsa_bits = "4096" 18 | } 19 | 20 | resource "aws_iam_access_key" "ops-manager" { 21 | user = aws_iam_user.ops-manager.name 22 | } 23 | 24 | resource "aws_iam_policy" "ops-manager-role" { 25 | name = "${var.environment_name}-ops-manager-role" 26 | policy = data.aws_iam_policy_document.ops-manager.json 27 | } 28 | 29 | resource "aws_iam_role_policy_attachment" "ops-manager-policy" { 30 | role = aws_iam_role.ops-manager.name 31 | policy_arn = aws_iam_policy.ops-manager-role.arn 32 | } 33 | 34 | resource "aws_iam_role" "ops-manager" { 35 | name = "${var.environment_name}-ops-manager-role" 36 | 37 | lifecycle { 38 | create_before_destroy = true 39 | } 40 | 41 | assume_role_policy = </dev/null 2>&1 && pwd )" 6 | 7 | echo "$WORKING_DIR" 8 | 9 | which ytt || ( 10 | echo "This requires ytt to be installed" 11 | exit 1 12 | ) 13 | which fly || ( 14 | echo "This requires fly to be installed" 15 | exit 1 16 | ) 17 | 18 | echo "Setting CI pipeline..." 
19 | 20 | fly -t platform-automation sp -p paving-ci -c "${WORKING_DIR}/../pipelines/pipeline.yml" \ 21 | --check-creds 22 | -------------------------------------------------------------------------------- /ci/tasks/get-ops-manager-config/task: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | main() { 4 | om -v 5 | 6 | metadata="${PWD}/env-state/metadata" 7 | vars_file="${PWD}/env-state/vars-file" 8 | cat "${metadata}" | yq -r .stable_config | jq -r . > "${vars_file}" 9 | 10 | config="${PWD}/paving/ci/configuration/${IAAS}/ops-manager.yml" 11 | updated_config="${PWD}/config/ops-manager.yml" 12 | 13 | if [ "${IAAS}" == "gcp" ]; then 14 | key="$(mktemp)" 15 | echo "${GCP_SERVICE_ACCOUNT_KEY}" > "${key}" 16 | 17 | bosh interpolate "${config}" \ 18 | --var-file gcp_service_account_key="${key}" \ 19 | --vars-file "${vars_file}" \ 20 | > "${updated_config}" 21 | 22 | else 23 | bosh interpolate "${config}" --vars-file "${vars_file}" > "${updated_config}" 24 | fi 25 | } 26 | 27 | main "$@" 28 | -------------------------------------------------------------------------------- /ci/tasks/get-ops-manager-config/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: cfinfrastructure/releng-tools 8 | 9 | inputs: 10 | - name: env-state 11 | - name: paving 12 | 13 | outputs: 14 | - name: env-state 15 | - name: config 16 | 17 | run: 18 | path: paving/ci/tasks/get-ops-manager-config/task 19 | 20 | params: 21 | IAAS: 22 | 23 | GCP_SERVICE_ACCOUNT_KEY: 24 | -------------------------------------------------------------------------------- /ci/tasks/leftovers/task: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | main() { 4 | if [ "$IAAS" == "" ]; then 5 | echo "IAAS is required" 6 | exit 1 7 | fi 8 | 9 | metadata="${PWD}/env-state/metadata" 10 | vars_file="${PWD}/env-state/vars-file" 11 | cat "${metadata}" | yq -r .stable_config | jq -r . > "${vars_file}" 12 | environment=$(cat ${vars_file} | jq -r .environment_name) 13 | 14 | if [ "${environment}" == "" ]; then 15 | echo "Environment name is required as filter." 
16 | exit 1 17 | fi 18 | 19 | if [ "$IAAS" == "gcp" ]; then 20 | file=$(mktemp) 21 | echo "$(cat ${vars_file} | jq -r .service_account_key)" > $file 22 | export BBL_GCP_SERVICE_ACCOUNT_KEY="$file" 23 | elif [ "$IAAS" == "azure" ]; then 24 | export BBL_AZURE_CLIENT_ID="$(cat ${vars_file} | jq -r .client_id)" 25 | export BBL_AZURE_CLIENT_SECRET="$(cat ${vars_file} | jq -r .client_secret)" 26 | export BBL_AZURE_TENANT_ID="$(cat ${vars_file} | jq -r .tenant_id)" 27 | export BBL_AZURE_SUBSCRIPTION_ID="$(cat ${vars_file} | jq -r .subscription_id)" 28 | elif [ "$IAAS" == "aws" ]; then 29 | export BBL_AWS_ACCESS_KEY_ID="$(cat ${vars_file} | jq -r .access_key)" 30 | export BBL_AWS_SECRET_ACCESS_KEY="$(cat ${vars_file} | jq -r .secret_key)" 31 | export BBL_AWS_REGION="$(cat ${vars_file} | jq -r .region)" 32 | fi 33 | 34 | set -x 35 | 36 | leftovers --no-confirm --filter ${environment} --iaas ${IAAS} 37 | } 38 | 39 | main "$@" 40 | -------------------------------------------------------------------------------- /ci/tasks/leftovers/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalcfreleng/infrastructure 8 | 9 | inputs: 10 | - name: env-state 11 | - name: paving-ci 12 | 13 | params: 14 | # aws | gcp | azure 15 | IAAS: 16 | 17 | run: 18 | path: paving-ci/ci/tasks/leftovers/task 19 | -------------------------------------------------------------------------------- /ci/tasks/write-om-files/task: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | main() { 4 | om -v 5 | 6 | metadata="${PWD}/env-state/metadata" 7 | vars_file="${PWD}/env-state/vars-file" 8 | cat "${metadata}" | yq -r .stable_config | jq -r . 
> "${vars_file}" 9 | ops_manager_dns="$(cat "${vars_file}" | jq -r .ops_manager_dns)" 10 | 11 | updated_env_yml="${PWD}/env/env.yml" 12 | updated_auth_config_file="${PWD}/config/auth.yml" 13 | updated_director_config_path="${PWD}/config/director.yml" 14 | updated_srt_config_file="${PWD}/config/srt.yml" 15 | updated_pks_config_file="${PWD}/config/pks.yml" 16 | 17 | bosh interpolate "${ENV_YML}" \ 18 | --var om_username="${OM_USERNAME}" \ 19 | --var om_password="${OM_PASSWORD}" \ 20 | --var ops_manager_dns="${ops_manager_dns}" \ 21 | > "${updated_env_yml}" 22 | 23 | bosh interpolate "${AUTH_CONFIG_FILE}" \ 24 | --var om_username="${OM_USERNAME}" \ 25 | --var om_password="${OM_PASSWORD}" \ 26 | --var om_decryption_passphrase="passphrase" \ 27 | > "${updated_auth_config_file}" 28 | 29 | bosh interpolate "${DIRECTOR_CONFIG_PATH}" \ 30 | --vars-file "${vars_file}" \ 31 | > "${updated_director_config_path}" 32 | 33 | bosh interpolate "${SRT_CONFIG_FILE}" \ 34 | --vars-file "${vars_file}" \ 35 | > "${updated_srt_config_file}" 36 | 37 | bosh interpolate "${PKS_CONFIG_FILE}" \ 38 | --vars-file "${vars_file}" \ 39 | > "${updated_pks_config_file}" 40 | } 41 | 42 | main "$@" 43 | -------------------------------------------------------------------------------- /ci/tasks/write-om-files/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: cfinfrastructure/releng-tools 8 | 9 | inputs: 10 | - name: env-state 11 | - name: paving-ci 12 | - name: config 13 | 14 | outputs: 15 | - name: env 16 | - name: config 17 | 18 | params: 19 | OM_USERNAME: 20 | OM_PASSWORD: 21 | 22 | AUTH_CONFIG_FILE: 23 | ENV_YML: 24 | DIRECTOR_CONFIG_PATH: 25 | SRT_CONFIG_FILE: 26 | PKS_CONFIG_FILE: 27 | 28 | run: 29 | path: paving-ci/ci/tasks/write-om-files/task 30 | -------------------------------------------------------------------------------- /example-extensions/README.md: -------------------------------------------------------------------------------- 1 | ### Example Extensions 2 | 3 | This folder contains Terraform override files that you can use to 4 | change or add settings in the base Terraform templates. 5 | 6 | -------------------------------------------------------------------------------- /example-extensions/http2-lb-gcp/README.md: -------------------------------------------------------------------------------- 1 | ## HTTP2 load balancer for GCP 2 | 3 | This override enables the HTTP2 protocol for the backend services used in the GCP load 4 | balancer. **DO NOT USE** unless you've enabled the HTTP/2 feature. 5 | 6 | We see that some clients (e.g. `curl`) will automatically upgrade to an HTTP2 connection. 7 | **Be aware** that the gorouter explicitly blocks any HTTP2 request. Outbound HTTP2 requests from the load balancer will fail when forwarding to gorouter. 8 | 9 | Here is an issue to track the HTTP2 feature, and to see when gorouter will be ready for HTTP2: https://github.com/cloudfoundry/routing-release/issues/200 10 | 11 | We anticipate this feature will be available in TAS version 2.12. 12 | 13 | To use it, copy the contents of the `http2-lb-gcp/terraform` directory into your current Terraform templates, as sketched below.
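A minimal sketch of that copy step, assuming you are paving GCP from the root of this repository:

```console
# Copy the override next to the base GCP templates, then review the change before applying.
cp example-extensions/http2-lb-gcp/terraform/http2-lb_override.tf gcp/
cd gcp && terraform plan -var-file terraform.tfvars
```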
14 | -------------------------------------------------------------------------------- /example-extensions/http2-lb-gcp/terraform/http2-lb_override.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_backend_service" "http-lb" { 2 | protocol = "HTTP2" 3 | } 4 | -------------------------------------------------------------------------------- /gcp/README.md: -------------------------------------------------------------------------------- 1 | # GCP 2 | 3 | Follow [these instructions](https://docs.pivotal.io/platform/ops-manager/2-8/gcp/prepare-env-terraform.html) 4 | to create the service account that is needed to run the terraform templates. 5 | 6 | 7 | ### Prerequisites 8 | - a pre-created zone in GCP (see the [google docs](https://cloud.google.com/dns/docs/zones#creating_managed_zones) for details) 9 | - the `Zone name` maps to `hosted_zone` in `terraform.tfvars` 10 | - The Fully Qualified Domain Name (FQDN) for the environment will be `environment_name.hosted-zone` 11 | 12 | ### Roles & Permissions 13 | 14 | If you are looking to create a service account with more restrictive permissions, 15 | you can follow these instructions. 16 | 17 | The roles required: 18 | - Compute Instance Admin (v1) - `compute.instanceAdmin` 19 | - Compute Network Admin - `compute.networkAdmin` 20 | - Compute Security Admin - `compute.securityAdmin` 21 | - DNS Administrator - `dns.admin` 22 | - Security Admin - `iam.securityAdmin` 23 | - Service Account Admin - `iam.serviceAccountAdmin` 24 | - Service Account Key Admin - `iam.serviceAccountKeyAdmin` 25 | - Storage Admin - `storage.admin` 26 | - Service Account User - `iam.serviceAccountUser` 27 | 28 | For each role, you can run the following command: 29 | 30 | ```console 31 | gcloud projects add-iam-policy-binding $PROJECT_ID --member serviceAccount:$SERVICE_ACCOUNT_EMAIL --role "roles/$ROLE" 32 | ``` 33 | 34 | To understand the mapping between permissions and roles, you can see [this document](https://cloud.google.com/iam/docs/understanding-roles). 
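If you prefer to grant them all at once, the following sketch loops over the role IDs listed above; it assumes `PROJECT_ID` and `SERVICE_ACCOUNT_EMAIL` are already exported, as in the command above:

```console
# Bind each of the roles listed above to the service account.
for ROLE in compute.instanceAdmin compute.networkAdmin compute.securityAdmin \
            dns.admin iam.securityAdmin iam.serviceAccountAdmin \
            iam.serviceAccountKeyAdmin storage.admin iam.serviceAccountUser; do
  gcloud projects add-iam-policy-binding "$PROJECT_ID" \
    --member "serviceAccount:$SERVICE_ACCOUNT_EMAIL" \
    --role "roles/$ROLE"
done
```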
35 | -------------------------------------------------------------------------------- /gcp/ops-manager-buckets.tf: -------------------------------------------------------------------------------- 1 | resource "random_integer" "bucket_suffix" { 2 | min = 1 3 | max = 100000 4 | } 5 | 6 | resource "google_storage_bucket" "ops-manager" { 7 | name = "${var.project}-${var.environment_name}-ops-manager-${random_integer.bucket_suffix.result}" 8 | force_destroy = true 9 | location = var.location 10 | } 11 | -------------------------------------------------------------------------------- /gcp/ops-manager-certs.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_ssl_certificate" "certificate" { 2 | name_prefix = "${var.environment_name}-cert" 3 | certificate = var.ssl_certificate 4 | private_key = var.ssl_private_key 5 | 6 | lifecycle { 7 | create_before_destroy = true 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /gcp/ops-manager-dns.tf: -------------------------------------------------------------------------------- 1 | data "google_dns_managed_zone" "hosted-zone" { 2 | name = var.hosted_zone 3 | } 4 | 5 | resource "google_dns_record_set" "ops-manager" { 6 | name = "opsmanager.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 7 | type = "A" 8 | ttl = 300 9 | 10 | managed_zone = var.hosted_zone 11 | 12 | rrdatas = [google_compute_address.ops-manager.address] 13 | } 14 | -------------------------------------------------------------------------------- /gcp/ops-manager-firewalls.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "ops-manager" { 2 | name = "${var.environment_name}-ops-manager" 3 | network = google_compute_network.network.name 4 | 5 | direction = "INGRESS" 6 | 7 | allow { 8 | protocol = "icmp" 9 | } 10 | 11 | allow { 12 | protocol = "tcp" 13 | ports = ["22", "80", "443"] 14 | } 15 | 16 | source_ranges = var.ingress_source_ranges 17 | 18 | target_tags = ["${var.environment_name}-ops-manager"] 19 | } 20 | 21 | resource "google_compute_firewall" "internal" { 22 | name = "${var.environment_name}-internal" 23 | network = google_compute_network.network.self_link 24 | 25 | allow { 26 | protocol = "icmp" 27 | } 28 | 29 | allow { 30 | protocol = "tcp" 31 | } 32 | 33 | allow { 34 | protocol = "udp" 35 | } 36 | 37 | source_ranges = [ 38 | local.management_subnet_cidr, 39 | local.pas_subnet_cidr, 40 | local.services_subnet_cidr, 41 | local.pks_subnet_cidr, 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /gcp/ops-manager-iam.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "ops-manager" { 2 | account_id = "${var.environment_name}-ops-manager" 3 | display_name = "${var.environment_name} Ops Manager VM Service Account" 4 | } 5 | 6 | resource "google_service_account_key" "ops-manager" { 7 | service_account_id = google_service_account.ops-manager.id 8 | } 9 | 10 | resource "google_project_iam_member" "iam-service-account-user" { 11 | project = var.project 12 | role = "roles/iam.serviceAccountUser" 13 | member = "serviceAccount:${google_service_account.ops-manager.email}" 14 | } 15 | 16 | resource "google_project_iam_member" "iam-service-account-token-creator" { 17 | project = var.project 18 | role = "roles/iam.serviceAccountTokenCreator" 19 | member = 
"serviceAccount:${google_service_account.ops-manager.email}" 20 | } 21 | 22 | resource "google_project_iam_member" "compute-instance-admin-v1" { 23 | project = var.project 24 | role = "roles/compute.instanceAdmin.v1" 25 | member = "serviceAccount:${google_service_account.ops-manager.email}" 26 | } 27 | 28 | resource "google_project_iam_member" "compute-network-admin" { 29 | project = var.project 30 | role = "roles/compute.networkAdmin" 31 | member = "serviceAccount:${google_service_account.ops-manager.email}" 32 | } 33 | 34 | resource "google_project_iam_member" "compute-storage-admin" { 35 | project = var.project 36 | role = "roles/compute.storageAdmin" 37 | member = "serviceAccount:${google_service_account.ops-manager.email}" 38 | } 39 | 40 | resource "google_project_iam_member" "storage-admin" { 41 | project = var.project 42 | role = "roles/storage.admin" 43 | member = "serviceAccount:${google_service_account.ops-manager.email}" 44 | } 45 | -------------------------------------------------------------------------------- /gcp/ops-manager-nat.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_address" "nat-address" { 2 | name = "${var.environment_name}-cloud-nat" 3 | region = var.region 4 | } 5 | 6 | resource "google_compute_router" "nat-router" { 7 | name = "${var.environment_name}-nat-router" 8 | region = var.region 9 | network = google_compute_network.network.self_link 10 | 11 | bgp { 12 | asn = 64514 13 | } 14 | } 15 | 16 | resource "google_compute_router_nat" "nat" { 17 | name = "${var.environment_name}-cloud-nat" 18 | router = google_compute_router.nat-router.name 19 | region = var.region 20 | nat_ip_allocate_option = "MANUAL_ONLY" 21 | nat_ips = [google_compute_address.nat-address.self_link] 22 | source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" 23 | 24 | subnetwork { 25 | name = google_compute_subnetwork.pks.self_link 26 | source_ip_ranges_to_nat = ["ALL_IP_RANGES"] 27 | } 28 | 29 | subnetwork { 30 | name = google_compute_subnetwork.services.self_link 31 | source_ip_ranges_to_nat = ["ALL_IP_RANGES"] 32 | } 33 | 34 | subnetwork { 35 | name = google_compute_subnetwork.management.self_link 36 | source_ip_ranges_to_nat = ["ALL_IP_RANGES"] 37 | } 38 | } 39 | 40 | -------------------------------------------------------------------------------- /gcp/ops-manager-network.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_network" "network" { 2 | name = "${var.environment_name}-network" 3 | auto_create_subnetworks = false 4 | } 5 | -------------------------------------------------------------------------------- /gcp/ops-manager-outputs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | stable_config_opsmanager = { 3 | environment_name = var.environment_name 4 | service_account_key = var.service_account_key 5 | project = var.project 6 | region = var.region 7 | availability_zones = var.availability_zones 8 | 9 | network_name = google_compute_network.network.name 10 | 11 | hosted_zone_name_servers = data.google_dns_managed_zone.hosted-zone.name_servers 12 | 13 | management_subnet_name = google_compute_subnetwork.management.name 14 | management_subnet_cidr = google_compute_subnetwork.management.ip_cidr_range 15 | management_subnet_gateway = google_compute_subnetwork.management.gateway_address 16 | management_subnet_reserved_ip_ranges = "${cidrhost(google_compute_subnetwork.management.ip_cidr_range, 
1)}-${cidrhost(google_compute_subnetwork.management.ip_cidr_range, 9)}" 17 | 18 | ops_manager_bucket = google_storage_bucket.ops-manager.name 19 | ops_manager_dns = replace(google_dns_record_set.ops-manager.name, "/\\.$/", "") 20 | ops_manager_public_ip = google_compute_address.ops-manager.address 21 | ops_manager_service_account_key = base64decode(google_service_account_key.ops-manager.private_key) 22 | ops_manager_ssh_public_key = tls_private_key.ops-manager.public_key_openssh 23 | ops_manager_ssh_private_key = tls_private_key.ops-manager.private_key_pem 24 | ops_manager_tags = "${var.environment_name}-ops-manager" 25 | 26 | platform_vms_tag = "${var.environment_name}-vms" 27 | 28 | services_subnet_name = google_compute_subnetwork.services.name 29 | services_subnet_cidr = google_compute_subnetwork.services.ip_cidr_range 30 | services_subnet_gateway = google_compute_subnetwork.services.gateway_address 31 | services_subnet_reserved_ip_ranges = "${cidrhost(google_compute_subnetwork.services.ip_cidr_range, 1)}-${cidrhost(google_compute_subnetwork.services.ip_cidr_range, 9)}" 32 | 33 | ssl_certificate = var.ssl_certificate 34 | ssl_private_key = var.ssl_private_key 35 | } 36 | } 37 | 38 | output "stable_config_opsmanager" { 39 | value = jsonencode(local.stable_config_opsmanager) 40 | sensitive = true 41 | } 42 | -------------------------------------------------------------------------------- /gcp/ops-manager-subnets.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | management_subnet_cidr = "10.0.0.0/26" 3 | pas_subnet_cidr = "10.0.4.0/24" 4 | services_subnet_cidr = "10.0.8.0/24" 5 | pks_subnet_cidr = "10.0.10.0/24" 6 | } 7 | 8 | resource "google_compute_subnetwork" "management" { 9 | name = "${var.environment_name}-management-subnet" 10 | ip_cidr_range = local.management_subnet_cidr 11 | network = google_compute_network.network.self_link 12 | region = var.region 13 | } 14 | 15 | resource "google_compute_subnetwork" "services" { 16 | name = "${var.environment_name}-services-subnet" 17 | ip_cidr_range = local.services_subnet_cidr 18 | network = google_compute_network.network.self_link 19 | region = var.region 20 | } 21 | 22 | //NOTE: here because it is a requirement on the NAT 23 | // and terraform has no way of extending the NAT resource 24 | resource "google_compute_subnetwork" "pks" { 25 | name = "${var.environment_name}-pks-subnet" 26 | ip_cidr_range = local.pks_subnet_cidr 27 | network = google_compute_network.network.self_link 28 | region = var.region 29 | } 30 | -------------------------------------------------------------------------------- /gcp/ops-manager.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_address" "ops-manager" { 2 | name = "${var.environment_name}-ops-manager-ip" 3 | } 4 | 5 | resource "tls_private_key" "ops-manager" { 6 | algorithm = "RSA" 7 | rsa_bits = "4096" 8 | } 9 | -------------------------------------------------------------------------------- /gcp/pas-buckets.tf: -------------------------------------------------------------------------------- 1 | resource "google_storage_bucket" "buildpacks" { 2 | name = "${var.project}-${var.environment_name}-buildpacks-${random_integer.bucket_suffix.result}" 3 | force_destroy = true 4 | location = var.location 5 | } 6 | 7 | resource "google_storage_bucket" "droplets" { 8 | name = "${var.project}-${var.environment_name}-droplets-${random_integer.bucket_suffix.result}" 9 | force_destroy = true 10 | location = 
var.location 11 | } 12 | 13 | resource "google_storage_bucket" "packages" { 14 | name = "${var.project}-${var.environment_name}-packages-${random_integer.bucket_suffix.result}" 15 | force_destroy = true 16 | location = var.location 17 | } 18 | 19 | resource "google_storage_bucket" "resources" { 20 | name = "${var.project}-${var.environment_name}-resources-${random_integer.bucket_suffix.result}" 21 | force_destroy = true 22 | location = var.location 23 | } 24 | 25 | resource "google_storage_bucket" "backup" { 26 | name = "${var.project}-${var.environment_name}-backup-${random_integer.bucket_suffix.result}" 27 | force_destroy = true 28 | location = var.location 29 | } 30 | -------------------------------------------------------------------------------- /gcp/pas-dns.tf: -------------------------------------------------------------------------------- 1 | resource "google_dns_record_set" "wildcard-sys" { 2 | name = "*.sys.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 3 | type = "A" 4 | ttl = 300 5 | 6 | managed_zone = var.hosted_zone 7 | 8 | rrdatas = [google_compute_global_address.http-lb.address] 9 | } 10 | 11 | resource "google_dns_record_set" "wildcard-apps" { 12 | name = "*.apps.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 13 | type = "A" 14 | ttl = 300 15 | 16 | managed_zone = var.hosted_zone 17 | 18 | rrdatas = [google_compute_global_address.http-lb.address] 19 | } 20 | 21 | resource "google_dns_record_set" "wildcard-websocket" { 22 | name = "*.ws.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 23 | type = "A" 24 | ttl = 300 25 | 26 | managed_zone = var.hosted_zone 27 | 28 | rrdatas = [google_compute_address.websocket-lb.address] 29 | } 30 | 31 | resource "google_dns_record_set" "doppler-sys" { 32 | name = "doppler.sys.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 33 | type = "A" 34 | ttl = 300 35 | 36 | managed_zone = var.hosted_zone 37 | 38 | rrdatas = [google_compute_address.websocket-lb.address] 39 | } 40 | 41 | resource "google_dns_record_set" "loggregator-sys" { 42 | name = "loggregator.sys.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 43 | type = "A" 44 | ttl = 300 45 | 46 | managed_zone = var.hosted_zone 47 | 48 | rrdatas = [google_compute_address.websocket-lb.address] 49 | } 50 | 51 | resource "google_dns_record_set" "ssh" { 52 | name = "ssh.sys.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 53 | type = "A" 54 | ttl = 300 55 | 56 | managed_zone = var.hosted_zone 57 | 58 | rrdatas = [google_compute_address.ssh-lb.address] 59 | } 60 | 61 | resource "google_dns_record_set" "tcp" { 62 | name = "tcp.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 63 | type = "A" 64 | ttl = 300 65 | 66 | managed_zone = var.hosted_zone 67 | 68 | rrdatas = [google_compute_address.tcp-lb.address] 69 | } 70 | -------------------------------------------------------------------------------- /gcp/pas-firewalls.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "tcp-lb-health-check" { 2 | name = "${var.environment_name}-tcp-lb-health-check" 3 | network = google_compute_network.network.name 4 | 5 | direction = "INGRESS" 6 | 7 | allow { 8 | protocol = "tcp" 9 | ports = ["80"] 10 | } 11 | 12 | source_ranges = ["130.211.0.0/22", "35.191.0.0/16"] 13 | 14 | target_tags = ["${var.environment_name}-tcp-lb"] 15 | } 16 | 17 | 
resource "google_compute_firewall" "tcp-lb" { 18 | name = "${var.environment_name}-tcp-lb-firewall" 19 | network = google_compute_network.network.name 20 | 21 | direction = "INGRESS" 22 | 23 | allow { 24 | protocol = "tcp" 25 | ports = ["1024-65535"] 26 | } 27 | 28 | source_ranges = var.ingress_source_ranges 29 | 30 | target_tags = ["${var.environment_name}-tcp-lb"] 31 | } 32 | 33 | resource "google_compute_firewall" "websocket-lb-health-check" { 34 | name = "${var.environment_name}-websocket-lb-health-check" 35 | network = google_compute_network.network.name 36 | 37 | direction = "INGRESS" 38 | 39 | allow { 40 | protocol = "tcp" 41 | ports = ["8080"] 42 | } 43 | 44 | source_ranges = ["130.211.0.0/22", "35.191.0.0/16"] 45 | 46 | target_tags = [google_compute_http_health_check.websocket-lb.name] 47 | } 48 | 49 | resource "google_compute_firewall" "websocket-lb" { 50 | name = "${var.environment_name}-websocket-lb-firewall" 51 | network = google_compute_network.network.name 52 | 53 | direction = "INGRESS" 54 | 55 | allow { 56 | protocol = "tcp" 57 | ports = ["80", "443"] 58 | } 59 | 60 | source_ranges = var.ingress_source_ranges 61 | 62 | target_tags = ["${var.environment_name}-websocket-lb"] 63 | } 64 | 65 | resource "google_compute_firewall" "http-lb" { 66 | name = "${var.environment_name}-http-lb-firewall" 67 | network = google_compute_network.network.self_link 68 | 69 | direction = "INGRESS" 70 | 71 | allow { 72 | protocol = "tcp" 73 | ports = ["80", "443"] 74 | } 75 | 76 | source_ranges = var.ingress_source_ranges 77 | 78 | target_tags = ["${var.environment_name}-http-lb"] 79 | } 80 | -------------------------------------------------------------------------------- /gcp/pas-lbs.tf: -------------------------------------------------------------------------------- 1 | # HTTP/S 2 | resource "google_compute_backend_service" "http-lb" { 3 | name = "${var.environment_name}-http-lb" 4 | port_name = "http" 5 | protocol = "HTTP" 6 | timeout_sec = 900 7 | enable_cdn = false 8 | 9 | dynamic "backend" { 10 | for_each = { for group in google_compute_instance_group.http-lb.* : group.self_link => group } 11 | iterator = instance_group 12 | content { 13 | group = instance_group.value.self_link 14 | } 15 | } 16 | 17 | health_checks = [google_compute_http_health_check.http-lb.self_link] 18 | } 19 | 20 | resource "google_compute_instance_group" "http-lb" { 21 | name = "${var.environment_name}-http-lb-${count.index}" 22 | zone = element(var.availability_zones, count.index) 23 | 24 | count = length(var.availability_zones) 25 | } 26 | 27 | resource "google_compute_global_address" "http-lb" { 28 | name = "${var.environment_name}-http-lb" 29 | } 30 | 31 | resource "google_compute_url_map" "https-lb" { 32 | name = "${var.environment_name}-https-lb" 33 | 34 | default_service = google_compute_backend_service.http-lb.self_link 35 | } 36 | 37 | resource "google_compute_target_http_proxy" "http-lb" { 38 | name = "${var.environment_name}-http-lb" 39 | url_map = google_compute_url_map.https-lb.self_link 40 | } 41 | 42 | resource "google_compute_target_https_proxy" "https-lb" { 43 | name = "${var.environment_name}-https-lb" 44 | url_map = google_compute_url_map.https-lb.self_link 45 | ssl_certificates = [google_compute_ssl_certificate.certificate.self_link] 46 | } 47 | 48 | resource "google_compute_global_forwarding_rule" "http-lb-80" { 49 | name = "${var.environment_name}-http-lb" 50 | ip_address = google_compute_global_address.http-lb.address 51 | target = google_compute_target_http_proxy.http-lb.self_link 52 | 
port_range = "80" 53 | } 54 | 55 | resource "google_compute_global_forwarding_rule" "https-lb-443" { 56 | name = "${var.environment_name}-https-lb" 57 | ip_address = google_compute_global_address.http-lb.address 58 | target = google_compute_target_https_proxy.https-lb.self_link 59 | port_range = "443" 60 | } 61 | 62 | resource "google_compute_http_health_check" "http-lb" { 63 | name = "${var.environment_name}-http-lb-health-check" 64 | port = 8080 65 | request_path = "/health" 66 | check_interval_sec = 5 67 | timeout_sec = 3 68 | healthy_threshold = 6 69 | unhealthy_threshold = 3 70 | } 71 | 72 | # SSH 73 | resource "google_compute_address" "ssh-lb" { 74 | name = "${var.environment_name}-ssh-lb" 75 | } 76 | 77 | resource "google_compute_forwarding_rule" "ssh-lb-2222" { 78 | name = "${var.environment_name}-ssh-lb" 79 | ip_address = google_compute_address.ssh-lb.address 80 | target = google_compute_target_pool.ssh-lb.self_link 81 | port_range = "2222" 82 | ip_protocol = "TCP" 83 | } 84 | 85 | resource "google_compute_target_pool" "ssh-lb" { 86 | name = "${var.environment_name}-ssh-lb" 87 | } 88 | 89 | # TCP 90 | locals { 91 | tcp_ports = ["1024-1123"] 92 | } 93 | 94 | resource "google_compute_address" "tcp-lb" { 95 | name = "${var.environment_name}-tcp-lb" 96 | } 97 | 98 | resource "google_compute_forwarding_rule" "tcp-lb" { 99 | name = "${var.environment_name}-tcp-lb-${count.index}" 100 | ip_address = google_compute_address.tcp-lb.address 101 | target = google_compute_target_pool.tcp-lb.self_link 102 | port_range = element(local.tcp_ports, count.index) 103 | ip_protocol = "TCP" 104 | 105 | count = length(local.tcp_ports) 106 | } 107 | 108 | resource "google_compute_target_pool" "tcp-lb" { 109 | name = "${var.environment_name}-tcp-lb" 110 | 111 | health_checks = [google_compute_http_health_check.tcp-lb.name] 112 | } 113 | 114 | resource "google_compute_http_health_check" "tcp-lb" { 115 | name = "${var.environment_name}-tcp-lb-health-check" 116 | port = 80 117 | request_path = "/health" 118 | check_interval_sec = 30 119 | timeout_sec = 5 120 | healthy_threshold = 10 121 | unhealthy_threshold = 2 122 | } 123 | 124 | # Web 125 | resource "google_compute_address" "websocket-lb" { 126 | name = "${var.environment_name}-websocket-lb" 127 | } 128 | 129 | resource "google_compute_forwarding_rule" "websocket-lb-80" { 130 | name = "${var.environment_name}-websocket-lb-80" 131 | ip_address = google_compute_address.websocket-lb.address 132 | target = google_compute_target_pool.websocket-lb.self_link 133 | port_range = "80" 134 | ip_protocol = "TCP" 135 | } 136 | 137 | resource "google_compute_forwarding_rule" "websocket-lb-443" { 138 | name = "${var.environment_name}-websocket-lb-443" 139 | ip_address = google_compute_address.websocket-lb.address 140 | target = google_compute_target_pool.websocket-lb.self_link 141 | port_range = "443" 142 | ip_protocol = "TCP" 143 | } 144 | 145 | resource "google_compute_target_pool" "websocket-lb" { 146 | name = "${var.environment_name}-websocket-lb" 147 | 148 | health_checks = [google_compute_http_health_check.websocket-lb.self_link] 149 | } 150 | 151 | resource "google_compute_http_health_check" "websocket-lb" { 152 | name = "${var.environment_name}-websocket-lb" 153 | port = 8080 154 | request_path = "/health" 155 | check_interval_sec = 5 156 | timeout_sec = 3 157 | healthy_threshold = 6 158 | unhealthy_threshold = 3 159 | } 160 | -------------------------------------------------------------------------------- /gcp/pas-outputs.tf: 
-------------------------------------------------------------------------------- 1 | locals { 2 | stable_config_pas = { 3 | pas_subnet_name = google_compute_subnetwork.pas.name 4 | pas_subnet_cidr = google_compute_subnetwork.pas.ip_cidr_range 5 | pas_subnet_gateway = google_compute_subnetwork.pas.gateway_address 6 | pas_subnet_reserved_ip_ranges = "${cidrhost(google_compute_subnetwork.pas.ip_cidr_range, 1)}-${cidrhost(google_compute_subnetwork.pas.ip_cidr_range, 9)}" 7 | 8 | buildpacks_bucket_name = google_storage_bucket.buildpacks.name 9 | droplets_bucket_name = google_storage_bucket.droplets.name 10 | packages_bucket_name = google_storage_bucket.packages.name 11 | resources_bucket_name = google_storage_bucket.resources.name 12 | backup_bucket_name = google_storage_bucket.backup.name 13 | 14 | http_backend_service_name = google_compute_backend_service.http-lb.name 15 | ssh_target_pool_name = google_compute_target_pool.ssh-lb.name 16 | tcp_target_pool_name = google_compute_target_pool.tcp-lb.name 17 | web_target_pool_name = google_compute_target_pool.websocket-lb.name 18 | 19 | sys_dns_domain = replace(replace(google_dns_record_set.wildcard-sys.name, "/\\.$/", ""), "*.", "") 20 | apps_dns_domain = replace(replace(google_dns_record_set.wildcard-apps.name, "/\\.$/", ""), "*.", "") 21 | doppler_dns = replace(google_dns_record_set.doppler-sys.name, "/\\.$/", "") 22 | loggregator_dns = replace(google_dns_record_set.loggregator-sys.name, "/\\.$/", "") 23 | ssh_dns = replace(google_dns_record_set.ssh.name, "/\\.$/", "") 24 | tcp_dns = replace(google_dns_record_set.tcp.name, "/\\.$/", "") 25 | } 26 | } 27 | 28 | output "stable_config_pas" { 29 | value = jsonencode(local.stable_config_pas) 30 | sensitive = true 31 | } 32 | -------------------------------------------------------------------------------- /gcp/pas-subnets.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_subnetwork" "pas" { 2 | name = "${var.environment_name}-pas-subnet" 3 | ip_cidr_range = local.pas_subnet_cidr 4 | network = google_compute_network.network.self_link 5 | region = var.region 6 | } 7 | -------------------------------------------------------------------------------- /gcp/pks-dns.tf: -------------------------------------------------------------------------------- 1 | resource "google_dns_record_set" "pks-api" { 2 | name = "*.pks.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}" 3 | type = "A" 4 | ttl = 300 5 | 6 | managed_zone = var.hosted_zone 7 | 8 | rrdatas = [google_compute_address.pks-api-lb.address] 9 | } 10 | -------------------------------------------------------------------------------- /gcp/pks-firewalls.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "pks-api-lb" { 2 | name = "${var.environment_name}-pks-api-lb-firewall" 3 | network = google_compute_network.network.name 4 | 5 | direction = "INGRESS" 6 | 7 | allow { 8 | protocol = "tcp" 9 | ports = ["8443", "9021"] 10 | } 11 | 12 | source_ranges = var.ingress_source_ranges 13 | 14 | target_tags = ["${var.environment_name}-pks-api-lb"] 15 | } 16 | -------------------------------------------------------------------------------- /gcp/pks-iam.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "pks-master-node-service-account" { 2 | account_id = "${var.environment_name}-pks-master" 3 | display_name = "${var.environment_name} PKS 
Service Account" 4 | } 5 | 6 | resource "google_service_account_key" "pks-master-node-service-account-key" { 7 | service_account_id = google_service_account.pks-master-node-service-account.id 8 | } 9 | 10 | resource "google_service_account" "pks-worker-node-service-account" { 11 | account_id = "${var.environment_name}-pks-worker" 12 | display_name = "${var.environment_name} PKS Service Account" 13 | } 14 | 15 | resource "google_service_account_key" "pks-worker-node-service-account-key" { 16 | service_account_id = google_service_account.pks-worker-node-service-account.id 17 | } 18 | 19 | resource "google_project_iam_member" "pks-master-node-compute-instance-admin-v1" { 20 | project = var.project 21 | role = "roles/compute.instanceAdmin.v1" 22 | member = "serviceAccount:${google_service_account.pks-master-node-service-account.email}" 23 | } 24 | 25 | resource "google_project_iam_member" "pks-master-node-compute-network-admin" { 26 | project = var.project 27 | role = "roles/compute.networkAdmin" 28 | member = "serviceAccount:${google_service_account.pks-master-node-service-account.email}" 29 | } 30 | 31 | resource "google_project_iam_member" "pks-master-node-compute-storage-admin" { 32 | project = var.project 33 | role = "roles/compute.storageAdmin" 34 | member = "serviceAccount:${google_service_account.pks-master-node-service-account.email}" 35 | } 36 | 37 | resource "google_project_iam_member" "pks-master-node-compute-security-admin" { 38 | project = var.project 39 | role = "roles/compute.securityAdmin" 40 | member = "serviceAccount:${google_service_account.pks-master-node-service-account.email}" 41 | } 42 | 43 | resource "google_project_iam_member" "pks-master-node-iam-service-account-user" { 44 | project = var.project 45 | role = "roles/iam.serviceAccountUser" 46 | member = "serviceAccount:${google_service_account.pks-master-node-service-account.email}" 47 | } 48 | 49 | resource "google_project_iam_member" "pks-master-node-compute-viewer" { 50 | project = var.project 51 | role = "roles/compute.viewer" 52 | member = "serviceAccount:${google_service_account.pks-master-node-service-account.email}" 53 | } 54 | 55 | resource "google_project_iam_member" "pks-worker-node-compute-viewer" { 56 | project = var.project 57 | role = "roles/compute.viewer" 58 | member = "serviceAccount:${google_service_account.pks-worker-node-service-account.email}" 59 | } 60 | 61 | resource "google_project_iam_member" "pks-worker-node-storage-object-viewer" { 62 | project = var.project 63 | role = "roles/storage.objectViewer" 64 | member = "serviceAccount:${google_service_account.pks-worker-node-service-account.email}" 65 | } 66 | 67 | -------------------------------------------------------------------------------- /gcp/pks-lbs.tf: -------------------------------------------------------------------------------- 1 | # PKS API 2 | resource "google_compute_address" "pks-api-lb" { 3 | name = "${var.environment_name}-pks-api-lb" 4 | } 5 | 6 | resource "google_compute_forwarding_rule" "pks-api-lb-9021" { 7 | name = "${var.environment_name}-pks-api-lb-9021" 8 | ip_address = google_compute_address.pks-api-lb.address 9 | target = google_compute_target_pool.pks-api-lb.self_link 10 | port_range = "9021" 11 | ip_protocol = "TCP" 12 | } 13 | 14 | resource "google_compute_forwarding_rule" "pks-api-lb-8443" { 15 | name = "${var.environment_name}-pks-api-lb-8443" 16 | ip_address = google_compute_address.pks-api-lb.address 17 | target = google_compute_target_pool.pks-api-lb.self_link 18 | port_range = "8443" 19 | ip_protocol = "TCP" 
20 | } 21 | 22 | resource "google_compute_target_pool" "pks-api-lb" { 23 | name = "${var.environment_name}-pks-api-lb" 24 | } 25 | -------------------------------------------------------------------------------- /gcp/pks-outputs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | stable_config_pks = { 3 | pks_subnet_name = google_compute_subnetwork.pks.name 4 | pks_subnet_cidr = google_compute_subnetwork.pks.ip_cidr_range 5 | pks_subnet_gateway = google_compute_subnetwork.pks.gateway_address 6 | pks_subnet_reserved_ip_ranges = "${cidrhost(google_compute_subnetwork.pks.ip_cidr_range, 1)}-${cidrhost(google_compute_subnetwork.pks.ip_cidr_range, 9)}" 7 | pks_master_node_service_account_id = google_service_account.pks-master-node-service-account.email 8 | pks_worker_node_service_account_id = google_service_account.pks-worker-node-service-account.email 9 | pks_api_target_pool_name = google_compute_target_pool.pks-api-lb.name 10 | pks_api_dns_domain = replace(replace(google_dns_record_set.pks-api.name, "/\\.$/", ""), "*.", "") 11 | } 12 | } 13 | 14 | output "stable_config_pks" { 15 | value = jsonencode(local.stable_config_pks) 16 | sensitive = true 17 | } 18 | -------------------------------------------------------------------------------- /gcp/provider.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.project 3 | region = var.region 4 | credentials = var.service_account_key 5 | } 6 | -------------------------------------------------------------------------------- /gcp/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | environment_name = "YOUR-ENVIRONMENT-NAME" 2 | 3 | project = "YOUR-GCP-PROJECT" 4 | 5 | service_account_key = <<SERVICE_ACCOUNT_KEY 6 | { 7 | YOUR-SERVICE-ACCOUNT-KEY-JSON 8 | } 9 | SERVICE_ACCOUNT_KEY 10 | 11 | region = "YOUR-GCP-REGION" 12 | availability_zones = ["YOUR-GCP-ZONE-1", "YOUR-GCP-ZONE-2", "YOUR-GCP-ZONE-3"] 13 | location = "US" 14 | ingress_source_ranges = ["0.0.0.0/0"] 15 | hosted_zone = "foo" 16 | 17 | ssl_certificate = <<SSL_CERTIFICATE 18 | -----BEGIN CERTIFICATE----- 19 | YOUR-CERTIFICATE 20 | -----END CERTIFICATE----- 21 | SSL_CERTIFICATE 22 | 23 | ssl_private_key = <<SSL_PRIVATE_KEY 24 | -----BEGIN RSA PRIVATE KEY----- 25 | YOUR-PRIVATE-KEY 26 | -----END RSA PRIVATE KEY----- 27 | SSL_PRIVATE_KEY 28 | 29 | -------------------------------------------------------------------------------- /gcp/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project" { 2 | type = string 3 | } 4 | 5 | variable "environment_name" { 6 | type = string 7 | } 8 | 9 | variable "region" { 10 | type = string 11 | } 12 | 13 | variable "service_account_key" { 14 | type = string 15 | } 16 | 17 | variable "hosted_zone" { 18 | description = "Hosted zone name (e.g. foo is the zone name and foo.example.com is the DNS name)." 19 | } 20 | 21 | variable "availability_zones" { 22 | description = "Requires exactly THREE availability zones that must belong to the provided region." 23 | type = list 24 | } 25 | 26 | variable "ssl_certificate" { 27 | description = "The contents of an SSL certificate to be used by the LB." 28 | } 29 | 30 | variable "ssl_private_key" { 31 | description = "The contents of an SSL private key to be used by the LB."
32 | } 33 | 34 | variable "location" { 35 | default = "US" 36 | description = "The location to store the bucket data" 37 | } 38 | 39 | variable "ingress_source_ranges" { 40 | default = ["0.0.0.0/0"] 41 | type = list(string) 42 | description = "IP Source ranges for ingress firewall rule" 43 | } -------------------------------------------------------------------------------- /gcp/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | google = { 4 | source = "hashicorp/google" 5 | version = "~> 4.1" 6 | } 7 | random = { 8 | source = "hashicorp/random" 9 | } 10 | tls = { 11 | source = "hashicorp/tls" 12 | } 13 | } 14 | required_version = "~> 1.0" 15 | } 16 | -------------------------------------------------------------------------------- /nsxt/README.md: -------------------------------------------------------------------------------- 1 | ## Pre-requisites: 2 | 3 | * NSX-T Manager deployed 4 | * NSX-T Edge Cluster deployed 5 | * An East-West Transport Zone created (e.g. `overlay-tz`) 6 | * A Tier-0 Router created and attached to the North-South transport zone; 7 | most importantly, you should be able to ping the `external_ip_pool_gateway` 8 | from your workstation 9 | * A DNS entry which resolves to your Operations Manager's IP address (e.g. `om.example.com` resolves to `10.195.74.16`) 10 | 11 | 12 | ## After paving the infrastructure 13 | 14 | If you log in to your vCenter console, you should be able to find the opaque 15 | networks `PAS-Infrastructure` and `PAS-Deployment` under the Navigator's "Networking" tab. 16 | 17 | 1. Follow [the PCF docs](https://docs.pivotal.io/pivotalcf/2-4/customizing/deploying-vm.html) to 18 | upload an Ops Manager OVA. When it prompts you for a network, select the opaque network 19 | `PAS-Infrastructure`. When prompted for an 20 | IP address, use `192.168.1.10`. When you reach the step to select "Power on after 21 | deployment," click yes and hit Finish. 22 | 23 | 1. Follow along with the PCF documentation to [deploy a bosh director](https://docs.pivotal.io/pivotalcf/2-4/customizing/vsphere-config.html), 24 | but with the following changes: 25 | 26 | 1. Under vCenter Config, enable `NSX networking → NSX-T Mode`, and supply your 27 | `nsxt_host`, `nsxt_username`, and `nsxt_password` from `terraform.tfvars`. 28 | 29 | 1. When prompted to enter networks, use the values: 30 | 31 | | | Infrastructure | Deployment | 32 | |---|---|---| 33 | | vSphere Network Name | `PAS-Infrastructure` | `PAS-Deployment` | 34 | | CIDR | `192.168.1.0/24` | `192.168.2.0/24` | 35 | | Gateway | `192.168.1.1` | `192.168.2.1` | 36 | | Reserved IP Ranges | `192.168.1.1-192.168.1.10` | `192.168.2.1` | 37 | 38 | 1. 
Follow along with the PCF documentation to [deploy PAS](https://docs.pivotal.io/pivotalcf/2-4/customizing/config-er-vmware.html) 39 | -------------------------------------------------------------------------------- /nsxt/data.tf: -------------------------------------------------------------------------------- 1 | data "nsxt_edge_cluster" "edge_cluster" { 2 | display_name = var.nsxt_edge_cluster_name 3 | } 4 | 5 | data "nsxt_transport_zone" "east-west-overlay" { 6 | display_name = var.east_west_transport_zone_name 7 | } 8 | 9 | data "nsxt_logical_tier0_router" "t0_router" { 10 | display_name = var.nsxt_t0_router_name 11 | } -------------------------------------------------------------------------------- /nsxt/ops-manager-network.tf: -------------------------------------------------------------------------------- 1 | resource "nsxt_logical_router_link_port_on_tier0" "t0_to_t1_infrastructure" { 2 | display_name = "${var.environment_name}-T0-to-T1-Infrastructure" 3 | 4 | description = "Link Port on Logical Tier 0 Router for connecting to Tier 1 Infrastructure Router." 5 | logical_router_id = data.nsxt_logical_tier0_router.t0_router.id 6 | 7 | tag { 8 | scope = "terraform" 9 | tag = var.environment_name 10 | } 11 | } 12 | 13 | resource "nsxt_logical_tier1_router" "t1_infrastructure" { 14 | display_name = "${var.environment_name}-T1-Router-PAS-Infrastructure" 15 | 16 | description = "Infrastructure Tier 1 Router." 17 | failover_mode = "PREEMPTIVE" 18 | edge_cluster_id = data.nsxt_edge_cluster.edge_cluster.id 19 | 20 | enable_router_advertisement = true 21 | advertise_connected_routes = true 22 | advertise_lb_vip_routes = true 23 | advertise_lb_snat_ip_routes = true 24 | 25 | tag { 26 | scope = "terraform" 27 | tag = var.environment_name 28 | } 29 | } 30 | 31 | resource "nsxt_logical_router_link_port_on_tier0" "t0_to_t1_deployment" { 32 | display_name = "${var.environment_name}-T0-to-T1-Deployment" 33 | 34 | description = "Link Port on Logical Tier 0 Router for connecting to Tier 1 Deployment Router." 35 | logical_router_id = data.nsxt_logical_tier0_router.t0_router.id 36 | 37 | tag { 38 | scope = "terraform" 39 | tag = var.environment_name 40 | } 41 | } 42 | 43 | resource "nsxt_logical_router_link_port_on_tier1" "t1_infrastructure_to_t0" { 44 | display_name = "${var.environment_name}-T1-Infrastructure-to-T0" 45 | 46 | description = "Link Port on Infrastructure Tier 1 Router connecting to Logical Tier 0 Router. Provisioned by Terraform." 47 | logical_router_id = nsxt_logical_tier1_router.t1_infrastructure.id 48 | linked_logical_router_port_id = nsxt_logical_router_link_port_on_tier0.t0_to_t1_infrastructure.id 49 | 50 | tag { 51 | scope = "terraform" 52 | tag = var.environment_name 53 | } 54 | } 55 | 56 | resource "nsxt_logical_switch" "infrastructure_ls" { 57 | display_name = "${var.environment_name}-PAS-Infrastructure" 58 | 59 | transport_zone_id = data.nsxt_transport_zone.east-west-overlay.id 60 | admin_state = "UP" 61 | 62 | description = "Logical Switch for the T1 Infrastructure Router." 63 | replication_mode = "MTEP" 64 | 65 | tag { 66 | scope = "terraform" 67 | tag = var.environment_name 68 | } 69 | } 70 | 71 | resource "nsxt_logical_port" "infrastructure_lp" { 72 | display_name = "${var.environment_name}-PAS-Infrastructure-lp" 73 | 74 | admin_state = "UP" 75 | description = "Logical Port on the Logical Switch for the T1 Infrastructure Router." 
76 | logical_switch_id = nsxt_logical_switch.infrastructure_ls.id 77 | 78 | tag { 79 | scope = "terraform" 80 | tag = var.environment_name 81 | } 82 | } 83 | 84 | resource "nsxt_logical_router_downlink_port" "infrastructure_dp" { 85 | display_name = "${var.environment_name}-PAS-Infrastructure-dp" 86 | 87 | description = "Downlink port connecting PAS-Infrastructure router to its Logical Switch" 88 | logical_router_id = nsxt_logical_tier1_router.t1_infrastructure.id 89 | linked_logical_switch_port_id = nsxt_logical_port.infrastructure_lp.id 90 | ip_address = "${var.subnet_prefix}.1.1/24" 91 | 92 | tag { 93 | scope = "terraform" 94 | tag = var.environment_name 95 | } 96 | } 97 | 98 | resource "nsxt_logical_tier1_router" "t1_deployment" { 99 | display_name = "${var.environment_name}-T1-Router-PAS-Deployment" 100 | 101 | description = "Deployment Tier 1 Router." 102 | failover_mode = "NON_PREEMPTIVE" 103 | edge_cluster_id = data.nsxt_edge_cluster.edge_cluster.id 104 | 105 | enable_router_advertisement = true 106 | advertise_connected_routes = true 107 | advertise_lb_vip_routes = true 108 | advertise_lb_snat_ip_routes = true 109 | 110 | tag { 111 | scope = "terraform" 112 | tag = var.environment_name 113 | } 114 | } 115 | 116 | resource "nsxt_logical_router_link_port_on_tier1" "t1_deployment_to_t0" { 117 | display_name = "${var.environment_name}-T1-Deployment-to-T0" 118 | 119 | description = "Link Port on Deployment Tier 1 Router connecting to Logical Tier 0 Router. Provisioned by Terraform." 120 | logical_router_id = nsxt_logical_tier1_router.t1_deployment.id 121 | linked_logical_router_port_id = nsxt_logical_router_link_port_on_tier0.t0_to_t1_deployment.id 122 | 123 | tag { 124 | scope = "terraform" 125 | tag = var.environment_name 126 | } 127 | } 128 | 129 | resource "nsxt_logical_switch" "deployment_ls" { 130 | display_name = "${var.environment_name}-PAS-Deployment" 131 | 132 | transport_zone_id = data.nsxt_transport_zone.east-west-overlay.id 133 | admin_state = "UP" 134 | 135 | description = "Logical Switch for the T1 Deployment Router." 136 | replication_mode = "MTEP" 137 | 138 | tag { 139 | scope = "terraform" 140 | tag = var.environment_name 141 | } 142 | } 143 | 144 | resource "nsxt_logical_port" "deployment_lp" { 145 | display_name = "${var.environment_name}-PAS-Deployment-lp" 146 | 147 | admin_state = "UP" 148 | description = "Logical Port on the Logical Switch for the T1 Deployment Router." 
149 | logical_switch_id = nsxt_logical_switch.deployment_ls.id 150 | 151 | tag { 152 | scope = "terraform" 153 | tag = var.environment_name 154 | } 155 | } 156 | 157 | resource "nsxt_logical_router_downlink_port" "deployment_dp" { 158 | display_name = "${var.environment_name}-PAS-Deployment-dp" 159 | 160 | description = "Downlink port connecting PAS-Deployment router to its Logical Switch" 161 | logical_router_id = nsxt_logical_tier1_router.t1_deployment.id 162 | linked_logical_switch_port_id = nsxt_logical_port.deployment_lp.id 163 | ip_address = "${var.subnet_prefix}.2.1/24" 164 | 165 | tag { 166 | scope = "terraform" 167 | tag = var.environment_name 168 | } 169 | } 170 | 171 | resource "nsxt_nat_rule" "snat_vm" { 172 | display_name = "${var.environment_name}-snat-vm" 173 | action = "SNAT" 174 | 175 | logical_router_id = data.nsxt_logical_tier0_router.t0_router.id 176 | description = "SNAT Rule for all VMs in deployment with exception of sockets coming in through LBs" 177 | enabled = true 178 | logging = false 179 | nat_pass = true 180 | 181 | match_source_network = "${var.subnet_prefix}.0.0/16" 182 | translated_network = var.nat_gateway_ip 183 | 184 | tag { 185 | scope = "terraform" 186 | tag = var.environment_name 187 | } 188 | } 189 | 190 | resource "nsxt_nat_rule" "snat_om" { 191 | display_name = "${var.environment_name}-snat-om" 192 | action = "SNAT" 193 | 194 | logical_router_id = data.nsxt_logical_tier0_router.t0_router.id 195 | description = "SNAT Rule for Operations Manager" 196 | enabled = true 197 | logging = false 198 | nat_pass = true 199 | 200 | match_source_network = "${var.subnet_prefix}.1.10" 201 | translated_network = var.ops_manager_public_ip 202 | 203 | tag { 204 | scope = "terraform" 205 | tag = var.environment_name 206 | } 207 | } 208 | 209 | resource "nsxt_nat_rule" "dnat_om" { 210 | display_name = "${var.environment_name}-dnat-om" 211 | action = "DNAT" 212 | 213 | logical_router_id = data.nsxt_logical_tier0_router.t0_router.id 214 | description = "DNAT Rule for Operations Manager" 215 | enabled = true 216 | logging = false 217 | nat_pass = true 218 | 219 | match_destination_network = var.ops_manager_public_ip 220 | translated_network = "${var.subnet_prefix}.1.10" 221 | 222 | tag { 223 | scope = "terraform" 224 | tag = var.environment_name 225 | } 226 | } 227 | 228 | resource "nsxt_ip_pool" "external_ip_pool" { 229 | description = "IP Pool that provides IPs for each of the NSX-T container networks." 
230 | display_name = "${var.environment_name}-external-ip-pool" 231 | 232 | subnet { 233 | allocation_ranges = var.external_ip_pool_ranges 234 | cidr = var.external_ip_pool_cidr 235 | gateway_ip = var.external_ip_pool_gateway 236 | } 237 | 238 | tag { 239 | scope = "terraform" 240 | tag = var.environment_name 241 | } 242 | } 243 | 244 | resource "nsxt_ip_block" "container_ip_block" { 245 | description = "Subnets are allocated from this pool to each newly-created Org" 246 | display_name = "${var.environment_name}-pas-container-ip-block" 247 | cidr = "10.12.0.0/14" 248 | } 249 | -------------------------------------------------------------------------------- /nsxt/ops-manager-outputs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | stable_config_opsmanager = { 3 | environment_name = var.environment_name 4 | 5 | nsxt_host = var.nsxt_host 6 | nsxt_username = var.nsxt_username 7 | nsxt_password = var.nsxt_password 8 | nsxt_ca_cert = var.nsxt_ca_cert 9 | 10 | vcenter_datacenter = var.vcenter_datacenter 11 | vcenter_datastore = var.vcenter_datastore 12 | vcenter_host = var.vcenter_host 13 | vcenter_username = var.vcenter_username 14 | vcenter_password = var.vcenter_password 15 | vcenter_resource_pool = var.vcenter_resource_pool 16 | vcenter_cluster = var.vcenter_cluster 17 | 18 | ops_manager_ntp = var.ops_manager_ntp 19 | ops_manager_netmask = var.ops_manager_netmask 20 | ops_manager_dns = var.ops_manager_dns 21 | ops_manager_dns_servers = var.ops_manager_dns_servers 22 | ops_manager_folder = var.ops_manager_folder 23 | ops_manager_ssh_public_key = tls_private_key.ops-manager.public_key_openssh 24 | ops_manager_ssh_private_key = tls_private_key.ops-manager.private_key_pem 25 | ops_manager_public_ip = var.ops_manager_public_ip 26 | ops_manager_private_ip = nsxt_nat_rule.dnat_om.translated_network 27 | 28 | management_subnet_name = nsxt_logical_switch.infrastructure_ls.display_name 29 | management_subnet_cidr = "${var.subnet_prefix}.1.0/24" 30 | management_subnet_gateway = "${var.subnet_prefix}.1.1" 31 | management_subnet_reserved_ip_ranges = "${var.subnet_prefix}.1.1-${var.subnet_prefix}.1.10" 32 | 33 | allow_unverified_ssl = var.allow_unverified_ssl 34 | disable_ssl_verification = !var.allow_unverified_ssl 35 | } 36 | } 37 | 38 | output "stable_config_opsmanager" { 39 | value = jsonencode(local.stable_config_opsmanager) 40 | sensitive = true 41 | } 42 | -------------------------------------------------------------------------------- /nsxt/ops-manager.tf: -------------------------------------------------------------------------------- 1 | resource "tls_private_key" "ops-manager" { 2 | algorithm = "RSA" 3 | rsa_bits = "4096" 4 | } 5 | -------------------------------------------------------------------------------- /nsxt/pas-lbs.tf: -------------------------------------------------------------------------------- 1 | resource "nsxt_lb_http_monitor" "pas-web" { 2 | description = "The Active Health Monitor (healthcheck) for Web (HTTP(S)) traffic." 3 | display_name = "${var.environment_name}-pas-web-monitor" 4 | monitor_port = 8080 5 | request_method = "GET" 6 | request_url = "/health" 7 | request_version = "HTTP_VERSION_1_1" 8 | response_status_codes = [200] 9 | 10 | tag { 11 | scope = "terraform" 12 | tag = var.environment_name 13 | } 14 | } 15 | 16 | resource "nsxt_lb_http_monitor" "pas-tcp" { 17 | description = "The Active Health Monitor (healthcheck) for TCP traffic." 
18 | display_name = "${var.environment_name}-pas-tcp-monitor" 19 | monitor_port = 80 20 | request_method = "GET" 21 | request_url = "/health" 22 | request_version = "HTTP_VERSION_1_1" 23 | 24 | tag { 25 | scope = "terraform" 26 | tag = var.environment_name 27 | } 28 | response_status_codes = [200] 29 | } 30 | 31 | resource "nsxt_lb_tcp_monitor" "pas-ssh" { 32 | description = "The Active Health Monitor (healthcheck) for SSH traffic." 33 | display_name = "${var.environment_name}-pas-ssh-monitor" 34 | monitor_port = 2222 35 | 36 | tag { 37 | scope = "terraform" 38 | tag = var.environment_name 39 | } 40 | } 41 | 42 | resource "nsxt_lb_pool" "pas-web" { 43 | description = "The Server Pool of Web (HTTP(S)) traffic handling VMs" 44 | display_name = "${var.environment_name}-pas-web-pool" 45 | algorithm = "ROUND_ROBIN" 46 | tcp_multiplexing_enabled = false 47 | active_monitor_id = nsxt_lb_http_monitor.pas-web.id 48 | 49 | snat_translation { 50 | type = "SNAT_AUTO_MAP" 51 | } 52 | 53 | tag { 54 | scope = "terraform" 55 | tag = var.environment_name 56 | } 57 | } 58 | 59 | resource "nsxt_lb_pool" "pas-tcp" { 60 | description = "The Server Pool of TCP traffic handling VMs" 61 | display_name = "${var.environment_name}-pas-tcp-pool" 62 | algorithm = "ROUND_ROBIN" 63 | tcp_multiplexing_enabled = false 64 | active_monitor_id = nsxt_lb_http_monitor.pas-tcp.id 65 | 66 | snat_translation { 67 | type = "TRANSPARENT" 68 | } 69 | 70 | tag { 71 | scope = "terraform" 72 | tag = var.environment_name 73 | } 74 | } 75 | 76 | resource "nsxt_lb_pool" "pas-ssh" { 77 | description = "The Server Pool of SSH traffic handling VMs" 78 | display_name = "${var.environment_name}-pas-ssh-pool" 79 | algorithm = "ROUND_ROBIN" 80 | tcp_multiplexing_enabled = false 81 | active_monitor_id = nsxt_lb_tcp_monitor.pas-ssh.id 82 | 83 | snat_translation { 84 | type = "TRANSPARENT" 85 | } 86 | 87 | tag { 88 | scope = "terraform" 89 | tag = var.environment_name 90 | } 91 | } 92 | 93 | resource "nsxt_lb_fast_tcp_application_profile" "pas_lb_tcp_application_profile" { 94 | display_name = "${var.environment_name}-pas-lb-tcp-application-profile" 95 | close_timeout = "8" 96 | idle_timeout = "1800" 97 | 98 | tag { 99 | scope = "terraform" 100 | tag = var.environment_name 101 | } 102 | } 103 | 104 | resource "nsxt_lb_tcp_virtual_server" "lb_web_virtual_server" { 105 | description = "The Virtual Server for Web (HTTP(S)) traffic" 106 | display_name = "${var.environment_name}-pas-web-vs" 107 | application_profile_id = nsxt_lb_fast_tcp_application_profile.pas_lb_tcp_application_profile.id 108 | ip_address = var.nsxt_lb_web_virtual_server_ip_address 109 | ports = ["80", "443"] 110 | pool_id = nsxt_lb_pool.pas-web.id 111 | 112 | tag { 113 | scope = "terraform" 114 | tag = var.environment_name 115 | } 116 | } 117 | 118 | resource "nsxt_lb_tcp_virtual_server" "lb_tcp_virtual_server" { 119 | description = "The Virtual Server for TCP traffic" 120 | display_name = "${var.environment_name}-pas-tcp-vs" 121 | application_profile_id = nsxt_lb_fast_tcp_application_profile.pas_lb_tcp_application_profile.id 122 | ip_address = var.nsxt_lb_tcp_virtual_server_ip_address 123 | ports = var.nsxt_lb_tcp_virtual_server_ports 124 | pool_id = nsxt_lb_pool.pas-tcp.id 125 | 126 | tag { 127 | scope = "terraform" 128 | tag = var.environment_name 129 | } 130 | } 131 | 132 | resource "nsxt_lb_tcp_virtual_server" "lb_ssh_virtual_server" { 133 | description = "The Virtual Server for SSH traffic" 134 | display_name = "${var.environment_name}-pas-ssh-vs" 135 | application_profile_id = 
nsxt_lb_fast_tcp_application_profile.pas_lb_tcp_application_profile.id 136 | ip_address = var.nsxt_lb_ssh_virtual_server_ip_address 137 | ports = ["2222"] 138 | pool_id = nsxt_lb_pool.pas-ssh.id 139 | 140 | tag { 141 | scope = "terraform" 142 | tag = var.environment_name 143 | } 144 | } 145 | 146 | resource "nsxt_lb_service" "pas_lb" { 147 | description = "The Load Balancer for handling Web (HTTP(S)), TCP, and SSH traffic." 148 | display_name = "${var.environment_name}-pas-lb" 149 | 150 | enabled = true 151 | logical_router_id = nsxt_logical_tier1_router.t1_deployment.id 152 | size = "SMALL" 153 | virtual_server_ids = [ 154 | nsxt_lb_tcp_virtual_server.lb_web_virtual_server.id, 155 | nsxt_lb_tcp_virtual_server.lb_tcp_virtual_server.id, 156 | nsxt_lb_tcp_virtual_server.lb_ssh_virtual_server.id 157 | ] 158 | 159 | depends_on = [ 160 | nsxt_logical_router_link_port_on_tier1.t1_infrastructure_to_t0, 161 | nsxt_logical_router_link_port_on_tier1.t1_deployment_to_t0, 162 | ] 163 | 164 | tag { 165 | scope = "terraform" 166 | tag = var.environment_name 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /nsxt/pas-outputs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | stable_config_pas = { 3 | lb_pool_web = nsxt_lb_pool.pas-web.display_name 4 | lb_pool_tcp = nsxt_lb_pool.pas-tcp.display_name 5 | lb_pool_ssh = nsxt_lb_pool.pas-ssh.display_name 6 | } 7 | } 8 | 9 | output "stable_config_pas" { 10 | value = jsonencode(local.stable_config_pas) 11 | sensitive = true 12 | } 13 | -------------------------------------------------------------------------------- /nsxt/provider.tf: -------------------------------------------------------------------------------- 1 | provider "nsxt" { 2 | username = var.nsxt_username 3 | password = var.nsxt_password 4 | host = var.nsxt_host 5 | allow_unverified_ssl = var.allow_unverified_ssl 6 | version = "~> 1.1" 7 | } 8 | -------------------------------------------------------------------------------- /nsxt/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | nsxt_username = "admin" 2 | nsxt_password = "a_secret_password" 3 | nsxt_host = "nsxmgr.domain.tld" 4 | 5 | environment_name = "environment_name" # An identifier used to tag resources; examples: dev, EMEA, prod 6 | east_west_transport_zone_name = "overlay-tz" 7 | 8 | # Routers 9 | nsxt_edge_cluster_name = "edge-cluster-1" 10 | nat_gateway_ip = "10.195.74.251" 11 | ops_manager_public_ip = "10.195.74.16" 12 | 13 | # Each PAS Org will draw an IP address from this pool; make sure you have enough 14 | # Your LB Virtual Servers, gateway, NAT gateway, OM should be in the CIDR but not in the available range 15 | external_ip_pool_cidr = "10.195.74.0/24" 16 | external_ip_pool_ranges = ["10.195.74.128-10.195.74.250"] 17 | external_ip_pool_gateway = "10.195.74.1" 18 | 19 | # Load Balancers 20 | nsxt_lb_web_virtual_server_ip_address = "10.195.74.17" 21 | nsxt_lb_tcp_virtual_server_ip_address = "10.195.74.19" 22 | nsxt_lb_ssh_virtual_server_ip_address = "10.195.74.18" 23 | nsxt_lb_tcp_virtual_server_ports = ["8080", "52135", "34000-35000"] 24 | 25 | 26 | # OPTIONAL 27 | # These variables have reasonable default values. 28 | # If your foundation setup is tricky, you may need to set different values. 
29 | allow_unverified_ssl = true # set to true if NSX manager's TLS cert is self-signed 30 | -------------------------------------------------------------------------------- /nsxt/variables.tf: -------------------------------------------------------------------------------- 1 | variable "nsxt_host" { 2 | description = "The NSX-T host. Must resolve to a reachable IP address, e.g. `nsxmgr.example.tld`" 3 | type = string 4 | } 5 | 6 | variable "nsxt_username" { 7 | description = "The NSX-T username, probably `admin`" 8 | type = string 9 | } 10 | 11 | variable "nsxt_password" { 12 | description = "The NSX-T password" 13 | type = string 14 | } 15 | 16 | variable "nsxt_ca_cert" { 17 | type = string 18 | } 19 | 20 | variable "allow_unverified_ssl" { 21 | default = false 22 | description = "Allow connection to NSX-T manager with self-signed certificates. Set to `true` for POC or development environments" 23 | type = string 24 | } 25 | 26 | variable "environment_name" { 27 | description = "An identifier used to tag resources; examples: `dev`, `EMEA`, `prod`" 28 | type = string 29 | } 30 | 31 | variable "east_west_transport_zone_name" { 32 | description = "The name of the Transport Zone that carries internal traffic between the NSX-T components. Also known as the `overlay` transport zone" 33 | type = string 34 | } 35 | 36 | variable "external_ip_pool_cidr" { 37 | description = "The CIDR for the External IP Pool. Must be reachable from clients outside the foundation. Can be RFC1918 addresses (10.x, 172.16-31.x, 192.168.x), e.g. `10.195.74.0/24`" 38 | type = string 39 | } 40 | 41 | variable "external_ip_pool_ranges" { 42 | description = "The IP Ranges for the External IP Pool. Each PAS Org will draw an IP address from this pool; make sure you have enough, e.g. `[\"10.195.74.128-10.195.74.250\"]`" 43 | type = list(string) 44 | } 45 | 46 | variable "external_ip_pool_gateway" { 47 | description = "The gateway for the External IP Pool, e.g. `10.195.74.1`" 48 | type = string 49 | } 50 | 51 | variable "nsxt_edge_cluster_name" { 52 | description = "The name of the deployed Edge Cluster, e.g. `edge-cluster-1`" 53 | type = string 54 | } 55 | 56 | variable "nsxt_t0_router_name" { 57 | default = "T0-Router" 58 | description = "The name of the T0 router" 59 | type = string 60 | } 61 | 62 | variable "nat_gateway_ip" { 63 | description = "The IP Address of the SNAT rule for egress traffic from the Infra & Deployment subnets; should be in the same subnet as the external IP pool, but not in the range of available IP addresses, e.g. `10.195.74.17`" 64 | type = string 65 | } 66 | 67 | variable "ops_manager_public_ip" { 68 | description = "The public IP Address of the Operations Manager. The om's DNS (e.g. `om.system.tld`) should resolve to this IP, e.g. `10.195.74.16`" 69 | type = string 70 | } 71 | 72 | variable "nsxt_lb_web_virtual_server_ip_address" { 73 | description = "The ip address on which the Virtual Server listens for Web (HTTP(S)) traffic, should be in the same subnet as the external IP pool, but not in the range of available IP addresses, e.g. `10.195.74.17`" 74 | type = string 75 | } 76 | 77 | variable "nsxt_lb_tcp_virtual_server_ip_address" { 78 | description = "The ip address on which the Virtual Server listens for TCP traffic, should be in the same subnet as the external IP pool, but not in the range of available IP addresses, e.g. 
`10.195.74.19`" 79 | type = string 80 | } 81 | 82 | variable "nsxt_lb_tcp_virtual_server_ports" { 83 | description = "The list of port(s) on which the Virtual Server listens for TCP traffic, e.g. `[\"8080\", \"52135\", \"34000-35000\"]`" 84 | type = list(string) 85 | } 86 | 87 | variable "nsxt_lb_ssh_virtual_server_ip_address" { 88 | description = "The ip address on which the Virtual Server listens for SSH traffic, should be in the same subnet as the external IP pool, but not in the range of available IP addresses, e.g. `10.195.74.18`" 89 | type = string 90 | } 91 | 92 | variable "vcenter_datacenter" { 93 | type = string 94 | } 95 | 96 | variable "vcenter_datastore" { 97 | type = string 98 | } 99 | 100 | variable "vcenter_host" { 101 | type = string 102 | } 103 | 104 | variable "vcenter_username" { 105 | type = string 106 | } 107 | 108 | variable "vcenter_password" { 109 | type = string 110 | } 111 | 112 | variable "vcenter_resource_pool" { 113 | type = string 114 | } 115 | 116 | variable "vcenter_cluster" { 117 | type = string 118 | } 119 | 120 | variable "ops_manager_ntp" { 121 | type = string 122 | } 123 | 124 | variable "ops_manager_netmask" { 125 | type = string 126 | } 127 | 128 | variable "ops_manager_dns" { 129 | type = string 130 | } 131 | 132 | variable "ops_manager_dns_servers" { 133 | type = string 134 | } 135 | 136 | variable "ops_manager_folder" { 137 | type = string 138 | default = "" 139 | } 140 | 141 | variable "subnet_prefix" { 142 | type = string 143 | default = "192.168" 144 | } 145 | -------------------------------------------------------------------------------- /nsxt/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | nsxt = { 4 | source = "terraform-providers/nsxt" 5 | } 6 | tls = { 7 | source = "hashicorp/tls" 8 | } 9 | } 10 | required_version = ">= 0.13" 11 | } 12 | --------------------------------------------------------------------------------
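Usage note (a sketch, not part of the templates): each template set publishes its configuration as JSON-encoded outputs marked `sensitive` (`stable_config_opsmanager`, `stable_config_pas`, and, on GCP, `stable_config_pks`). Because they are sensitive, `terraform output` redacts them in the summary listing; to read one after `terraform apply`, request it by name, for example (assuming `jq` is installed):

```console
terraform output -raw stable_config_opsmanager | jq .
```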