├── .envrc ├── .gitignore ├── CODE-OF-CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── bosh ├── bootstrap-ops-file.yml ├── cloud-config.yml ├── datadog-ops-file.yml └── debug-ops-file.yml ├── ci ├── configure-core ├── configure-nimbus-core ├── configure-shared ├── pipeline-core.yml ├── pipeline-nimbus-core.yml ├── pipeline-shared.yml ├── tasks │ ├── cleanup-nimbus-testbed │ ├── cleanup-nimbus-testbed.yml │ ├── create-nimbus-vcenter-vars │ ├── create-nimbus-vcenter-vars.yml │ ├── delete-firewall-rules │ ├── delete-firewall-rules.yml │ ├── delete-worker │ ├── delete-worker.yml │ ├── deploy-concourse │ ├── deploy-concourse.yml │ ├── deploy-director │ ├── deploy-director.yml │ ├── deploy-jumpbox │ ├── deploy-jumpbox.yml │ ├── deploy-natbox │ ├── deploy-natbox.yml │ ├── deploy-nimbus-testbed │ ├── deploy-nimbus-testbed.yml │ ├── deploy-vpn-server │ ├── deploy-vpn-server.yml │ ├── deploy-worker │ ├── deploy-worker.yml │ ├── extend-lease-nimbus-testbed │ ├── extend-lease-nimbus-testbed.yml │ ├── set-teams-core │ ├── set-teams-core.yml │ ├── update-cloud-config │ ├── update-cloud-config.yml │ ├── wait-for-ssh │ ├── wait-for-ssh.yml │ ├── wait-ssh │ └── wait-ssh.yml └── utils ├── concourse ├── concourse-core.yml ├── team-authorized-public-key-ops.yml ├── teams │ └── core │ │ ├── bosh-director.yml │ │ ├── bosh-io.yml │ │ ├── bosh-packages.yml │ │ ├── dev.yml │ │ ├── main.yml │ │ └── pcf.yml ├── worker-ops.yml └── workers │ ├── google-asia-worker │ └── worker.yml │ ├── nimbus │ └── worker.yml │ ├── openstack │ ├── README │ └── worker.yml │ ├── ops │ └── add-team.yml │ └── vsphere │ └── worker.yml ├── connect-bosh.sh ├── docker ├── Dockerfile └── build-image.sh ├── docs ├── troubleshooting.md └── vcenter-outages.md ├── jumpbox ├── custom-type-ops.yml └── remove-users-ops.yml ├── natbox └── natbox.yml ├── nimbus-testbed └── nimbus_vc70_dual_networks.rb ├── runway ├── README.md ├── access.json ├── fly-login.sh └── update-namespace.sh ├── scripts ├── 
generate-director-ca.sh ├── generate-jumpbox-ssh-key.sh ├── open-vcenter-nimbus-ui.sh └── provision-gcloud-for-concourse.sh ├── shell.nix ├── ssh-jumpbox.sh ├── terraform ├── allow_jumpbox │   └── allow_jumpbox.tf ├── concourse │   ├── inputs.tf │   ├── main.tf │   └── outputs.tf └── modules │   ├── concourse │   └── concourse.tf │   ├── create_env_thru_jumpbox │   └── create_env.tf │   ├── director │   └── director.tf │   ├── jumpbox │   └── jumpbox.tf │   └── private_subnet │   └── private_subnet.tf ├── upgrader ├── deploy-upgrader ├── upgrader-state.json └── upgrader.yml └── vpn ├── README.md ├── caddy-ops.yml ├── manifest.yml ├── openvpn-team-ops.yml └── ssoca_ca_cert.pem /.envrc: -------------------------------------------------------------------------------- 1 | export REPO_DIR=$PWD 2 | 3 | if command -v lorri &> /dev/null; then 4 | eval "$(lorri direnv)" 5 | fi 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /tmp/ 2 | upgrader.vars 3 | pipeline.vars 4 | */fly 5 | .idea/ 6 | -------------------------------------------------------------------------------- /CODE-OF-CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in the BOSH Concourse Deployments project and our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at oss-coc@vmware.com. 63 | All complaints will be reviewed and investigated promptly and fairly. 64 | 65 | All community leaders are obligated to respect the privacy and security of the 66 | reporter of any incident. 67 | 68 | ## Enforcement Guidelines 69 | 70 | Community leaders will follow these Community Impact Guidelines in determining 71 | the consequences for any action they deem in violation of this Code of Conduct: 72 | 73 | ### 1. Correction 74 | 75 | **Community Impact**: Use of inappropriate language or other behavior deemed 76 | unprofessional or unwelcome in the community. 77 | 78 | **Consequence**: A private, written warning from community leaders, providing 79 | clarity around the nature of the violation and an explanation of why the 80 | behavior was inappropriate. A public apology may be requested. 81 | 82 | ### 2. Warning 83 | 84 | **Community Impact**: A violation through a single incident or series 85 | of actions. 86 | 87 | **Consequence**: A warning with consequences for continued behavior. No 88 | interaction with the people involved, including unsolicited interaction with 89 | those enforcing the Code of Conduct, for a specified period of time. This 90 | includes avoiding interactions in community spaces as well as external channels 91 | like social media. Violating these terms may lead to a temporary or 92 | permanent ban. 93 | 94 | ### 3. Temporary Ban 95 | 96 | **Community Impact**: A serious violation of community standards, including 97 | sustained inappropriate behavior. 
98 | 99 | **Consequence**: A temporary ban from any sort of interaction or public 100 | communication with the community for a specified period of time. No public or 101 | private interaction with the people involved, including unsolicited interaction 102 | with those enforcing the Code of Conduct, is allowed during this period. 103 | Violating these terms may lead to a permanent ban. 104 | 105 | ### 4. Permanent Ban 106 | 107 | **Community Impact**: Demonstrating a pattern of violation of community 108 | standards, including sustained inappropriate behavior, harassment of an 109 | individual, or aggression toward or disparagement of classes of individuals. 110 | 111 | **Consequence**: A permanent ban from any sort of public interaction within 112 | the community. 113 | 114 | ## Attribution 115 | 116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 117 | version 2.0, available at 118 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 119 | 120 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 121 | enforcement ladder](https://github.com/mozilla/diversity). 122 | 123 | [homepage]: https://www.contributor-covenant.org 124 | 125 | For answers to common questions about this code of conduct, see the FAQ at 126 | https://www.contributor-covenant.org/faq. Translations are available at 127 | https://www.contributor-covenant.org/translations. -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Contributing 2 | 3 | 1. Fork it ( https://github.com/pivotal-cf/bosh-concourse-deployments/fork ) 4 | 2. Create your feature branch (`git checkout -b my-new-feature`) 5 | 3. Commit your changes (`git commit -am 'Add some feature'`) 6 | 4. Push to the branch (`git push origin my-new-feature`) 7 | 5. 
Create a new Pull Request 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## BOSH Concourse Deployments 2 | 3 | This repo holds the Concourse Pipelines, Jobs, and Tasks to set up a Concourse environment with: 4 | * Terraform scripts to provision the environment 5 | * Strict security groups: e.g. the DB port is only accessible from the ATC VM 6 | * SSH traffic disabled by default: The SSH port is opened automatically by Concourse tasks to perform deployments and closed after. 7 | * VMs configured with no public IPs: Only natbox and jumpbox have external IPs. 8 | 9 | In effect what this will deploy is an upgrader concourse which manages a second concourse. It will provide jobs to create workers, update the director and concourse version and provide some access security to the second concourse. 
The upgrader concourse can be deployed on Nimbus (/ci/configure-nimbus-core) or on a separate IaaS (/ci/configure-core and /ci/configure-shared) 10 | 11 | ## Bootstrapping a Concourse Environment 12 | 13 | ### Deploy Upgrader Concourse 14 | 15 | We'll start by deploying a secondary "Upgrader" Concourse VM. 16 | This Concourse will be used to set up the main Concourse environment on GCP as well as perform upgrades later on. 17 | These steps assume you'll deploy the Upgrader to a local vSphere environment. 18 | Alternatively you can `vagrant up` a Concourse instance on your workstation. 19 | 20 | 1. Create a DNS record for the Upgrader VM pointing to a valid vSphere IP. 21 | 1. Register Upgrader Concourse as an OAuth application with GitHub: https://github.com/settings/applications/new 22 | - Callback URL: `https://YOUR_UPGRADER_URL/auth/github/callback` 23 | 1. Copy the contents of `./upgrader/upgrader.vars.tmpl` to a LastPass note or some other safe location, filling in the appropriate values. 24 | 1. Deploy the Upgrader VM: 25 | 26 | ```bash 27 | cd ./upgrader 28 | bosh create-env ./upgrader.yml -l <( lpass show --notes "bosh-concourse-upgrader-create-env" ) 29 | git add ./upgrader-state.json 30 | git commit && git push 31 | ``` 32 | 33 | ### Set pipelines on upgrader vm 34 | 35 | The upgrader vm must be configured with the pipelines that can deploy the 36 | main Concourse deployment. 37 | 38 | 1. Read `./scripts/provision-gcloud-for-concourse.sh` to make sure you're not blindly running an untrusted bash script on your system 39 | 1. Set up the required variables and run the provision scripts: 40 | 41 | ```bash 42 | TERRAFORM_SERVICE_ACCOUNT_ID=concourse-deployments \ 43 | DIRECTOR_SERVICE_ACCOUNT_ID=concourse-director \ 44 | PROJECT_ID=my-gcp-project-id \ 45 | CONCOURSE_BUCKET_NAME=concourse-deployments \ 46 | ./scripts/provision-gcloud-for-concourse.sh 47 | ``` 48 | - for debugging purposes you can also set `TRACE=true` to show all commands being run. 49 | 1. 
Generate a set of Google Cloud Storage Interoperability Keys as described [here](https://cloud.google.com/storage/docs/migrating#keys). 50 | 1. Create a GitHub access token to avoid rate limiting as described [here](https://help.github.com/articles/creating-an-access-token-for-command-line-use/). 51 | 1. Register main Concourse as an OAuth application with GitHub: https://github.com/settings/applications/new 52 | - Callback URL: `https://YOUR_CONCOURSE_URL/auth/github/callback` 53 | 1. Generate the Director CA Cert by running `./scripts/generate-director-ca.sh`. 54 | 1. Generate the jumpbox ssh keys by running `./scripts/generate-jumpbox-ssh-key.sh`. 55 | 1. Add the jumpbox key as a project-wide SSH key with the username `vcap` as described [here](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys). 56 | 1. Copy the contents of `./ci/pipeline.vars.tmpl` to a LastPass note or some other safe location, filling in the appropriate values. 57 | 1. Log in using the fly cli to the newly deployed upgrader Concourse vm 58 | 1. Set the Concourse pipeline on the upgrader vm. 59 | 60 | ```bash 61 | fly -t upgrader sp -p concourse -c ~/workspace/bosh-concourse-deployments/ci/pipeline.yml -l <(lpass show note YOUR_LASTPASS_NOTE) 62 | ``` 63 | 64 | #### Additional Configuration for Optional External Workers 65 | 66 | 1. Configure external worker pipeline: 67 | The CPI Core team needs a few external workers and deploys them with this pipeline. If you'd like to deploy external workers 68 | yourself you can use this pipeline as an example. 69 | 70 | ```bash 71 | fly -t upgrader sp -p concourse-workers -c ~/workspace/bosh-concourse-deployments/ci/pipeline-cpi-workers.yml -l <(lpass show note YOUR_LASTPASS_NOTE) 72 | ``` 73 | 1. 
Seed empty statefiles: 74 | 75 | ```bash 76 | gsutil cp -n <( echo '{}' ) gs://${CONCOURSE_BUCKET_NAME}/asia/natbox-state.json 77 | gsutil cp -n <( echo '{}' ) gs://${CONCOURSE_BUCKET_NAME}/asia/jumpbox-state.json 78 | gsutil cp -n <( echo '{}' ) gs://${CONCOURSE_BUCKET_NAME}/worker/vsphere-v6.5-worker-state.json 79 | gsutil cp -n <( echo '{}' ) gs://${CONCOURSE_BUCKET_NAME}/worker/vcloud-v5.5-worker-state.json 80 | gsutil cp -n <( echo '{}' ) gs://${CONCOURSE_BUCKET_NAME}/worker/google-asia-worker-state.json 81 | ``` 82 | 83 | ### Running Pipelines 84 | 85 | 1. Manually trigger `concourse/prepare-concourse-env` job. 86 | 1. Manually trigger `concourse/update-director` job. 87 | 1. Manually trigger `concourse/update-cloud-config` job. 88 | 1. Manually trigger `concourse/update-concourse` job. 89 | 90 | **Warning** if completely repaving concourse and this results in an IP change 91 | for the outbound requests from the VPC, then there are a few places that use 92 | this IP address to break open holes for database or ssh connectivity. 93 | 94 | At the time of this writing, two places were updated as a result of repaving: 95 | 96 | 1. Address for terraformed database instances present in main bosh pipeline. 97 | 1. 'Bosh lite' security group for bosh-agent integration tests for remote SSH 98 | 99 | Removing the need for hardcoding would be ideal, but likely a lot of work. The 100 | address should not change often, only when VPC-destructive concourse maintenance 101 | occurs. 102 | 103 | #### Running Pipelines with Optional External Workers 104 | 105 | If you have deployed optional external workers you must follow a slightly modified order: 106 | 107 | 1. Manually trigger `concourse/prepare-concourse-env` job. 108 | 1. Manually trigger `concourse/update-director` job. 109 | 1. Manually trigger `concourse/update-cloud-config` job. 110 | 1. Manually trigger `concourse-workers/prepare-asia-env` job. 
111 | - the `concourse/update-concourse` job will place a file in `concourse-update-trigger` resource. 112 | This file is used to automatically trigger the external worker jobs across pipelines. 113 | 1. Manually trigger `concourse/update-concourse` job. This should trigger the external worker 114 | jobs (i.e. you don't need to manually trigger the worker jobs). 115 | 116 | ### External Teams 117 | 118 | Thanks to the distributed model of the CF Foundation many teams from many 119 | companies can share this CI environment to run builds against their CPIs. 120 | At the time of this writing, the CPI-only concourse is now destroyed, with the 121 | majority of active CPIs building in the 'main' bosh concourse. External worker 122 | setup is currently used for the Openstack CPI, as the director/worker lies 123 | within a protected egress-only openstack env. 124 | 125 | #### Creating a team on the ATC (Concourse Administrator) 126 | 127 | In this example, we are adding a new team 'DigitalOcean CPI' 128 | 129 | The DigitalOcean CPI team has provided the following: 130 | 131 | - a worker public key 132 | - a GitHub organization 133 | - a GitHub team within that organization that will be able to authenticate against the Concourse environment. 134 | - a Concourse team name, no space, no special characters, all lowercase, (e.g. "digitalocean") 135 | 136 | The BOSH CPI team does the following: 137 | 138 | 1. Shares the TSA host public key (search for `concourse_tsa_public_key` in LastPass) 139 | with the DigitalOcean CPI team (e.g. "ssh-rsa AAAAB3NTSAHostPublicKey...") 140 | 1. Add the worker public key entry to the list under [`concourse_teams`](https://github.com/pivotal-cf/bosh-concourse-deployments/blob/d87f8b7134b407d78bfcda29dcd721e0ade746bd/ci/pipeline.vars.tmpl#L54-L56) on the secure note saved on LastPass. 
141 | Example: 142 | 143 | ```json 144 | [{"name": "digitalocean", "github_team": "DigitalOcean/BOSH CPI", "worker_public_key": "ssh-rsa AAAAB3DigitalOceanWorker..."}] 145 | ``` 146 | 1. Trigger the [update-concourse](https://bosh-upgrader.ci.cf-app.com/teams/main/pipelines/concourse/jobs/update-concourse/) job, making sure there are no running jobs first. 147 | 148 | Let the DigitalOcean CPI team know when the deploy has finished so that they can 149 | rock. 150 | 151 | #### Creating external worker manifest (Team member) 152 | 153 | The BOSH CPI team has provided following: 154 | 155 | - TSA host public key 156 | - TSA URL (e.g. https://bosh-cpi.ci.cf-app.com) 157 | 158 | Do the following: 159 | 160 | 1. Generate a key for your worker. The following command will create a keypair; don't use passphrase: 161 | ``` 162 | ssh-keygen -N '' -b 4096 -f /tmp/openstack-cpi-worker -C team_name 163 | ``` 164 | 1. Transmit the _public_ portion to the BOSH CPI team (e.g. "ssh-rsa AAAAB3DigitalOceanWorker..."). 165 | 1. Let the BOSH CPI team know your GitHub organization (e.g. 166 | "DigitalOcean") and team handle (e.g. "DigitalOcean CPI"). 167 | 1. Pick a display name for your team and let the BOSH CPI team know. (e.g. "digitalocean") 168 | 1. Create the manifest for your worker and make sure to set the following properties: 169 | 170 | ``` 171 | team: ((worker_team_name)) 172 | tsa: 173 | host: ((concourse_tsa_hostname)) 174 | host_public_key: ((concourse_tsa_public_key)) 175 | private_key: ((worker_private_key)) 176 | ``` 177 | 178 | * worker_team_name, e.g. "digitalocean". This is the team name provided to BOSH CPI 179 | * concourse_tsa_hostname, e.g. https://bosh-cpi.ci.cf-app.com, provided by BOSH CPI 180 | * host_public_key: e.g. 
"ssh-rsa AAAAB3NTSAHostPublicKey...", provided by BOSH CPI 181 | * worker_private_key: the private key generated for the worker 182 | 183 | You can find a sample of a worker manifest [here](https://github.com/pivotal-cf/bosh-concourse-deployments/blob/master/vsphere-v6.5/worker.yml). 184 | 185 | After deploying the worker, authenticate with Concourse and confirm worker has registered: 186 | 187 | 1. Browse to the Concourse URL and download the `fly` client 188 | 1. Log into Concourse: `fly -t cpi login -c https://bosh-cpi.ci.cf-app.com -n digitalocean` 189 | 1. Confirm worker has registered: `fly -t cpi workers` 190 | 191 | ### Updating Trusted CIDRs for access (workers and humans) 192 | 193 | 1. Ensure lastpass note is updated with the CIDRs. Look in the `bosh-concourse-deployments gcp bosh-core` note for the CIDRs and their sources. 194 | 1. Run `configure-shared` to pick up any lastpass note changes 195 | 1. Start a `re-terraform` job in the shared environment pipeline to refresh the firewall rules. 196 | 197 | ### Troubleshooting 198 | 199 | Refer to the _Troubleshooting_ document under [docs/](`docs/`). 200 | 201 | ## Figures 202 | 203 | ### GCloud Network Topology 204 | ![gcloud network topology](https://docs.google.com/drawings/d/1TbnPOjp27vpwxI5hJi2ateVXEU0_2KQf6RbtMmLUyZ0/pub?w=925&h=1172) 205 | -------------------------------------------------------------------------------- /bosh/bootstrap-ops-file.yml: -------------------------------------------------------------------------------- 1 | # assumes you have a tunnel open from localhost:6868 to the private director IP 2 | # e.g. 
ssh -fnNT -L 6868:$INTERNAL_DIRECTOR_IP:6868 $JUMPBOX_USER@$JUMPBOX_IP 3 | - type: replace 4 | path: /cloud_provider/mbus 5 | value: https://mbus:((mbus_bootstrap_password))@127.0.0.1:6868 6 | - type: replace 7 | path: /variables/name=director_ssl/options/alternative_names 8 | value: [((internal_ip)), 127.0.0.1] 9 | - type: replace 10 | path: /variables/name=mbus_bootstrap_ssl/options/alternative_names 11 | value: [((internal_ip)), 127.0.0.1] 12 | -------------------------------------------------------------------------------- /bosh/cloud-config.yml: -------------------------------------------------------------------------------- 1 | azs: 2 | - name: us1 3 | cloud_properties: 4 | zone: ((zone)) 5 | - name: asia 6 | cloud_properties: 7 | zone: ((asia.zone)) 8 | 9 | vm_types: 10 | # BOSH Core team VM types 11 | # Used by Concourse ATC and TSA VM 12 | - name: concourse_core 13 | cloud_properties: 14 | cpu: 16 15 | ram: 32_768 16 | root_disk_size_gb: 20 17 | root_disk_type: pd-ssd 18 | tags: 19 | - ((bosh_core.atc_tag)) 20 | - ((nat_traffic_tag)) 21 | target_pool: ((bosh_core.target_pool)) 22 | # Used by Concourse database VM 23 | - name: concourse_core_database 24 | cloud_properties: 25 | cpu: 4 26 | ram: 4_096 27 | root_disk_size_gb: 20 28 | root_disk_type: pd-ssd 29 | tags: 30 | - ((bosh_core.db_tag)) 31 | - ((nat_traffic_tag)) 32 | # Concourse Worker VM types 33 | - name: concourse_core_worker_8_16 34 | cloud_properties: 35 | cpu: 8 36 | ram: 16_384 37 | root_disk_size_gb: 250 38 | root_disk_type: pd-ssd 39 | tags: 40 | - ((nat_traffic_tag)) 41 | - ((bosh_core.worker_tag)) 42 | - name: concourse_core_worker_12_16 43 | cloud_properties: 44 | cpu: 12 45 | ram: 16_384 46 | root_disk_size_gb: 350 47 | root_disk_type: pd-ssd 48 | tags: 49 | - ((nat_traffic_tag)) 50 | - ((bosh_core.worker_tag)) 51 | - name: concourse_core_worker_32_64 52 | cloud_properties: 53 | cpu: 32 54 | ram: 65_536 55 | root_disk_size_gb: 164 56 | root_disk_type: pd-ssd 57 | tags: 58 | - 
((nat_traffic_tag)) 59 | - ((bosh_core.worker_tag)) 60 | - name: concourse_core_worker_2_8 61 | cloud_properties: 62 | cpu: 2 63 | ram: 8_192 64 | root_disk_size_gb: 164 65 | root_disk_type: pd-ssd 66 | tags: 67 | - ((nat_traffic_tag)) 68 | - ((bosh_core.worker_tag)) 69 | #test bosh-load-test worker vm 70 | - name: concourse_core_load_worker 71 | cloud_properties: 72 | cpu: 16 73 | ram: 32_768 74 | root_disk_size_gb: 164 75 | root_disk_type: pd-ssd 76 | tags: 77 | - ((nat_traffic_tag)) 78 | - ((bosh_core.worker_tag)) 79 | - name: concourse_core_deploy 80 | cloud_properties: 81 | cpu: 1 82 | ram: 1_024 83 | root_disk_size_gb: 20 84 | root_disk_type: pd-standard 85 | tags: 86 | - ((bosh_core.atc_tag)) 87 | - ((nat_traffic_tag)) 88 | - ((director_api_access_tag)) 89 | 90 | # Used by OpenVPN Server 91 | - name: openvpn_server 92 | cloud_properties: 93 | cpu: 1 94 | ram: 2_560 95 | root_disk_size_gb: 20 96 | root_disk_type: pd-standard 97 | tags: 98 | - ((vpn_server_tag)) 99 | 100 | - name: compilation 101 | cloud_properties: 102 | machine_type: n1-highcpu-4 103 | root_disk_size_gb: 128 104 | root_disk_type: pd-ssd 105 | 106 | compilation: 107 | workers: 5 108 | reuse_compilation_vms: true 109 | az: us1 110 | vm_type: compilation 111 | network: concourse 112 | 113 | networks: 114 | - name: concourse 115 | type: dynamic 116 | subnets: 117 | - az: us1 118 | cloud_properties: 119 | network_name: ((network)) 120 | subnetwork_name: ((subnetwork)) 121 | tags: 122 | - ((director_internal_tag)) 123 | - ((jumpbox_internal_management_tag)) 124 | - name: concourse-windows 125 | type: manual 126 | subnets: 127 | - az: us1 128 | cloud_properties: 129 | ephemeral_external_ip: true 130 | network_name: ((network)) 131 | subnetwork_name: ((windows_subnetwork)) 132 | tags: 133 | - ((director_internal_tag)) 134 | - ((jumpbox_internal_management_tag)) 135 | gateway: ((windows_internal_gw)) 136 | range: ((windows_internal_cidr)) 137 | reserved: [] 138 | - name: concourse-asia 139 | type: 
dynamic 140 | subnets: 141 | - az: asia 142 | cloud_properties: 143 | network_name: ((network)) 144 | subnetwork_name: ((asia.subnetwork)) 145 | tags: 146 | - ((director_internal_tag)) 147 | - ((jumpbox_internal_management_tag)) 148 | - name: vip 149 | type: vip 150 | 151 | disk_types: 152 | - name: persistent-ssd 153 | disk_size: 500_000 154 | cloud_properties: 155 | type: pd-ssd 156 | - name: caddy-ssd 157 | disk_size: 1_000 158 | cloud_properties: 159 | type: pd-ssd 160 | 161 | # this will force bosh to create new persistent disk on every deployment 162 | # this will orphan previous disk in case a failure during upgrader 163 | # Related story: https://www.pivotaltracker.com/story/show/155955816 164 | variables: 165 | - name: salt 166 | type: password 167 | -------------------------------------------------------------------------------- /bosh/datadog-ops-file.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - type: replace 3 | path: /instance_groups/name=bosh/properties/hm/datadog? 4 | value: 5 | api_key: ((datadog_api_key)) 6 | application_key: ((datadog_application_key)) 7 | 8 | - type: replace 9 | path: /instance_groups/name=bosh/properties/hm/datadog_enabled? 10 | value: true 11 | -------------------------------------------------------------------------------- /bosh/debug-ops-file.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - type: replace 3 | path: /instance_groups/name=bosh/properties/director/debug? 
4 | value: 5 | keep_unreachable_vms: true 6 | -------------------------------------------------------------------------------- /ci/configure-core: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FLY="${FLY_CLI:-fly}" 4 | 5 | until lpass status;do 6 | LPASS_DISABLE_PINENTRY=1 lpass ls a 7 | sleep 1 8 | done 9 | 10 | until "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" status;do 11 | "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" login 12 | sleep 1 13 | done 14 | 15 | REPO_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" 16 | 17 | "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" set-pipeline \ 18 | -p concourse-core \ 19 | -c "$REPO_DIR/pipeline-core.yml" \ 20 | -l <( lpass show --note "bosh-concourse-deployments gcp bosh-core" ) \ 21 | -l <( lpass show --note "bosh-concourse-deployments gcp bosh-core external workers" ) 22 | -------------------------------------------------------------------------------- /ci/configure-nimbus-core: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FLY="${FLY_CLI:-fly}" 4 | 5 | until lpass status;do 6 | LPASS_DISABLE_PINENTRY=1 lpass ls a 7 | sleep 1 8 | done 9 | 10 | until "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" status;do 11 | "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" login 12 | sleep 1 13 | done 14 | 15 | REPO_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" 16 | 17 | "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" set-pipeline \ 18 | -p bosh-core \ 19 | -v nimbus-location=wdc \ 20 | -c "$REPO_DIR/pipeline-nimbus-core.yml" \ 21 | -l <( lpass show --note "bosh-concourse-deployments gcp bosh-core" ) \ 22 | -l <( lpass show --note "bosh-concourse-deployments gcp bosh-core external nimbus workers" ) \ 23 | -l <( lpass show --note "bosh-cli concourse secrets" ) 24 | -------------------------------------------------------------------------------- /ci/configure-shared: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FLY="${FLY_CLI:-fly}" 4 | 5 | until lpass status;do 6 | LPASS_DISABLE_PINENTRY=1 lpass ls a 7 | sleep 1 8 | done 9 | 10 | until "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" status;do 11 | "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" login 12 | sleep 1 13 | done 14 | 15 | REPO_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" 16 | 17 | "$FLY" -t "${CONCOURSE_TARGET:-runway@bosh-core}" set-pipeline \ 18 | -p shared-environment \ 19 | -c "$REPO_DIR/pipeline-shared.yml" \ 20 | -l <( lpass show --note "bosh-concourse-deployments gcp bosh-core" ) \ 21 | -l <( lpass show --note "bosh-concourse-deployments gcp bosh-core director" ) \ 22 | -l <( lpass show --note bosh-concourse-upgrader-vpn ) -------------------------------------------------------------------------------- /ci/pipeline-core.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jobs: 3 | - name: update-concourse 4 | serial_groups: [deploy] 5 | plan: 6 | - in_parallel: 7 | - get: deployments-src 8 | - get: stemcell 9 | resource: xenial-stemcell 10 | - get: windows-stemcell 11 | - get: bosh-cli 12 | - get: concourse 13 | - get: bpm 14 | - get: caddy 15 | - get: windows-utilities 16 | - get: windows-tools 17 | - put: terraform 18 | params: 19 | env_name: bosh-cpi-concourse 20 | terraform_source: deployments-src/terraform/concourse 21 | vars: 22 | allow_ssh_access_to_jumpbox: 1 23 | - task: wait-for-ssh 24 | file: deployments-src/ci/tasks/wait-ssh.yml 25 | params: 26 | JUMPBOX_HOST: {{jumpbox_host}} 27 | - task: update-cloud-config 28 | file: deployments-src/ci/tasks/update-cloud-config.yml 29 | params: 30 | BOSH_CLIENT: {{bosh_client_admin}} 31 | BOSH_CLIENT_SECRET: {{bosh_client_secret_admin}} 32 | BOSH_CA_CERT: {{bosh_ca_cert}} 33 | JUMPBOX_SSH_KEY: {{jumpbox_ssh_key}} 34 | JUMPBOX_SSH_USER: {{jumpbox_ssh_user}} 35 | - task: deploy-concourse 36 | 
file: deployments-src/ci/tasks/deploy-concourse.yml 37 | params: 38 | DEPLOYMENT_NAME: concourse-core 39 | DEPLOYMENT_CONFIG_PATH: concourse/concourse-core.yml 40 | CONCOURSE_SECRETS: {{concourse_secrets}} 41 | BOSH_ENVIRONMENT: {{bosh_environment}} 42 | BOSH_CLIENT: {{bosh_client}} 43 | BOSH_CLIENT_SECRET: {{bosh_client_secret}} 44 | BOSH_CA_CERT: {{bosh_ca_cert}} 45 | JUMPBOX_HOST: {{jumpbox_host}} 46 | JUMPBOX_SSH_KEY: {{jumpbox_ssh_key}} 47 | JUMPBOX_SSH_USER: {{jumpbox_ssh_user}} 48 | CONCOURSE_EXTERNAL_URL: {{concourse_external_url}} 49 | CONCOURSE_BASIC_AUTH_USERNAME: {{concourse_basic_auth_username}} 50 | CONCOURSE_BASIC_AUTH_PASSWORD: {{concourse_basic_auth_password_bcrypt}} 51 | CONCOURSE_GITHUB_CLIENT_ID: {{concourse_github_client_id}} 52 | CONCOURSE_GITHUB_CLIENT_SECRET: {{concourse_github_client_secret}} 53 | - task: set-teams-core 54 | file: deployments-src/ci/tasks/set-teams-core.yml 55 | params: 56 | CONCOURSE_EXTERNAL_URL: {{concourse_external_url}} 57 | CONCOURSE_BASIC_AUTH_USERNAME: {{concourse_basic_auth_username}} 58 | CONCOURSE_BASIC_AUTH_PASSWORD: {{concourse_basic_auth_password}} 59 | ensure: 60 | put: terraform 61 | params: 62 | env_name: bosh-cpi-concourse 63 | terraform_source: deployments-src/terraform/concourse 64 | vars: 65 | allow_ssh_access_to_jumpbox: 0 66 | 67 | resource_types: 68 | - name: terraform 69 | type: docker-image 70 | source: 71 | repository: harbor-repo.vmware.com/dockerhub-proxy-cache/ljfranklin/terraform-resource 72 | - name: gcs-resource 73 | type: docker-image 74 | source: 75 | repository: harbor-repo.vmware.com/dockerhub-proxy-cache/frodenas/gcs-resource 76 | 77 | resources: 78 | - name: deployments-src 79 | type: git 80 | source: 81 | uri: https://github.com/pivotal-cf/bosh-concourse-deployments.git 82 | branch: master 83 | - name: terraform 84 | type: terraform 85 | source: 86 | storage: 87 | bucket: {{deployments_bucket_name}} 88 | bucket_path: terraform/ 89 | access_key_id: {{storage_access_key}} 90 | 
secret_access_key: {{storage_secret_key}} 91 | endpoint: https://storage.googleapis.com 92 | vars: 93 | project_id: {{project_id}} 94 | gcp_credentials_json: {{gcp_credentials_json}} 95 | ssh_trusted_cidrs: {{ssh_trusted_cidrs}} 96 | create_env_trusted_cidrs: {{create_env_trusted_cidrs}} 97 | bosh_core_web_trusted_cidrs: {{bosh_core_web_trusted_cidrs}} 98 | - name: bosh-cli 99 | type: s3 100 | source: 101 | bucket: bosh-cli-artifacts 102 | regexp: bosh-cli-(\d+\.\d+\.\d+)-linux-amd64 103 | - name: xenial-stemcell 104 | type: bosh-io-stemcell 105 | source: 106 | name: bosh-google-kvm-ubuntu-xenial-go_agent 107 | - name: concourse 108 | type: bosh-io-release 109 | source: 110 | repository: concourse/concourse-bosh-release 111 | - name: bpm 112 | type: bosh-io-release 113 | source: 114 | repository: cloudfoundry/bpm-release 115 | - name: caddy 116 | type: bosh-io-release 117 | source: 118 | repository: dpb587/caddy-bosh-release 119 | - name: windows-utilities 120 | type: bosh-io-release 121 | source: 122 | repository: cloudfoundry-incubator/windows-utilities-release 123 | - name: windows-tools 124 | type: bosh-io-release 125 | source: 126 | repository: cloudfoundry-incubator/windows-tools-release 127 | - name: windows-stemcell 128 | type: bosh-io-stemcell 129 | source: 130 | name: bosh-google-kvm-windows2016-go_agent -------------------------------------------------------------------------------- /ci/pipeline-nimbus-core.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jobs: 3 | - name: renew-lease-nimbus-testbed 4 | plan: 5 | - do: 6 | - in_parallel: 7 | - get: deployments-src 8 | - get: every-day 9 | trigger: true 10 | - task: extend-lease 11 | file: deployments-src/ci/tasks/extend-lease-nimbus-testbed.yml 12 | params: 13 | TESTBED_NAME: bosh-main-ci 14 | USER: ((nimbus-user)) 15 | NIMBUS_LOCATION: ((nimbus-location)) 16 | 17 | - name: create-nimbus-testbed 18 | plan: 19 | - do: 20 | - in_parallel: 21 | - get: 
deployments-src 22 | - task: testbed-cleanup 23 | file: deployments-src/ci/tasks/cleanup-nimbus-testbed.yml 24 | params: 25 | TESTBED_NAME: bosh-main-ci 26 | USER: ((nimbus-user)) 27 | NIMBUS_LOCATION: ((nimbus-location)) 28 | - task: testbed-deploy 29 | file: deployments-src/ci/tasks/deploy-nimbus-testbed.yml 30 | params: 31 | TESTBED_NAME: bosh-main-ci 32 | USER: ((nimbus-user)) 33 | NIMBUS_LOCATION: ((nimbus-location)) 34 | - put: vsphere-v7.0-worker-state-1 35 | params: 36 | file: worker-state/clean-env 37 | - put: testbed-info 38 | params: 39 | file: nimbus-testbed-info/testbedInfo.json 40 | - task: create-nimbus-vcenter-vars 41 | file: deployments-src/ci/tasks/create-nimbus-vcenter-vars.yml 42 | params: 43 | VCENTER_PASSWORD: ((vcenter-password)) 44 | - put: nimbus-vcenter-vars 45 | params: 46 | file: nimbus-vcenter-vars/nimbus-vcenter-vars.yml 47 | 48 | - name: update-vsphere-v7.0-worker 49 | serial: true 50 | plan: 51 | - do: 52 | - in_parallel: 53 | - get: concourse 54 | - get: deployments-src 55 | - get: bosh-cli 56 | - get: testbed-info 57 | trigger: true 58 | - get: vsphere-v7.0-worker-state-1 59 | - get: bosh-cpi-release 60 | resource: bosh-vsphere-cpi-release 61 | - get: stemcell 62 | resource: vsphere-xenial-stemcell 63 | - get: bosh-main-docker-image 64 | - in_parallel: 65 | - task: update-vsphere-v7.0-worker-1 66 | file: deployments-src/ci/tasks/deploy-worker.yml 67 | image: bosh-main-docker-image 68 | input_mapping: 69 | worker-state: vsphere-v7.0-worker-state-1 70 | params: 71 | WORKER_SECRETS: ((vsphere-v70-worker-secrets-1)) 72 | ENVIRONMENT_NAME: nimbus 73 | ensure: 74 | put: vsphere-v7.0-worker-state-1 75 | params: 76 | file: updated-worker-state/worker-state.json 77 | 78 | resource_types: 79 | - name: gcs-resource 80 | type: docker-image 81 | source: 82 | repository: harbor-repo.vmware.com/dockerhub-proxy-cache/frodenas/gcs-resource 83 | 84 | resources: 85 | - name: every-day 86 | type: time 87 | source: 88 | interval: 24h 89 | - name: 
deployments-src 90 | type: git 91 | source: 92 | uri: https://github.com/pivotal-cf/bosh-concourse-deployments.git 93 | branch: master 94 | - name: testbed-info 95 | type: gcs-resource 96 | source: 97 | bucket: ((deployments_core_bucket_name)) 98 | json_key: ((gcp_credentials_json)) 99 | versioned_file: nimbus/testbed-info.json 100 | - name: nimbus-vcenter-vars 101 | type: gcs-resource 102 | source: 103 | bucket: ((deployments_core_bucket_name)) 104 | json_key: ((gcp_credentials_json)) 105 | versioned_file: nimbus-vcenter-vars.yml 106 | 107 | - name: vsphere-v7.0-worker-state-1 108 | type: gcs-resource 109 | source: 110 | bucket: ((deployments_core_bucket_name)) 111 | json_key: ((gcp_credentials_json)) 112 | versioned_file: worker/vsphere-v7.0-worker-state-1.json 113 | - name: bosh-cli 114 | type: s3 115 | source: 116 | bucket: bosh-cli-artifacts 117 | regexp: bosh-cli-(\d+\.\d+\.\d+)-linux-amd64 118 | - name: vsphere-xenial-stemcell 119 | type: bosh-io-stemcell 120 | source: 121 | name: bosh-vsphere-esxi-ubuntu-xenial-go_agent 122 | - name: concourse 123 | type: bosh-io-release 124 | source: 125 | repository: concourse/concourse-bosh-release 126 | - name: bosh-vsphere-cpi-release 127 | type: bosh-io-release 128 | source: 129 | repository: cloudfoundry/bosh-vsphere-cpi-release 130 | - name: bosh-main-docker-image 131 | type: registry-image 132 | source: 133 | repository: bosh/main 134 | username: ((docker_username)) 135 | password: ((docker_password)) 136 | -------------------------------------------------------------------------------- /ci/pipeline-shared.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jobs: 3 | - name: prepare-concourse-env 4 | serial_groups: [deploy] 5 | plan: 6 | - in_parallel: 7 | - get: deployments-src 8 | - get: jumpbox-deployment-src 9 | - get: bosh-cli 10 | - get: concourse-natbox-state 11 | - get: concourse-jumpbox-state 12 | - get: bosh-google-cpi-release 13 | - get: networking-release 14 
| - get: stemcell 15 | - put: terraform 16 | params: 17 | env_name: bosh-cpi-concourse 18 | terraform_source: deployments-src/terraform/concourse 19 | vars: 20 | allow_mbus_access_to_natbox: 1 21 | allow_mbus_access_to_jumpbox: 1 22 | - in_parallel: 23 | - task: update-natbox 24 | file: deployments-src/ci/tasks/deploy-natbox.yml 25 | input_mapping: 26 | natbox-state: concourse-natbox-state 27 | params: 28 | GOOGLE_CREDENTIALS: {{gcp_credentials_json}} 29 | ensure: 30 | put: concourse-natbox-state 31 | params: 32 | file: updated-natbox-state/natbox-state.json 33 | - task: update-jumpbox 34 | file: deployments-src/ci/tasks/deploy-jumpbox.yml 35 | input_mapping: 36 | jumpbox-state: concourse-jumpbox-state 37 | params: 38 | TEAMS: {{bosh_teams}} 39 | GOOGLE_CREDENTIALS: {{gcp_credentials_json}} 40 | TRUSTED_CIDRS: {{ssh_trusted_cidrs}} 41 | MACHINE_TYPE: {{jumpbox_machine_type}} 42 | ensure: 43 | put: concourse-jumpbox-state 44 | params: 45 | file: updated-jumpbox-state/jumpbox-state.json 46 | ensure: 47 | put: terraform 48 | params: 49 | env_name: bosh-cpi-concourse 50 | terraform_source: deployments-src/terraform/concourse 51 | vars: 52 | allow_mbus_access_to_natbox: 0 53 | allow_mbus_access_to_jumpbox: 0 54 | - name: update-director 55 | serial_groups: [deploy] 56 | plan: 57 | - do: 58 | - in_parallel: 59 | - get: deployments-src 60 | - get: director-templates 61 | - get: bosh-cli 62 | - get: director-state 63 | - &open-ssh-to-jumpbox 64 | put: terraform 65 | params: 66 | env_name: bosh-cpi-concourse 67 | terraform_source: deployments-src/terraform/concourse 68 | vars: 69 | allow_ssh_access_to_jumpbox: 1 70 | - task: update-director 71 | file: deployments-src/ci/tasks/deploy-director.yml 72 | params: 73 | GOOGLE_CREDENTIALS: {{gcp_credentials_json}} 74 | BOSH_DIRECTOR_SECRETS: {{bosh_director_secrets}} 75 | BOSH_CLIENT: {{bosh_client_admin}} 76 | BOSH_CLIENT_SECRET: {{bosh_client_secret_admin}} 77 | BOSH_CA_CERT: {{bosh_ca_cert}} 78 | BOSH_CA_KEY: 
{{director_ca_key}} 79 | JUMPBOX_SSH_KEY: {{jumpbox_ssh_key}} 80 | JUMPBOX_SSH_USER: {{jumpbox_ssh_user}} 81 | BOSH_TEAMS: {{bosh_teams}} 82 | ensure: 83 | put: director-state 84 | params: 85 | file: updated-director-state/bosh-state.json 86 | ensure: &close-ssh-to-jumpbox 87 | put: terraform 88 | params: 89 | env_name: bosh-cpi-concourse 90 | terraform_source: deployments-src/terraform/concourse 91 | vars: 92 | allow_ssh_access_to_jumpbox: 0 93 | 94 | - name: update-cloud-config 95 | serial_groups: [deploy] 96 | plan: 97 | - do: 98 | - in_parallel: 99 | - get: deployments-src 100 | - get: bosh-cli 101 | - *open-ssh-to-jumpbox 102 | - &wait-for-ssh 103 | task: wait-for-ssh 104 | file: deployments-src/ci/tasks/wait-ssh.yml 105 | params: 106 | JUMPBOX_HOST: {{jumpbox_host}} 107 | - task: update-cloud-config 108 | file: deployments-src/ci/tasks/update-cloud-config.yml 109 | params: 110 | BOSH_CLIENT: {{bosh_client}} 111 | BOSH_CLIENT_SECRET: {{bosh_client_secret}} 112 | BOSH_CA_CERT: {{bosh_ca_cert}} 113 | JUMPBOX_SSH_KEY: {{jumpbox_ssh_key}} 114 | JUMPBOX_SSH_USER: {{jumpbox_ssh_user}} 115 | ensure: *close-ssh-to-jumpbox 116 | 117 | - name: update-vpn-server 118 | serial_groups: [deploy] 119 | plan: 120 | - in_parallel: 121 | - get: deployments-src 122 | - get: stemcell 123 | - get: bosh-cli 124 | - get: networking-release 125 | - get: openvpn-release 126 | - get: caddy-bosh-release 127 | - get: ssoca 128 | - *open-ssh-to-jumpbox 129 | - *wait-for-ssh 130 | - task: deploy-vpn-server 131 | file: deployments-src/ci/tasks/deploy-vpn-server.yml 132 | params: 133 | BOSH_CLIENT: {{bosh_client}} 134 | BOSH_CLIENT_SECRET: {{bosh_client_secret}} 135 | BOSH_CA_CERT: {{bosh_ca_cert}} 136 | JUMPBOX_SSH_KEY: {{jumpbox_ssh_key}} 137 | JUMPBOX_SSH_USER: {{jumpbox_ssh_user}} 138 | VPN_SECRETS: {{cpi_vpn_secrets}} 139 | EXTERNAL_HOST: "vpn-bosh.ci.cf-app.com" 140 | ensure: *close-ssh-to-jumpbox 141 | 142 | - name: open-ssh-for-30m 143 | serial_groups: [deploy] 144 | plan: 145 | - 
get: deployments-src 146 | - *open-ssh-to-jumpbox 147 | - task: wait-for-ssh 148 | file: deployments-src/ci/tasks/wait-for-ssh.yml 149 | params: 150 | JUMPBOX_HOST: {{jumpbox_host}} 151 | ensure: *close-ssh-to-jumpbox 152 | 153 | - name: re-terraform 154 | serial_groups: [deploy, deploy-asia] 155 | plan: 156 | - in_parallel: 157 | - get: deployments-src 158 | - task: delete-firewall-rules 159 | file: deployments-src/ci/tasks/delete-firewall-rules.yml 160 | params: 161 | GOOGLE_PROJECT_ID: {{project_id}} 162 | GOOGLE_CREDENTIALS: {{gcp_credentials_json}} 163 | ensure: 164 | do: 165 | - *close-ssh-to-jumpbox 166 | 167 | resource_types: 168 | - name: terraform 169 | type: docker-image 170 | source: 171 | repository: harbor-repo.vmware.com/dockerhub-proxy-cache/ljfranklin/terraform-resource 172 | - name: gcs-resource 173 | type: docker-image 174 | source: 175 | repository: harbor-repo.vmware.com/dockerhub-proxy-cache/frodenas/gcs-resource 176 | 177 | resources: 178 | - name: deployments-src 179 | type: git 180 | source: 181 | uri: https://github.com/pivotal-cf/bosh-concourse-deployments.git 182 | branch: master 183 | - name: director-templates 184 | type: git 185 | source: 186 | uri: https://github.com/cloudfoundry/bosh-deployment.git 187 | branch: master 188 | - name: jumpbox-deployment-src 189 | type: git 190 | source: 191 | uri: https://github.com/cppforlife/jumpbox-deployment.git 192 | branch: master 193 | - name: terraform 194 | type: terraform 195 | source: 196 | migrated_from_storage: 197 | bucket: {{deployments_bucket_name}} 198 | bucket_path: terraform/ 199 | access_key_id: {{storage_access_key}} 200 | secret_access_key: {{storage_secret_key}} 201 | endpoint: https://storage.googleapis.com 202 | backend_type: gcs 203 | backend_config: 204 | bucket: {{deployments_bucket_name}} 205 | prefix: terraform/ 206 | credentials: 207 | access_key_id: {{storage_access_key}} 208 | secret_access_key: {{storage_secret_key}} 209 | 210 | vars: 211 | project_id: {{project_id}} 
212 | gcp_credentials_json: {{gcp_credentials_json}} 213 | ssh_trusted_cidrs: {{ssh_trusted_cidrs}} 214 | create_env_trusted_cidrs: {{create_env_trusted_cidrs}} 215 | bosh_core_web_trusted_cidrs: {{bosh_core_web_trusted_cidrs}} 216 | - name: concourse-natbox-state 217 | type: gcs-resource 218 | source: 219 | bucket: {{deployments_bucket_name}} 220 | json_key: {{gcp_credentials_json}} 221 | versioned_file: concourse/natbox-state.json 222 | - name: concourse-jumpbox-state 223 | type: gcs-resource 224 | source: 225 | bucket: {{deployments_bucket_name}} 226 | json_key: {{gcp_credentials_json}} 227 | versioned_file: concourse/jumpbox-state.json 228 | - name: director-state 229 | type: gcs-resource 230 | source: 231 | bucket: {{deployments_bucket_name}} 232 | json_key: {{gcp_credentials_json}} 233 | versioned_file: director/bosh-state.json 234 | - name: bosh-cli 235 | type: s3 236 | source: 237 | bucket: bosh-cli-artifacts 238 | regexp: bosh-cli-(\d+\.\d+\.\d+)-linux-amd64 239 | - name: stemcell 240 | type: bosh-io-stemcell 241 | source: 242 | name: bosh-google-kvm-ubuntu-trusty-go_agent 243 | - name: bosh-google-cpi-release 244 | type: bosh-io-release 245 | source: 246 | repository: cloudfoundry-incubator/bosh-google-cpi-release 247 | - name: networking-release 248 | type: bosh-io-release 249 | source: 250 | repository: cloudfoundry/networking-release 251 | - name: openvpn-release 252 | type: bosh-io-release 253 | source: 254 | repository: dpb587/openvpn-bosh-release 255 | - name: caddy-bosh-release 256 | type: bosh-io-release 257 | source: 258 | repository: dpb587/caddy-bosh-release 259 | - name: ssoca 260 | type: git 261 | source: 262 | uri: https://github.com/dpb587/ssoca-bosh-release.git 263 | branch: master 264 | -------------------------------------------------------------------------------- /ci/tasks/cleanup-nimbus-testbed: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nimbus-ctl --testbed kill 
${TESTBED_NAME} || true 4 | 5 | -------------------------------------------------------------------------------- /ci/tasks/cleanup-nimbus-testbed.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: devtools-docker-local.artifactory.eng.vmware.com/vmware/nimbus/nimbus-worker 8 | 9 | inputs: 10 | - name: deployments-src 11 | 12 | params: 13 | USER: "" 14 | TESTBED_NAME: "" 15 | 16 | run: 17 | path: deployments-src/ci/tasks/cleanup-nimbus-testbed 18 | -------------------------------------------------------------------------------- /ci/tasks/create-nimbus-vcenter-vars: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo 4 | 5 | export VCENTER_HOST=$(jq ".vc[0].systemPNID" nimbus-testbed-info/testbedInfo.json -r) 6 | 7 | cat > nimbus-vcenter-vars/nimbus-vcenter-vars.yml <> worker-ops.yml 73 | fi 74 | 75 | # if 'worker_public_key' add to 'tsa.team_authorized_keys' 76 | if [[ -n "$(echo "$team" | jq -r .worker_public_key)" ]]; then 77 | bosh int \ 78 | -l <( echo "$team" | jq '. + {"team": .name}' ) \ 79 | deployments-src/concourse/team-authorized-public-key-ops.yml >> worker-ops.yml 80 | fi 81 | done 82 | 83 | realpath worker-ops.yml 84 | } 85 | 86 | echo "Building Concourse Workers manifest..." 87 | worker_ops_path="$( create_worker_ops_file "$CONCOURSE_TEAMS" )" 88 | 89 | pushd deployments-src > /dev/null 90 | echo "Deploying Concourse..." 
91 | bosh -n --tty deploy --fix \ 92 | -l <(echo "${CONCOURSE_SECRETS}") \ 93 | -v concourse_external_url=${CONCOURSE_EXTERNAL_URL} \ 94 | -v concourse_basic_auth_username=${CONCOURSE_BASIC_AUTH_USERNAME} \ 95 | -v concourse_basic_auth_password=${CONCOURSE_BASIC_AUTH_PASSWORD} \ 96 | -v concourse_github_client_id=${CONCOURSE_GITHUB_CLIENT_ID} \ 97 | -v concourse_github_client_secret=${CONCOURSE_GITHUB_CLIENT_SECRET} \ 98 | -v stemcell_version="'${stemcell_version}'" \ 99 | -v concourse_version="'${concourse_version}'" \ 100 | -v bpm_version="'${bpm_version}'" \ 101 | -v caddy_version="'${caddy_version}'" \ 102 | -v windows_tools_version="'${windows_tools_version}'" \ 103 | -v windows_utilities_version="'${windows_utilities_version}'" \ 104 | -d "${DEPLOYMENT_NAME}" \ 105 | -o "$worker_ops_path" \ 106 | --vars-store="you-really-need-this-flag-but-not-this-file.yml" \ 107 | "${DEPLOYMENT_CONFIG_PATH}" 108 | echo "Successfully deployed Concourse!" 109 | popd > /dev/null 110 | -------------------------------------------------------------------------------- /ci/tasks/deploy-concourse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: boshcpi/bosh-concourse-deployments 8 | 9 | inputs: 10 | - name: bosh-cli 11 | - name: bpm 12 | - name: caddy 13 | - name: concourse 14 | - name: deployments-src 15 | - name: stemcell 16 | - name: terraform 17 | - name: windows-stemcell 18 | - name: windows-tools 19 | - name: windows-utilities 20 | 21 | params: 22 | DEPLOYMENT_NAME: "" 23 | DEPLOYMENT_CONFIG_PATH: "" 24 | CONCOURSE_SECRETS: "" 25 | BOSH_ENVIRONMENT: "" 26 | BOSH_CLIENT: "" 27 | BOSH_CLIENT_SECRET: "" 28 | BOSH_CA_CERT: "" 29 | JUMPBOX_HOST: "" 30 | JUMPBOX_SSH_USER: "" 31 | JUMPBOX_SSH_KEY: "" 32 | CONCOURSE_EXTERNAL_URL: "" 33 | CONCOURSE_BASIC_AUTH_USERNAME: "" 34 | CONCOURSE_BASIC_AUTH_PASSWORD: "" 35 | CONCOURSE_GITHUB_CLIENT_ID: "" 36 
| CONCOURSE_GITHUB_CLIENT_SECRET: "" 37 | CONCOURSE_TEAMS: "[]" 38 | 39 | run: 40 | path: deployments-src/ci/tasks/deploy-concourse 41 | -------------------------------------------------------------------------------- /ci/tasks/deploy-director: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | # env 6 | : ${GOOGLE_CREDENTIALS:?} 7 | : ${BOSH_DIRECTOR_SECRETS:?} 8 | : ${BOSH_CLIENT:?} 9 | : ${BOSH_CLIENT_SECRET:?} 10 | : ${BOSH_CA_CERT:?} 11 | : ${BOSH_CA_KEY:?} 12 | : ${JUMPBOX_SSH_KEY:?} 13 | : ${JUMPBOX_SSH_USER:?} 14 | : ${BOSH_TEAMS:="[]"} 15 | 16 | source deployments-src/ci/utils 17 | 18 | generate_uaa_users_ops_file() { 19 | echo "$BOSH_TEAMS" | yaml_to_json | jq ' 20 | del(.[] | select(.uaa == null)) 21 | | 22 | map( 23 | { 24 | "type": "replace", 25 | "path": ("/instance_groups/name=bosh/jobs/name=uaa/properties/uaa/clients/"+.name+"?"), 26 | "value": .uaa 27 | } 28 | ) 29 | ' 30 | } 31 | 32 | BOSH_CA_CERT="$( read_with_escaped_newlines "${BOSH_CA_CERT}" )" 33 | BOSH_CA_KEY="$( read_with_escaped_newlines "${BOSH_CA_KEY}" )" 34 | 35 | remove_terraform_prefix terraform/metadata director_ > metadata 36 | 37 | ssh_tunnel \ 38 | "${JUMPBOX_SSH_KEY}" \ 39 | "${JUMPBOX_SSH_USER}@$(jq -r -e .jumpbox_external_ip metadata)" \ 40 | "$(jq -r -e .internal_ip metadata)" 41 | 42 | vars_store="$(mktemp /tmp/vars-store-XXXXXX)" 43 | 44 | cp director-state/*.json updated-director-state 45 | echo "Updating director..." 
46 | bosh -n create-env \ 47 | -o director-templates/gcp/cpi.yml \ 48 | -o director-templates/local-dns.yml \ 49 | -o deployments-src/bosh/debug-ops-file.yml \ 50 | -o deployments-src/bosh/bootstrap-ops-file.yml \ 51 | -o deployments-src/bosh/datadog-ops-file.yml \ 52 | -o director-templates/uaa.yml \ 53 | -o director-templates/jumpbox-user.yml \ 54 | -o <( generate_uaa_users_ops_file ) \ 55 | --vars-store "$vars_store" \ 56 | -l <(echo "${BOSH_DIRECTOR_SECRETS}") \ 57 | -l metadata \ 58 | -v "admin_user=\"${BOSH_CLIENT}\"" \ 59 | -v "admin_password=\"${BOSH_CLIENT_SECRET}\"" \ 60 | -v "default_ca={ \"certificate\": \"${BOSH_CA_CERT}\", \"private_key\": \"${BOSH_CA_KEY}\" }" \ 61 | -v "gcp_credentials_json='${GOOGLE_CREDENTIALS}'" \ 62 | --state updated-director-state/bosh-state.json \ 63 | director-templates/bosh.yml 64 | 65 | echo "Successfully updated director!" 66 | 67 | rm -f "$vars_store" 68 | -------------------------------------------------------------------------------- /ci/tasks/deploy-director.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: boshcpi/bosh-concourse-deployments 8 | 9 | inputs: 10 | - name: terraform 11 | - name: deployments-src 12 | - name: director-templates 13 | - name: bosh-cli 14 | - name: director-state 15 | 16 | outputs: 17 | - name: updated-director-state 18 | 19 | params: 20 | GOOGLE_CREDENTIALS: "" 21 | BOSH_DIRECTOR_SECRETS: "" 22 | BOSH_CLIENT: "" 23 | BOSH_CLIENT_SECRET: "" 24 | BOSH_CA_CERT: "" 25 | BOSH_CA_KEY: "" 26 | JUMPBOX_SSH_KEY: "" 27 | JUMPBOX_SSH_USER: "" 28 | BOSH_TEAMS: "" 29 | run: 30 | path: deployments-src/ci/tasks/deploy-director 31 | -------------------------------------------------------------------------------- /ci/tasks/deploy-jumpbox: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | # env 6 | : 
${TEAMS:?} 7 | : ${GOOGLE_CREDENTIALS:?} 8 | : ${TRUSTED_CIDRS:?} 9 | : ${MACHINE_TYPE:?} 10 | 11 | source deployments-src/ci/utils 12 | 13 | generate_jumpbox_users_ops_file() { 14 | echo "$TEAMS" | yaml_to_json | jq ' 15 | del(.[] | select(.jumpbox == null)) 16 | | 17 | map({ 18 | "type": "replace", 19 | "path": "/instance_groups/name=jumpbox/jobs/name=user_add/properties/users?/-", 20 | "value": { 21 | "name": .name, 22 | "sudo": false, 23 | "public_key": .jumpbox.public_key 24 | } 25 | }) 26 | ' 27 | } 28 | generate_jumpbox_nats_port_ops_file() { 29 | echo " 30 | - type: replace 31 | path: /cloud_provider/mbus 32 | value: https://mbus:((mbus_bootstrap_password))@((external_ip)):443 33 | - type: replace 34 | path: /cloud_provider/properties/agent/mbus 35 | value: https://mbus:((mbus_bootstrap_password))@((internal_ip)):443 36 | " 37 | } 38 | 39 | remove_terraform_prefix terraform/metadata jumpbox_ > metadata 40 | 41 | cp jumpbox-state/*.json updated-jumpbox-state 42 | echo "Updating jumpbox..." 43 | BOSH_LOG_LEVEL=debug bosh -n create-env \ 44 | --state updated-jumpbox-state/jumpbox-state.json \ 45 | --vars-store "you-really-need-this-flag-but-not-this-file.yml" \ 46 | -o jumpbox-deployment-src/gcp/cpi.yml \ 47 | -o deployments-src/jumpbox/custom-type-ops.yml \ 48 | -o deployments-src/jumpbox/remove-users-ops.yml \ 49 | -o <( generate_jumpbox_users_ops_file ) \ 50 | -o <( generate_jumpbox_nats_port_ops_file ) \ 51 | -l metadata \ 52 | -v "gcp_credentials_json='${GOOGLE_CREDENTIALS}'" \ 53 | -v "trusted_cidrs='${TRUSTED_CIDRS}'" \ 54 | -v "machine_type='${MACHINE_TYPE}'" \ 55 | jumpbox-deployment-src/jumpbox.yml 56 | 57 | echo "Successfully updated jumpbox!" 
58 | -------------------------------------------------------------------------------- /ci/tasks/deploy-jumpbox.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: boshcpi/bosh-concourse-deployments 8 | 9 | inputs: 10 | - name: terraform 11 | - name: deployments-src 12 | - name: bosh-cli 13 | - name: jumpbox-state 14 | - name: jumpbox-deployment-src 15 | 16 | outputs: 17 | - name: updated-jumpbox-state 18 | 19 | params: 20 | GOOGLE_CREDENTIALS: "" 21 | TEAMS: "" 22 | TRUSTED_CIDRS: "" 23 | MACHINE_TYPE: "n1-standard-1" 24 | 25 | run: 26 | path: deployments-src/ci/tasks/deploy-jumpbox 27 | -------------------------------------------------------------------------------- /ci/tasks/deploy-natbox: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | # env 6 | : ${GOOGLE_CREDENTIALS:?} 7 | 8 | source deployments-src/ci/utils 9 | 10 | remove_terraform_prefix terraform/metadata natbox_ > metadata 11 | 12 | cp natbox-state/*.json updated-natbox-state/natbox-state.json 13 | 14 | echo "Updating NAT..." 15 | bosh -n create-env \ 16 | --state updated-natbox-state/natbox-state.json \ 17 | --vars-store "you-really-need-this-flag-but-not-this-file.yml" \ 18 | -l metadata \ 19 | -v "gcp_credentials_json='${GOOGLE_CREDENTIALS}'" \ 20 | deployments-src/natbox/natbox.yml 21 | 22 | echo "Successfully updated nat!" 
23 | 
--------------------------------------------------------------------------------
/ci/tasks/deploy-natbox.yml:
--------------------------------------------------------------------------------
1 | ---
2 | platform: linux
3 | 
4 | image_resource:
5 |   type: docker-image
6 |   source:
7 |     repository: boshcpi/bosh-concourse-deployments
8 | 
9 | inputs:
10 | - name: terraform
11 | - name: deployments-src
12 | - name: bosh-cli
13 | - name: natbox-state
14 | - name: networking-release
15 | - name: bosh-google-cpi-release
16 | - name: stemcell
17 | 
18 | outputs:
19 | - name: updated-natbox-state
20 | 
21 | params:
22 |   GOOGLE_CREDENTIALS: ""
23 | 
24 | run:
25 |   path: deployments-src/ci/tasks/deploy-natbox
--------------------------------------------------------------------------------
/ci/tasks/deploy-nimbus-testbed:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | 
3 | # env
4 | : ${TESTBED_NAME:?}
5 | 
6 | # Start the worker-state output from an empty BOSH state file.
7 | echo "{}" > ./worker-state/clean-env
8 | 
9 | # Delete AdmissionControlQuota lines from the Nimbus library before deploying
10 | # (presumably to bypass quota enforcement on the pool — verify before changing).
11 | sed '/AdmissionControlQuota/d' /mts/git/nimbus/lib/nimbus/admission_control.rb > /tmp/admission_control.rb
12 | mv -f /tmp/admission_control.rb /mts/git/nimbus/lib/nimbus/admission_control.rb
13 | 
14 | # Deploy the dual-network vCenter 7.0 testbed with a 7-day lease.
15 | nimbus-testbeddeploy \
16 |   --testbedSpecRubyFile deployments-src/nimbus-testbed/nimbus_vc70_dual_networks.rb \
17 |   --runName "${TESTBED_NAME}" \
18 |   --lease 7 \
19 |   --context general:nsx
20 | 
21 | # Publish the generated testbed metadata as this task's output.
22 | cp /tmp/nimbus-*/*/testbedInfo.json ./nimbus-testbed-info/
--------------------------------------------------------------------------------
/ci/tasks/deploy-nimbus-testbed.yml:
--------------------------------------------------------------------------------
1 | ---
2 | platform: linux
3 | 
4 | image_resource:
5 |   type: docker-image
6 |   source:
7 |     repository: devtools-docker-local.artifactory.eng.vmware.com/vmware/nimbus/nimbus-worker
8 | 
9 | inputs:
10 | - name: deployments-src
11 | 
12 | outputs:
13 | - name: nimbus-testbed-info
14 | - name: worker-state
15 | 
16 | params:
17 |   USER: ""
18 |   TESTBED_NAME: ""
19 | 
20 | run: 21 | path: deployments-src/ci/tasks/deploy-nimbus-testbed 22 | -------------------------------------------------------------------------------- /ci/tasks/deploy-vpn-server: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | : ${BOSH_CLIENT:?} 6 | : ${BOSH_CLIENT_SECRET:?} 7 | : ${BOSH_CA_CERT:?} 8 | : ${JUMPBOX_SSH_USER:?} 9 | : ${JUMPBOX_SSH_KEY:?} 10 | : ${VPN_SECRETS:?} 11 | 12 | source deployments-src/ci/utils 13 | 14 | terraform_metadata="$( realpath terraform/metadata )" 15 | stemcell_version="$( cat stemcell/version )" 16 | 17 | export BOSH_ENVIRONMENT="$( jq -r -e .director_internal_ip $terraform_metadata )" 18 | 19 | setup_ssh_tunnel \ 20 | "$JUMPBOX_SSH_KEY" \ 21 | "$JUMPBOX_SSH_USER@$( jq -r -e .jumpbox_external_ip $terraform_metadata )" 22 | 23 | echo "Uploading OpenVPN release..." 24 | bosh -n --tty upload-release openvpn-release/release.tgz 25 | 26 | echo "Uploading Networking release..." 27 | bosh -n --tty upload-release networking-release/release.tgz 28 | 29 | echo "Uploading Caddy release..." 30 | bosh -n --tty upload-release caddy-bosh-release/release.tgz 31 | 32 | echo "Uploading SSOCA release..." 33 | ( cd ssoca; bosh -n --tty upload-release ) 34 | 35 | echo "Uploading stemcell..." 36 | bosh -n --tty upload-stemcell stemcell/stemcell.tgz 37 | 38 | generate_ops_file() { 39 | local team=${1?'Concourse team is required.'} 40 | local ops_file=${2?'Ops file is required.'} 41 | 42 | bosh int \ 43 | -v github_team="$team" \ 44 | openvpn-team-ops.yml \ 45 | >> "$ops_file" 46 | } 47 | 48 | pushd deployments-src/vpn > /dev/null 49 | iterate_teams "$( echo "$VPN_SECRETS" | yaml_to_json | jq -r .github_teams )" \ 50 | generate_ops_file /tmp/ssoca-teams-ops.yml 51 | 52 | echo "Deploying OpenVPN Server..." 
53 | bosh -n --tty deploy -d openvpn \ 54 | -v stemcell_version="'$stemcell_version'" \ 55 | -v external_host="$EXTERNAL_HOST" \ 56 | -o /tmp/ssoca-teams-ops.yml \ 57 | -o caddy-ops.yml \ 58 | -l <(echo "${VPN_SECRETS}") \ 59 | -l "$terraform_metadata" \ 60 | --vars-store="you-really-need-this-flag-but-not-this-file.yml" \ 61 | manifest.yml 62 | echo "Successfully deployed OpenVPN Server!" 63 | popd > /dev/null 64 | -------------------------------------------------------------------------------- /ci/tasks/deploy-vpn-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: boshcpi/bosh-concourse-deployments 8 | 9 | inputs: 10 | - name: deployments-src 11 | - name: stemcell 12 | - name: bosh-cli 13 | - name: networking-release 14 | - name: openvpn-release 15 | - name: caddy-bosh-release 16 | - name: ssoca 17 | - name: terraform 18 | 19 | params: 20 | JUMPBOX_SSH_USER: "" 21 | JUMPBOX_SSH_KEY: "" 22 | BOSH_CLIENT: "" 23 | BOSH_CLIENT_SECRET: "" 24 | BOSH_CA_CERT: "" 25 | VPN_SECRETS: "" 26 | EXTERNAL_HOST: "" 27 | 28 | run: 29 | path: deployments-src/ci/tasks/deploy-vpn-server 30 | -------------------------------------------------------------------------------- /ci/tasks/deploy-worker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | # env 6 | : ${ENVIRONMENT_NAME:?} 7 | : ${WORKER_SECRETS:?} 8 | 9 | set +u 10 | source deployments-src/ci/utils 11 | 12 | if [ -f /etc/profile.d/chruby.sh ]; then 13 | source /etc/profile.d/chruby.sh 14 | chruby ruby 15 | fi 16 | 17 | if [ -d testbed-info ]; then 18 | # - vcenter_resource_pool 19 | export WORKER_SECRETS=$(echo -e "$WORKER_SECRETS\n$( 20 | bosh int <(jq '.vc[0] | { 21 | vcenter_address: .ip, 22 | vcenter_user: .vimUsername, 23 | vcenter_password: .vimPassword 24 | }' testbed-info/testbed-info.json) 25 | )") 26 | 27 
| public_ip=$(curl -s "$(jq -r '.network[0].gateway' testbed-info/testbed-info.json):4827/nsips") 28 | net=$(echo "${public_ip}" | jq -r '"\(.ip)/\(.netmask)"') 29 | # ubuntu gives us ruby 2.3.1 which does not have the prefix method on ip 30 | # us solution from here https://stackoverflow.com/questions/1825928/netmask-to-cidr-in-ruby 31 | subnet=$(ruby -e " 32 | require 'ipaddr'; ip = IPAddr.new('${net}'); 33 | puts \"#{ip}/#{ip.instance_variable_get(:@mask_addr).to_i.to_s(2).count('1')}\" 34 | ") 35 | export WORKER_SECRETS=$(echo -e "${WORKER_SECRETS}\n$( 36 | bosh int <(echo "${public_ip}" | jq '. | { 37 | vcenter_public_worker_ip: .ip, 38 | vcenter_public_gateway: .gateway 39 | }') 40 | )\nvcenter_public_cidr: ${subnet}") 41 | 42 | export WORKER_SECRETS=$(echo -e "${WORKER_SECRETS}\n$(echo " 43 | bosh_vsphere_cpi_url: $(cat bosh-cpi-release/url) 44 | bosh_vsphere_cpi_version: $(cat bosh-cpi-release/version) 45 | bosh_vsphere_cpi_sha1: $(cat bosh-cpi-release/sha1) 46 | concourse_url: $(cat concourse/url) 47 | concourse_version: $(cat concourse/version) 48 | concourse_sha1: $(cat concourse/sha1) 49 | stemcell_url: $(cat stemcell/url) 50 | stemcell_sha1: $(cat stemcell/sha1) 51 | " | awk '{$1=$1};1')") 52 | 53 | fi 54 | 55 | cp worker-state/*.json updated-worker-state/worker-state.json 56 | 57 | echo "Updating WORKER..." 58 | bosh --tty -n create-env \ 59 | --state updated-worker-state/worker-state.json \ 60 | -v concourse_release_path="$( realpath $PWD/concourse/*.tgz )" \ 61 | -v bosh_cpi_release_path="$( realpath $PWD/bosh-cpi-release/*.tgz )" \ 62 | -v stemcell_path="$( realpath $PWD/stemcell/*.tgz )" \ 63 | -l <( echo "${WORKER_SECRETS}" ) \ 64 | $( echo ${OPTIONAL_FLAGS} ) \ 65 | --vars-store="creds.yml" \ 66 | deployments-src/concourse/workers/${ENVIRONMENT_NAME}/worker.yml 67 | 68 | echo "Successfully updated worker!" 
69 | 
--------------------------------------------------------------------------------
/ci/tasks/deploy-worker.yml:
--------------------------------------------------------------------------------
1 | ---
2 | platform: linux
3 | 
4 | image_resource:
5 |   type: docker-image
6 |   source:
7 |     repository: bosh/main
8 | 
9 | inputs:
10 | - name: deployments-src
11 | - name: bosh-cpi-release
12 | - name: bosh-cli
13 | - name: concourse
14 | - name: worker-state
15 | - name: stemcell
16 | - name: testbed-info
17 |   optional: true
18 | 
19 | outputs:
20 | - name: updated-worker-state
21 | 
22 | params:
23 |   IAAS_CREDENTIALS: ""
24 |   OPTIONAL_FLAGS: ""
25 |   WORKER_SECRETS: ""
26 |   ENVIRONMENT_NAME: ""
27 | 
28 | run:
29 |   path: deployments-src/ci/tasks/deploy-worker
--------------------------------------------------------------------------------
/ci/tasks/extend-lease-nimbus-testbed:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Fail on command errors and on unset variables, consistent with the
4 | # other task scripts in ci/tasks.
5 | set -eu
6 | 
7 | # env
8 | : ${DAYS:?}
9 | : ${TESTBED_NAME:?}
10 | 
11 | # Extend the lease on the named Nimbus testbed by DAYS days.
12 | nimbus-ctl --lease "${DAYS}" \
13 |   --testbed extend-lease "${TESTBED_NAME}"
--------------------------------------------------------------------------------
/ci/tasks/extend-lease-nimbus-testbed.yml:
--------------------------------------------------------------------------------
1 | ---
2 | platform: linux
3 | 
4 | image_resource:
5 |   type: docker-image
6 |   source:
7 |     repository: devtools-docker-local.artifactory.eng.vmware.com/vmware/nimbus/nimbus-worker
8 | 
9 | inputs:
10 | - name: deployments-src
11 | 
12 | params:
13 |   USER: ""
14 |   DAYS: 7
15 |   TESTBED_NAME: ""
16 |   NIMBUS_LOCATION: "sc"
17 | 
18 | run:
19 |   path: deployments-src/ci/tasks/extend-lease-nimbus-testbed
--------------------------------------------------------------------------------
/ci/tasks/set-teams-core:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eu
4 | 
5 | pushd deployments-src/concourse/teams/core
6 | wget -O ./fly
"${CONCOURSE_EXTERNAL_URL}/api/v1/cli?arch=amd64&platform=linux" 7 | chmod +x ./fly 8 | 9 | yes | ./fly -t concourse login \ 10 | -c "${CONCOURSE_EXTERNAL_URL}" \ 11 | -u "${CONCOURSE_BASIC_AUTH_USERNAME}" \ 12 | -p "${CONCOURSE_BASIC_AUTH_PASSWORD}" 13 | 14 | 15 | ./fly -t concourse set-team --non-interactive -n bosh-io -c bosh-io.yml 16 | ./fly -t concourse set-team --non-interactive -n bosh-packages -c bosh-packages.yml 17 | ./fly -t concourse set-team --non-interactive -n dev -c dev.yml 18 | ./fly -t concourse set-team --non-interactive -n main -c main.yml 19 | ./fly -t concourse set-team --non-interactive -n pcf -c pcf.yml 20 | ./fly -t concourse set-team --non-interactive -n bosh-director -c bosh-director.yml 21 | popd 22 | 23 | -------------------------------------------------------------------------------- /ci/tasks/set-teams-core.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: boshcpi/bosh-concourse-deployments 8 | 9 | inputs: 10 | - name: deployments-src 11 | 12 | params: 13 | CONCOURSE_EXTERNAL_URL: "" 14 | CONCOURSE_BASIC_AUTH_USERNAME: "" 15 | CONCOURSE_BASIC_AUTH_PASSWORD: "" 16 | 17 | run: 18 | path: deployments-src/ci/tasks/set-teams-core 19 | -------------------------------------------------------------------------------- /ci/tasks/update-cloud-config: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | # env 6 | : ${BOSH_CLIENT:?} 7 | : ${BOSH_CLIENT_SECRET:?} 8 | : ${BOSH_CA_CERT:?} 9 | : ${JUMPBOX_SSH_USER:?} 10 | : ${JUMPBOX_SSH_KEY:?} 11 | 12 | source deployments-src/ci/utils 13 | 14 | terraform_metadata="$( realpath terraform/metadata )" 15 | 16 | export BOSH_ENVIRONMENT="$( jq -r -e .director_internal_ip $terraform_metadata )" 17 | 18 | setup_ssh_tunnel \ 19 | "$JUMPBOX_SSH_KEY" \ 20 | "$JUMPBOX_SSH_USER@$( jq -r -e 
.jumpbox_external_ip $terraform_metadata )" 21 | 22 | echo "Updating cloud config..." 23 | bosh -n update-cloud-config \ 24 | -l "${terraform_metadata}" \ 25 | --vars-store="you-really-need-this-flag-but-not-this-file.yml" \ 26 | deployments-src/bosh/cloud-config.yml 27 | 28 | echo "Successfully updated cloud-config!" 29 | -------------------------------------------------------------------------------- /ci/tasks/update-cloud-config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: boshcpi/bosh-concourse-deployments 8 | 9 | inputs: 10 | - name: terraform 11 | - name: deployments-src 12 | - name: bosh-cli 13 | 14 | params: 15 | BOSH_CLIENT: "" 16 | BOSH_CLIENT_SECRET: "" 17 | BOSH_CA_CERT: "" 18 | JUMPBOX_SSH_USER: "jumpbox" 19 | JUMPBOX_SSH_KEY: "" 20 | 21 | run: 22 | path: deployments-src/ci/tasks/update-cloud-config 23 | -------------------------------------------------------------------------------- /ci/tasks/wait-for-ssh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu -o pipefail 4 | 5 | # env 6 | : ${SLEEP_TIME:?} 7 | : ${JUMPBOX_HOST:?} 8 | 9 | echo "Your jumpbox address is '${JUMPBOX_HOST}'." 10 | echo "We'll leave the SSH port open for $(( ${SLEEP_TIME} / 60 )) minutes." 11 | echo "Hit cancel once to close the SSH port early" 12 | echo "😴 ..." 
13 | sleep "${SLEEP_TIME}" 14 | -------------------------------------------------------------------------------- /ci/tasks/wait-for-ssh.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: boshcpi/bosh-concourse-deployments 8 | 9 | inputs: 10 | - name: deployments-src 11 | 12 | params: 13 | SLEEP_TIME: "1800" 14 | JUMPBOX_HOST: "" 15 | 16 | run: 17 | path: deployments-src/ci/tasks/wait-for-ssh 18 | -------------------------------------------------------------------------------- /ci/tasks/wait-ssh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu -o pipefail 4 | 5 | # env 6 | : ${JUMPBOX_HOST:?} 7 | 8 | while ! nc -v -z $JUMPBOX_HOST 22 ; do sleep 1 ; done 9 | -------------------------------------------------------------------------------- /ci/tasks/wait-ssh.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: boshcpi/bosh-concourse-deployments 8 | 9 | inputs: 10 | - name: deployments-src 11 | 12 | params: 13 | JUMPBOX_HOST: "" 14 | 15 | run: 16 | path: deployments-src/ci/tasks/wait-ssh 17 | -------------------------------------------------------------------------------- /ci/utils: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Oportunistically configure bosh for use 4 | configure_bosh_cli() { 5 | local bosh_input="$(realpath bosh-cli/*bosh-cli-* 2>/dev/null || true)" 6 | if [[ -n "${bosh_input}" ]]; then 7 | export bosh_cli="/usr/local/bin/bosh" 8 | cp "${bosh_input}" "${bosh_cli}" 9 | chmod +x "${bosh_cli}" 10 | fi 11 | } 12 | configure_bosh_cli 13 | 14 | read_with_escaped_newlines() { 15 | echo "$1" | perl -pe 's|\n|\\n|' 16 | } 17 | 18 | 
remove_terraform_prefix() { 19 | local terraform_metadata=${1?'Terraform metdata file is required.'} 20 | local prefix=${2?'Prefix is required.'} 21 | 22 | jq --arg prefix "$prefix" 'with_entries(.key |= sub("^" + $prefix; ""))' "$terraform_metadata" 23 | } 24 | 25 | yaml_to_json() { 26 | ruby -ryaml -rjson -e '$stdout.puts(JSON.pretty_generate(YAML.load($stdin.read)))' 27 | } 28 | 29 | iterate_teams() { 30 | local concourse_teams=${1?'Concourse teams are required.'} 31 | local function=${2?'BASH function is required.'} 32 | 33 | for ((i = 0 ; i < $(echo "$concourse_teams" | jq -r length) ; i++ )); do 34 | local team="$(echo "$concourse_teams" | jq -r ".[$i]")" 35 | $function "$team" "${@:3}" 36 | done 37 | } 38 | 39 | set_team() { 40 | local team=${1?'Concourse team is required.'} 41 | local concourse_target=${2?'Concourse target is required.'} 42 | 43 | echo -e "\nAdding '$( echo "$team" | jq -r .name )' team!" 44 | yes | ./fly -t $concourse_target set-team \ 45 | --non-interactive \ 46 | --team-name="$( echo "$team" | jq -r .name )" \ 47 | --github-team="$( echo "$team" | jq -r .github_team )" 48 | } 49 | 50 | destroy_stale_teams() { 51 | local concourse_target=${1?'Concourse target is required.'} 52 | local concourse_teams=${2?'Concourse teams are required.'} 53 | 54 | echo "Destroying stale Concourse teams..." 55 | 56 | ./fly -t $concourse_target teams \ 57 | | awk '{$1=$1}1' \ 58 | | xargs -I team echo '{"name": "team"}' \ 59 | > /tmp/current-teams.json 60 | local current_teams="$( cat /tmp/current-teams.json | jq -s . )" 61 | 62 | for ((i = 0; i < $(echo "$current_teams" | jq -r length) ; i++ )); do 63 | local team_name="$(echo "$current_teams" | jq -r ".[$i].name")" 64 | 65 | local found=$( echo "$concourse_teams" | jq -r --arg t $team_name '. | map(select(.name == $t))[0] != null' ) 66 | if [[ "$found" == "false" && "$team_name" != "main" ]]; then 67 | echo -e "\nDestroying '$team_name' team!!!" 
68 | # echo "$team_name" | ./fly -t "$concourse_target" destroy-team -n "$team_name" 69 | fi 70 | done 71 | } 72 | 73 | ssh_key_file() { 74 | local tmp_dir=${1?'temp dir is required.'} 75 | local jumpbox_ssh_key=${2?'Jumpbox SSH key is required.'} 76 | 77 | local ssh_key_path="${tmp_dir}/id_rsa.pem" 78 | echo "${jumpbox_ssh_key}" > "${ssh_key_path}" 79 | chmod 400 "${ssh_key_path}" 80 | 81 | echo "${ssh_key_path}" 82 | } 83 | 84 | cleanup_bosh_ssh_tunnel() { 85 | killall -KILL ssh 86 | } 87 | 88 | setup_ssh_tunnel() { 89 | : ${BOSH_ENVIRONMENT:?} 90 | 91 | local jumpbox_ssh_key=${1?'Jumpbox SSH key is required.'} 92 | local jumpbox_address=${2?'Jumpbox address is required.'} 93 | 94 | local tmp_dir="$( mktemp -d /tmp/deploy-XXXXXX)" 95 | trap "{ rm -rf '$tmp_dir'; }" EXIT # will be overridden by future trap calls 96 | ssh_key_path="$( ssh_key_file "$tmp_dir" "$jumpbox_ssh_key" )" 97 | 98 | local ssh_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${ssh_key_path} -fnNT" 99 | trap "{ cleanup_bosh_ssh_tunnel; rm -rf '$tmp_dir'; }" EXIT 100 | 101 | ssh $ssh_args -D 1080 "${jumpbox_address}" & 102 | export BOSH_ALL_PROXY=socks5://localhost:1080 103 | } 104 | 105 | cleanup_ssh_tunnel() { 106 | local ssh_args=${1?'SSH arguments are required.'} 107 | local tmp_dir=${2?'temp dir is required.'} 108 | local jumpbox_address=${3?'Jumpbox address is required.'} 109 | 110 | ssh $ssh_args -S "${tmp_dir}/tunnel-socket" -O exit "${jumpbox_address}" 111 | } 112 | 113 | ssh_tunnel() { 114 | local jumpbox_ssh_key=${1?'Jumpbox SSH key is required.'} 115 | local jumpbox_address=${2?'Jumpbox address is required.'} 116 | local machine_internal_ip=${3?'Machine internal IP is required.'} 117 | 118 | local tmp_dir="$( mktemp -d /tmp/deploy-XXXXXX)" 119 | trap "{ rm -rf '$tmp_dir'; }" EXIT # will be overridden by future trap calls 120 | ssh_key_path="$( ssh_key_file "$tmp_dir" "$jumpbox_ssh_key" )" 121 | 122 | ssh_args="-o StrictHostKeyChecking=no -o 
UserKnownHostsFile=/dev/null -i ${ssh_key_path}" 123 | ssh $ssh_args -M -S "${tmp_dir}/tunnel-socket" -fnNT -L 6868:"${machine_internal_ip}":6868 "${jumpbox_address}" 124 | trap "{ cleanup_ssh_tunnel '$ssh_args' '$tmp_dir' '$jumpbox_address'; rm -rf '$tmp_dir'; }" EXIT 125 | } 126 | 127 | take_disk_snapshot() { 128 | local deployment_name=${1?'Deployment name is required.'} 129 | local project_id=${2?'Google Cloud Project ID is required.'} 130 | local zone=${3?'Zone is required'} 131 | 132 | bosh -n -d "${deployment_name}" instances --details --column='Disk CIDs' | grep 'disk' | \ 133 | xargs -n1 gcloud --project "${project_id}" compute disks snapshot \ 134 | --snapshot-names "concourse-snapshot-$(date +'%Y%m%d%H%M%S')" \ 135 | --zone "${zone}" 136 | } 137 | 138 | setup_gcloud_cli() { 139 | local google_credentials=${1?'Google Cloud credentials are required.'} 140 | 141 | source /root/.bashrc 142 | local google_service_account=$( echo "${google_credentials}" | jq -r .client_email ) 143 | 144 | gcloud auth activate-service-account "${google_service_account}" \ 145 | --key-file <( echo "${google_credentials}" ) 146 | } 147 | -------------------------------------------------------------------------------- /concourse/concourse-core.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: concourse-core 3 | 4 | releases: 5 | - name: bpm 6 | version: ((bpm_version)) 7 | - name: caddy 8 | version: ((caddy_version)) 9 | - name: concourse 10 | version: ((concourse_version)) 11 | - name: windows-tools 12 | version: ((windows_tools_version)) 13 | - name: windows-utilities 14 | version: ((windows_utilities_version)) 15 | 16 | stemcells: 17 | - alias: default 18 | os: ubuntu-xenial 19 | version: ((stemcell_version)) 20 | # Windows Stemcell for AWS: https://s3.amazonaws.com/bosh-windows-stemcells/light-bosh-stemcell-*-aws-xen-hvm-windows2012R2-go_agent.tgz 21 | - alias: windows 22 | os: windows2016 23 | version: latest 24 | 25 | 
update: 26 | canaries: 1 27 | max_in_flight: 6 28 | canary_watch_time: 30000 - 90000 29 | update_watch_time: 30000 - 90000 30 | serial: false 31 | 32 | instance_groups: 33 | - name: web 34 | instances: 1 35 | vm_type: concourse_core 36 | persistent_disk_type: caddy-ssd 37 | stemcell: default 38 | azs: [us1] 39 | networks: 40 | - name: concourse 41 | jobs: 42 | - name: bpm 43 | release: bpm 44 | - name: web 45 | release: concourse 46 | properties: 47 | external_url: ((concourse_external_url)) 48 | token_signing_key: ((token_signing_key)) 49 | postgresql: 50 | host: ((database_host)) 51 | database: ((database_name)) 52 | role: 53 | name: ((database_role)) 54 | password: ((database_password)) 55 | worker_gateway: 56 | host_key: ((tsa_host_key)) 57 | authorized_keys: ((tsa_authorized_keys)) 58 | container_placement_strategy: fewest-build-containers 59 | github_auth: 60 | client_id: ((concourse_github_client_id)) 61 | client_secret: ((concourse_github_client_secret)) 62 | add_local_users: 63 | - ((concourse_basic_auth_username)):((concourse_basic_auth_password)) 64 | main_team: 65 | auth: 66 | local: 67 | users: [((concourse_basic_auth_username))] 68 | github: 69 | teams: ((main_team_owner_github_teams)) 70 | - name: caddy 71 | release: caddy 72 | properties: 73 | acme: 74 | email: ((letsencrypt_registration_email)) 75 | caddyfile: | 76 | ((concourse_external_host)) { 77 | gzip 78 | 79 | tls { 80 | dns route53 81 | } 82 | proxy / localhost:8080 { 83 | transparent 84 | websocket 85 | } 86 | } 87 | env: 88 | AWS_ACCESS_KEY_ID: ((route53_aws_access_id)) 89 | AWS_SECRET_ACCESS_KEY: ((route53_aws_secret_access_key)) 90 | AWS_HOSTED_ZONE_ID: ((route53_hosted_zone_id)) 91 | 92 | - name: worker 93 | instances: 16 94 | vm_type: concourse_core_worker_12_16 95 | stemcell: default 96 | azs: [us1] 97 | networks: 98 | - name: concourse 99 | jobs: 100 | - name: worker 101 | release: concourse 102 | properties: 103 | worker_gateway: 104 | worker_key: ((worker_key)) 105 | garden: 106 | 
allow_host_access: true 107 | 108 | - name: worker-brats 109 | instances: 4 110 | vm_type: concourse_core_worker_12_16 111 | stemcell: default 112 | azs: [us1] 113 | networks: 114 | - name: concourse 115 | jobs: 116 | - name: worker 117 | release: concourse 118 | properties: 119 | worker_gateway: 120 | worker_key: ((worker_key)) 121 | garden: 122 | allow_host_access: true 123 | tags: 124 | - worker-brats 125 | 126 | - name: worker-integration-1 127 | instances: 1 128 | vm_type: concourse_core_worker_32_64 129 | stemcell: default 130 | azs: [us1] 131 | networks: 132 | - name: concourse 133 | jobs: 134 | - name: worker 135 | release: concourse 136 | properties: 137 | worker_gateway: 138 | worker_key: ((worker_key)) 139 | garden: 140 | allow_host_access: true 141 | tags: 142 | - bosh-integration 143 | - bosh-integration-1 144 | - bosh-integration-db-tls-hotswap # backwards compatibility with branch pipelines 145 | 146 | - name: worker-integration-2 147 | instances: 1 148 | vm_type: concourse_core_worker_32_64 149 | stemcell: default 150 | azs: [us1] 151 | networks: 152 | - name: concourse 153 | jobs: 154 | - name: worker 155 | release: concourse 156 | properties: 157 | worker_gateway: 158 | worker_key: ((worker_key)) 159 | garden: 160 | allow_host_access: true 161 | tags: 162 | - bosh-integration 163 | - bosh-integration-2 164 | - bosh-integration-db-tls-mysql # backwards compatibility with branch pipelines 165 | 166 | - name: worker-integration-3 167 | instances: 1 168 | vm_type: concourse_core_worker_32_64 169 | stemcell: default 170 | azs: [us1] 171 | networks: 172 | - name: concourse 173 | jobs: 174 | - name: worker 175 | release: concourse 176 | properties: 177 | worker_gateway: 178 | worker_key: ((worker_key)) 179 | garden: 180 | allow_host_access: true 181 | tags: 182 | - bosh-integration 183 | - bosh-integration-3 184 | - bosh-integration-db-tls-postgres # backwards compatibility with branch pipelines 185 | 186 | - name: worker-integration-4 187 | instances: 1 
188 | vm_type: concourse_core_worker_32_64 189 | stemcell: default 190 | azs: [us1] 191 | networks: 192 | - name: concourse 193 | jobs: 194 | - name: worker 195 | release: concourse 196 | properties: 197 | worker_gateway: 198 | worker_key: ((worker_key)) 199 | garden: 200 | allow_host_access: true 201 | tags: 202 | - bosh-integration 203 | - bosh-integration-4 204 | - bosh-integration-9 # backwards compatibility with branch pipelines 205 | 206 | - name: worker-integration-5 207 | instances: 1 208 | vm_type: concourse_core_worker_32_64 209 | stemcell: default 210 | azs: [us1] 211 | networks: 212 | - name: concourse 213 | jobs: 214 | - name: worker 215 | release: concourse 216 | properties: 217 | worker_gateway: 218 | worker_key: ((worker_key)) 219 | garden: 220 | allow_host_access: true 221 | tags: 222 | - bosh-integration 223 | - bosh-integration-5 224 | 225 | - name: worker-integration-6 226 | instances: 1 227 | vm_type: concourse_core_worker_32_64 228 | stemcell: default 229 | azs: [us1] 230 | networks: 231 | - name: concourse 232 | jobs: 233 | - name: worker 234 | release: concourse 235 | properties: 236 | worker_gateway: 237 | worker_key: ((worker_key)) 238 | garden: 239 | allow_host_access: true 240 | tags: 241 | - bosh-integration 242 | - fly-integration 243 | - bosh-integration-6 244 | 245 | - name: worker-windows 246 | instances: 1 247 | stemcell: windows 248 | vm_type: concourse_core_worker_8_16 249 | azs: [us1] 250 | networks: 251 | - name: concourse-windows 252 | jobs: 253 | - name: worker-windows 254 | release: concourse 255 | properties: 256 | worker_gateway: 257 | worker_key: ((worker_key)) 258 | - name: set_password 259 | release: windows-utilities 260 | properties: 261 | set_password: 262 | username: "((windows_administrator_username))" 263 | password: "((windows_administrator_password))" 264 | - name: enable_ssh 265 | release: windows-utilities 266 | - name: golang-1-windows 267 | release: windows-tools 268 | - name: ginkgo 269 | release: 
windows-tools 270 | - name: mingw64 271 | release: windows-tools 272 | 273 | - name: worker-asia 274 | instances: 1 275 | vm_type: concourse_core_worker_2_8 276 | stemcell: default 277 | azs: [asia] 278 | networks: 279 | - name: concourse-asia 280 | jobs: 281 | - name: worker 282 | release: concourse 283 | properties: 284 | worker_gateway: 285 | worker_key: ((worker_key)) 286 | garden: 287 | allow_host_access: true 288 | tags: 289 | - asia 290 | team: pcf 291 | -------------------------------------------------------------------------------- /concourse/team-authorized-public-key-ops.yml: -------------------------------------------------------------------------------- 1 | - type: replace 2 | path: /instance_groups/name=concourse_cpi/jobs/name=web/properties/worker_gateway/team_authorized_keys/((team))? 3 | value: ((worker_public_key)) 4 | -------------------------------------------------------------------------------- /concourse/teams/core/bosh-director.yml: -------------------------------------------------------------------------------- 1 | roles: 2 | - name: owner 3 | github: 4 | teams: ["cloudfoundry:cf-bosh"] 5 | - name: member 6 | github: 7 | teams: ["cloudfoundry:cf-bosh-windows", "cloudfoundry:cf-bosh-core-external-contributor", "cloudfoundry:cf-bosh-europe"] 8 | -------------------------------------------------------------------------------- /concourse/teams/core/bosh-io.yml: -------------------------------------------------------------------------------- 1 | roles: 2 | - name: owner 3 | github: 4 | teams: ["pivotal-cf:cf-bosh"] 5 | -------------------------------------------------------------------------------- /concourse/teams/core/bosh-packages.yml: -------------------------------------------------------------------------------- 1 | roles: 2 | - name: owner 3 | github: 4 | teams: ["cloudfoundry:cf-bosh"] 5 | -------------------------------------------------------------------------------- /concourse/teams/core/dev.yml: 
-------------------------------------------------------------------------------- 1 | roles: 2 | - name: owner 3 | github: 4 | teams: ["cloudfoundry:cf-bosh"] 5 | -------------------------------------------------------------------------------- /concourse/teams/core/main.yml: -------------------------------------------------------------------------------- 1 | roles: 2 | - name: owner 3 | github: 4 | teams: ["cloudfoundry:cf-bosh"] 5 | - name: member 6 | github: 7 | teams: ["cloudfoundry:cf-bosh-windows", "cloudfoundry:cf-bosh-core-external-contributor", "cloudfoundry:cf-bosh-europe"] 8 | -------------------------------------------------------------------------------- /concourse/teams/core/pcf.yml: -------------------------------------------------------------------------------- 1 | roles: 2 | - name: owner 3 | github: 4 | teams: ["pivotal-cf:cf-bosh"] 5 | -------------------------------------------------------------------------------- /concourse/worker-ops.yml: -------------------------------------------------------------------------------- 1 | - type: replace 2 | path: /instance_groups/- 3 | value: 4 | name: ((team))_worker 5 | instances: 1 6 | vm_type: ((vm_type)) 7 | stemcell: default 8 | azs: [us1] 9 | networks: 10 | - name: concourse 11 | jobs: 12 | - name: worker 13 | release: concourse 14 | properties: 15 | worker_gateway: 16 | worker_key: ((worker_key)) 17 | team: ((team)) 18 | -------------------------------------------------------------------------------- /concourse/workers/google-asia-worker/worker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: google-asia-concourse-worker 3 | 4 | releases: 5 | - name: bosh-google-cpi 6 | url: file://((bosh_cpi_release_path)) 7 | - name: concourse 8 | url: file://((concourse_release_path)) 9 | - name: garden-runc 10 | url: file://((garden_runc_release_path)) 11 | - name: os-conf 12 | url: file://((os_conf_release_path)) 13 | 14 | resource_pools: 15 | - name: vms 16 | 
network: private 17 | stemcell: 18 | url: file://((stemcell_path)) 19 | cloud_properties: 20 | zone: ((zone)) 21 | cpu: 2 22 | ram: 5_120 23 | root_disk_size_gb: 80 24 | 25 | networks: 26 | - name: private 27 | type: manual 28 | subnets: 29 | - range: ((internal_cidr)) 30 | gateway: ((internal_gw)) 31 | cloud_properties: 32 | network_name: ((network)) 33 | subnetwork_name: ((subnetwork)) 34 | tags: ((tags)) 35 | 36 | instance_groups: 37 | - name: concourse_cpi_sf_worker_asia 38 | instances: 1 39 | resource_pool: vms 40 | networks: 41 | - {name: private, static_ips: [((internal_ip))]} 42 | properties: 43 | tags: [asia] 44 | team: ((gcp_asia_worker_team)) 45 | baggageclaim: 46 | forward_address: 127.0.0.1:7788 47 | garden: 48 | listen_network: tcp 49 | listen_address: 127.0.0.1:7777 50 | allow_host_access: true 51 | btrfs_store_size_mb: 1000000 52 | forward_address: 127.0.0.1:7777 53 | tsa: 54 | registration_mode: forward 55 | host: ((concourse_tsa_hostname)) 56 | worker_key: ((worker_key)) 57 | host_public_key: ((tsa_host_key.public_key)) 58 | jobs: 59 | - name: worker 60 | release: concourse 61 | - name: baggageclaim 62 | release: concourse 63 | - name: garden 64 | release: garden-runc 65 | - name: user_add 66 | release: os-conf 67 | properties: 68 | users: 69 | - name: jumpbox 70 | public_key: ((jumpbox_ssh.public_key)) 71 | 72 | cloud_provider: 73 | template: {name: google_cpi, release: bosh-google-cpi} 74 | mbus: https://mbus:((mbus_bootstrap_password))@127.0.0.1:6868 75 | properties: 76 | agent: {mbus: "https://mbus:((mbus_bootstrap_password))@0.0.0.0:6868"} 77 | blobstore: {provider: local, path: /var/vcap/micro_bosh/data/cache} 78 | ntp: [time1.google.com,time2.google.com,time3.google.com,time4.google.com] 79 | google: 80 | project: ((project_id)) 81 | default_zone: ((zone)) 82 | json_key: ((gcp_credentials_json)) 83 | -------------------------------------------------------------------------------- /concourse/workers/nimbus/worker.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: ((worker_name)) 3 | 4 | releases: 5 | - name: bosh-vsphere-cpi 6 | url: file://((bosh_cpi_release_path)) 7 | - name: concourse 8 | url: file://((concourse_release_path)) 9 | 10 | resource_pools: 11 | - name: vms 12 | network: private 13 | env: 14 | bosh: 15 | password: ((bosh_password)) 16 | authorized_keys: 17 | - ((ssh_authorized_public_key)) 18 | mbus: 19 | cert: ((mbus_bootstrap_ssl)) 20 | stemcell: 21 | url: file://((stemcell_path)) 22 | cloud_properties: 23 | ((resource_pool_properties)) 24 | 25 | networks: 26 | - name: public 27 | type: manual 28 | subnets: 29 | - range: ((vcenter_public_cidr)) 30 | gateway: ((vcenter_public_gateway)) 31 | dns: ((vcenter_dns)) 32 | cloud_properties: {name: ((vcenter_public_network_name))} 33 | 34 | - name: private 35 | type: manual 36 | subnets: 37 | - range: ((vcenter_internal_cidr)) 38 | gateway: ((vcenter_gateway)) 39 | reserved: ((vcenter_reserved_ips)) 40 | dns: ((vcenter_dns)) 41 | cloud_properties: {name: ((vcenter_network_name))} 42 | 43 | instance_groups: 44 | - name: worker 45 | instances: 1 46 | resource_pool: vms 47 | networks: 48 | - name: public 49 | static_ips: [((vcenter_public_worker_ip))] 50 | default: [dns, gateway] 51 | - name: private 52 | static_ips: [((vcenter_worker_ip))] 53 | jobs: 54 | - name: worker 55 | release: concourse 56 | properties: 57 | tags: ((worker_tags)) 58 | garden: 59 | allow_host_access: true 60 | worker_gateway: 61 | worker_key: ((worker_key)) 62 | hosts: ((concourse_tsa_hostname)) 63 | host_public_key: ((tsa_host_key.public_key)) 64 | baggageclaim: 65 | driver: overlay 66 | 67 | cloud_provider: 68 | template: {name: vsphere_cpi, release: bosh-vsphere-cpi} 69 | mbus: "https://mbus:((vcenter_worker_mbus_password))@((vcenter_public_worker_ip)):6868" 70 | properties: 71 | vcenter: 72 | address: ((vcenter_address)) 73 | user: ((vcenter_user)) 74 | password: ((vcenter_password)) 75 | 
datacenters: 76 | - name: ((vcenter_datacenter)) 77 | vm_folder: ((vcenter_datacenter_vm_folder)) 78 | template_folder: ((vcenter_datacenter_template_folder)) 79 | datastore_pattern: ((vcenter_datacenter_datastore_pattern)) 80 | persistent_datastore_pattern: ((vcenter_persistent_datacenter_datastore_pattern)) 81 | disk_path: ((vcenter_datacenter_disk_path)) 82 | clusters: 83 | - ((vcenter_cluster)): 84 | resource_pool: ((vcenter_resource_pool)) 85 | agent: {mbus: "https://mbus:((vcenter_worker_mbus_password))@0.0.0.0:6868"} 86 | blobstore: {provider: local, path: /var/vcap/micro_bosh/data/cache} 87 | ntp: [0.pool.ntp.org, 1.pool.ntp.org] 88 | 89 | variables: 90 | - name: default_ca 91 | type: certificate 92 | options: 93 | is_ca: true 94 | common_name: ca 95 | 96 | - name: mbus_bootstrap_ssl 97 | type: certificate 98 | options: 99 | ca: default_ca 100 | common_name: ((vcenter_worker_ip)) 101 | alternative_names: [((vcenter_worker_ip))] 102 | -------------------------------------------------------------------------------- /concourse/workers/openstack/README: -------------------------------------------------------------------------------- 1 | Due to the Pivotal Openstack environment being inaccessible from the internet and our GCP upgrader, a director has been bbl'd up in the Openstack environment, and a worker deployed from this director. 2 | 3 | The director will hopefully be helpful in testing Openstack specifics if needed. 4 | 5 | The worker.yml included is for a deployed worker (not create-env). 6 | 7 | github.com/cloudfoundry/bosh-bbl-ci-envs holds the openstack env artifacts. 8 | 9 | ``` 10 | cd bosh-bbl-ci-envs/openstack # .envrc loads bbl env 11 | lpass login 12 | bosh -d concourse-worker bosh-concourse-deployments/concourse/workers/openstack/worker.yml -l <(lpass show --note "bosh-concourse-deployments openstack external worker") 13 | ``` 14 | 15 | Assumes that the concourse release is uploaded to the director. 
It should be the same version as what the upgrader will use. 16 | -------------------------------------------------------------------------------- /concourse/workers/openstack/worker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: concourse-worker 3 | 4 | releases: 5 | - name: concourse 6 | version: latest 7 | 8 | stemcells: 9 | - alias: xenial 10 | os: ubuntu-xenial 11 | version: latest 12 | 13 | instance_groups: 14 | - name: worker 15 | instances: 1 16 | azs: [z1, z2, z3] 17 | vm_type: large 18 | stemcell: xenial 19 | networks: 20 | - name: default 21 | default: [dns, gateway] 22 | - name: public 23 | static_ips: [10.196.3.90] 24 | jobs: 25 | - name: worker 26 | release: concourse 27 | properties: 28 | log_level: debug 29 | tags: ((worker_tags)) 30 | worker_gateway: 31 | hosts: ["((bosh_core_concourse_tsa_host))"] 32 | host_public_key: ((bosh_core_concourse_tsa_host_key.public_key)) 33 | worker_key: ((worker_key)) 34 | 35 | update: 36 | canaries: 1 37 | max_in_flight: 1 38 | serial: false 39 | canary_watch_time: 1000-60000 40 | update_watch_time: 1000-60000 41 | -------------------------------------------------------------------------------- /concourse/workers/ops/add-team.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - type: replace 3 | path: /instance_groups/name=worker/jobs/name=worker/properties/team? 
4 | value: ((worker_team)) 5 | -------------------------------------------------------------------------------- /concourse/workers/vsphere/worker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: ((worker_name)) 3 | 4 | releases: 5 | - name: bosh-vsphere-cpi 6 | url: file://((bosh_cpi_release_path)) 7 | - name: concourse 8 | url: file://((concourse_release_path)) 9 | 10 | resource_pools: 11 | - name: vms 12 | network: private 13 | env: 14 | bosh: 15 | authorized_keys: 16 | - ((ssh_authorized_public_key)) 17 | mbus: 18 | cert: ((mbus_bootstrap_ssl)) 19 | stemcell: 20 | url: file://((stemcell_path)) 21 | cloud_properties: 22 | ((resource_pool_properties)) 23 | 24 | networks: 25 | - name: private 26 | type: manual 27 | subnets: 28 | - range: ((vcenter_internal_cidr)) 29 | gateway: ((vcenter_gateway)) 30 | reserved: ((vcenter_reserved_ips)) 31 | dns: ((vcenter_dns)) 32 | cloud_properties: {name: ((vcenter_network_name))} 33 | 34 | instance_groups: 35 | - name: worker 36 | instances: 1 37 | resource_pool: vms 38 | networks: 39 | - {name: private, static_ips: [((vcenter_worker_ip))]} 40 | jobs: 41 | - name: worker 42 | release: concourse 43 | properties: 44 | tags: ((worker_tags)) 45 | garden: 46 | allow_host_access: true 47 | worker_gateway: 48 | worker_key: ((worker_key)) 49 | hosts: ((concourse_tsa_hostname)) 50 | host_public_key: ((tsa_host_key.public_key)) 51 | baggageclaim: 52 | driver: overlay 53 | 54 | cloud_provider: 55 | template: {name: vsphere_cpi, release: bosh-vsphere-cpi} 56 | mbus: "https://mbus:((vcenter_worker_mbus_password))@((vcenter_worker_ip)):6868" 57 | properties: 58 | vcenter: 59 | address: ((vcenter_address)) 60 | user: ((vcenter_user)) 61 | password: ((vcenter_password)) 62 | datacenters: 63 | - name: ((vcenter_datacenter)) 64 | vm_folder: ((vcenter_datacenter_vm_folder)) 65 | template_folder: ((vcenter_datacenter_template_folder)) 66 | datastore_pattern: 
((vcenter_datacenter_datastore_pattern)) 67 | persistent_datastore_pattern: ((vcenter_persistent_datacenter_datastore_pattern)) 68 | disk_path: ((vcenter_datacenter_disk_path)) 69 | clusters: 70 | - ((vcenter_cluster)): 71 | resource_pool: ((vcenter_resource_pool)) 72 | agent: {mbus: "https://mbus:((vcenter_worker_mbus_password))@0.0.0.0:6868"} 73 | blobstore: {provider: local, path: /var/vcap/micro_bosh/data/cache} 74 | ntp: [0.pool.ntp.org, 1.pool.ntp.org] 75 | 76 | variables: 77 | - name: default_ca 78 | type: certificate 79 | options: 80 | is_ca: true 81 | common_name: ca 82 | 83 | - name: mbus_bootstrap_ssl 84 | type: certificate 85 | options: 86 | ca: default_ca 87 | common_name: ((vcenter_worker_ip)) 88 | alternative_names: [((vcenter_worker_ip))] 89 | -------------------------------------------------------------------------------- /connect-bosh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | lpass_note="$( set -eu ; lpass show --note --sync=now "bosh-concourse-deployments gcp bosh-core" )" 3 | 4 | creds() { 5 | local path=${1?'Path is required.'} 6 | bosh int <( echo "$lpass_note" ) --path /"$path" 7 | } 8 | 9 | jumpbox_host="$( set -eu ; creds jumpbox_host )" 10 | jumpbox_user="$( set -eu ; creds jumpbox_ssh_user )" 11 | 12 | tmp_dir="$( mktemp -d /tmp/jumpbox-XXXXXX)" 13 | 14 | cleanup() { 15 | [[ ! 
-e "$tmp_dir/tunnel-socket" ]] || ssh -S "$tmp_dir/tunnel-socket" -O exit "$jumpbox_user@$jumpbox_host" 16 | rm -rf "$tmp_dir" 17 | } 18 | 19 | trap cleanup EXIT 20 | 21 | # director CA Cert 22 | creds bosh_ca_cert > "$tmp_dir/ca_cert.pem" 23 | # jumpbox SSH key 24 | creds jumpbox_ssh_key > "$tmp_dir/jumpbox.pem" 25 | chmod 600 "$tmp_dir/jumpbox.pem" 26 | # director client and client secret 27 | export BOSH_CLIENT="$( creds bosh_client_admin )" 28 | export BOSH_CLIENT_SECRET="$( creds bosh_client_secret_admin )" 29 | export BOSH_ENVIRONMENT=10.0.0.6 30 | export BOSH_CA_CERT="$tmp_dir/ca_cert.pem" 31 | 32 | # SSH tunnel through the jumpbox 33 | echo "Not working? Ensure you've run 'open-ssh-for-30m'" 34 | ssh \ 35 | -M -S "${tmp_dir}/tunnel-socket" \ 36 | -i "$tmp_dir/jumpbox.pem" -fnNT -D 1080 "$jumpbox_user@$jumpbox_host" 37 | export BOSH_ALL_PROXY=socks5://localhost:1080 38 | 39 | $SHELL --rcfile <(cat ~/.bashrc - <<'EOF' 40 | prompt_command () { 41 | PS1="\n$(battery_char) $(clock_char) ${purple}\h ${yellow}→ ${yellow}jumpbox ${yellow}→ ${red}bosh_director ${reset_color}in ${green}\w\n${bold_cyan}$(scm_char)${green}$(scm_prompt_info) ${green}→${reset_color} " 42 | } 43 | EOF 44 | ) 45 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | RUN locale-gen en_US.UTF-8 4 | RUN dpkg-reconfigure locales 5 | ENV LANG en_US.UTF-8 6 | ENV LC_ALL en_US.UTF-8 7 | 8 | RUN apt-get update && \ 9 | apt-get install -y --no-install-recommends \ 10 | ca-certificates wget bash git openssh-client perl \ 11 | curl ruby ruby-dev python realpath psmisc make && \ 12 | apt-get clean 13 | 14 | ENV JQ_VERSION=1.5 GCLOUD_SDK_VERSION=192.0.0 15 | 16 | # Import stedolan PGP key (jq) 17 | RUN wget -nv https://raw.githubusercontent.com/stedolan/jq/master/sig/jq-release.key && \ 18 | gpg --import jq-release.key && \ 19 | gpg --fingerprint 
0x71523402 | grep 'Key fingerprint = 4FD7 01D6 FA9B 3D2D F5AC 935D AF19 040C 7152 3402' && \ 20 | if [ "$?" != "0" ]; then echo "Invalid PGP key!"; exit 1; fi 21 | 22 | # Install jq 23 | RUN cd /tmp && \ 24 | wget -nv https://github.com/stedolan/jq/releases/download/jq-$JQ_VERSION/jq-linux64 && \ 25 | wget -nv https://raw.githubusercontent.com/stedolan/jq/master/sig/v$JQ_VERSION/jq-linux64.asc && \ 26 | gpg --verify jq-linux64.asc jq-linux64 && \ 27 | chmod +x jq-linux64 && \ 28 | mv jq-linux64 /usr/local/bin/jq 29 | 30 | # Install Google Cloud CLI 31 | RUN wget -q -O /usr/gcloud.tar.gz https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-$GCLOUD_SDK_VERSION-linux-x86_64.tar.gz && \ 32 | ( \ 33 | echo 'f8220a7f8c4d45644ab422feabc36ad4d80834fc1b21a48d8d7901ea8184d4b5' /usr/gcloud.tar.gz | \ 34 | sha256sum -c - \ 35 | ) && \ 36 | tar -C /usr/ -xzvf /usr/gcloud.tar.gz && \ 37 | /usr/google-cloud-sdk/install.sh --usage-reporting false --path-update false --command-completion false -q 38 | 39 | RUN echo source /usr/google-cloud-sdk/path.bash.inc > /root/.bashrc 40 | -------------------------------------------------------------------------------- /docker/build-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | DOCKER_IMAGE=${DOCKER_IMAGE:-boshcpi/bosh-concourse-deployments} 6 | 7 | docker login 8 | 9 | echo "Building docker image..." 10 | docker build -t $DOCKER_IMAGE . 11 | 12 | echo "Pushing docker image..." 13 | docker push $DOCKER_IMAGE 14 | -------------------------------------------------------------------------------- /docs/troubleshooting.md: -------------------------------------------------------------------------------- 1 | ### Troubleshooting 2 | To enable debugging with the `bosh` cli, perform the following steps: 3 | 4 | 1. 
Open up the ssh port to our jumpbox for 30 minutes by triggering the 5 | [concourse/open-ssh-for-30m](https://bosh-upgrader.ci.cf-app.com/teams/main/pipelines/shared-environment/jobs/open-ssh-for-30m/) job. 6 | 1. Wait for confirmation that the SSH port is open. 7 | 1. Use the `connect-bosh.sh` (or `connect-bosh-core.sh`) script to establish a connection and start the tunnel. 8 | * If you see `Operation timed out`, it means that we need to fire up 9 | our [concourse/open-ssh-for-30m](https://bosh-upgrader.ci.cf-app.com/teams/main/pipelines/shared-environment/jobs/open-ssh-for-30m/) Concourse job to allow us to ssh-in again. 10 | * `ssh-jumpbox ` will also work and may be a better maintained script 11 | * Note: If you're running this from a non-bosh computer you need to ssh or tunnel through a trusted ip - the sf and toronto bosh computers should be fine - see `jumpbox_trusted_cidrs` in the lpass note `bosh-concourse-deployments gcp bosh-core` 12 | 1. You should now be targeted and can use `bosh` commands like normal. 13 | 14 | 15 | #### Initial Configuration of Jumpbox 16 | 17 | You only need to do this once, unless you redeploy the jumpbox. 18 | 19 | 1. Download the BOSH director's CA certificate, which we stored in LastPass: 20 | 21 | ``` 22 | lpass show --note bosh-concourse-upgrader-cpi-pipeline-director \ 23 | | ruby -r yaml -e 'data = YAML::load(STDIN.read); puts data["director_ca_cert"]' \ 24 | > /tmp/ca_cert.pem 25 | ``` 26 | 1. Open up ssh to the jumpbox for 30 minutes by kicking off the 27 | Concourse job: https://bosh-upgrader.ci.cf-app.com/teams/main/pipelines/shared-environment/jobs/open-ssh-for-30m/. 28 | 1. Examine the output of the _wait-for-ssh_ task to determine the IP 29 | address of the jumpbox: 30 | 31 | ``` 32 | Your jumpbox address is '104.198.xx.yy'. 33 | ``` 34 | 1. Now we need to find our private key. 35 | The private key will be in our credential file, which we stored in LastPass, 36 | in the secure note "secret-lastpass-note". 
37 | 38 | ``` 39 | lpass show --note bosh-concourse-upgrader-cpi-pipeline-director \ 40 | | ruby -r yaml -e 'data = YAML::load(STDIN.read); puts data["jumpbox_ssh_key"]' \ 41 | > /tmp/vcap.pem 42 | chmod 600 /tmp/vcap.pem 43 | ``` 44 | 1. Print out the BOSH director user and password; you'll need this later to 45 | log in: 46 | 47 | ``` 48 | lpass show --note bosh-concourse-upgrader-cpi-pipeline-director \ 49 | | ruby -r yaml -e 'data = YAML::load(STDIN.read); puts data["director_admin_username"]; puts data["director_admin_password"]' 50 | ``` 51 | 1. Copy the ca_cert and the vcap key to the jumpbox (the cert needed to execute BOSH commands; the key is needed to ssh to VMs): 52 | 53 | ``` 54 | scp -i /tmp/vcap.pem /tmp/vcap.pem /tmp/ca_cert.pem jumpbox@104.198.xx.yy: 55 | ``` 56 | 1. ssh into the jumpbox: 57 | 58 | ``` 59 | ssh -i /tmp/vcap.pem jumpbox@104.198.xx.yy 60 | ``` 61 | 1. Install BOSH CLI (if it's not already installed): 62 | 63 | ``` 64 | curl -L https://s3.amazonaws.com/bosh-cli-artifacts/bosh-cli-0.0.147-linux-amd64 -o bosh 65 | chmod +x bosh 66 | sudo mv bosh /usr/local/bin/bosh 67 | ``` 68 | 1. Log into the director: 69 | 70 | ``` 71 | export BOSH_ENVIRONMENT=10.0.0.6 72 | export BOSH_CA_CERT=$PWD/ca_cert.pem 73 | export BOSH_CLIENT=admin # whatever the director admin user is 74 | export BOSH_CLIENT_SECRET=blahblah # whatever the director admin password is 75 | bosh login 76 | bosh alias-env bosh 77 | ``` 78 | 1. Install tmux 79 | 80 | ``` 81 | sudo apt-get install -y tmux 82 | ``` 83 | -------------------------------------------------------------------------------- /docs/vcenter-outages.md: -------------------------------------------------------------------------------- 1 | # Recovering from a Vcenter outage 2 | The concourse upgrader for Bosh is at https://bosh-upgrader.ci.cf-app.com/ 3 | 4 | The actual VM for the upgrader is created in vcenter wild (https://vcenter.wild.cf-app.com/) creds in lastpass. 
5 | In the event of a vcenter outage (i.e., from a scheduled power-down event), 6 | the VM running the upgrader will be powered down and someone will need to manually restart it in vcenter. 7 | 8 | The VM you need to restart is under the "[BOSH-CPI-CONCOURSE-UPGRADER-VMs](https://vcenter.wild.cf-app.com/ui/#?extensionId=vsphere.core.folder.vm.relatedVMsTab&objectId=urn:vmomi:Folder:group-v463711:D431B8F6-82A5-4ACE-B4BD-25B2C0D477DF&navigator=vsphere.core.viTree.vmsAndTemplatesView)" VMs category 9 | 10 | At the moment, the upgrader VM is vm-985561c7-5c63-4513-b2a4-133a7a80bf04, who knows if this updates. If you accidentally pick the wrong VM and it is managed by bosh, vcenter will give you a helpful warning that the VM is managed by the vsphere cpi, you probably shouldn't be directly starting that VM. 11 | 12 | Once the upgrader is running, you can go to https://bosh-upgrader.ci.cf-app.com/ and run update-vsphere-v6.5-worker (from both the core pipeline and the cpi pipeline). It should also help to prune the old worker references with fly. 13 | 14 | Alternatively, you can open the ssh tunnel and `bosh cck`, this should also work. 
15 | -------------------------------------------------------------------------------- /jumpbox/custom-type-ops.yml: -------------------------------------------------------------------------------- 1 | - type: replace 2 | path: /resource_pools/name=vms/cloud_properties/machine_type 3 | value: ((machine_type)) 4 | -------------------------------------------------------------------------------- /jumpbox/remove-users-ops.yml: -------------------------------------------------------------------------------- 1 | - type: remove 2 | path: /instance_groups/name=jumpbox/jobs/name=user_add/properties/users 3 | -------------------------------------------------------------------------------- /natbox/natbox.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: concourse 3 | 4 | releases: 5 | - name: networking 6 | url: file://../../networking-release/release.tgz 7 | - name: bosh-google-cpi 8 | url: file://../../bosh-google-cpi-release/release.tgz 9 | 10 | resource_pools: 11 | - name: default 12 | network: private 13 | stemcell: 14 | url: file://../../stemcell/stemcell.tgz 15 | cloud_properties: 16 | machine_type: g1-small 17 | disk: 10_000 18 | zone: ((zone)) 19 | tags: ((tags)) 20 | ip_forwarding: true 21 | 22 | networks: 23 | - name: private 24 | type: manual 25 | subnets: 26 | - range: ((internal_cidr)) 27 | gateway: ((internal_gw)) 28 | dns: [8.8.8.8] 29 | cloud_properties: 30 | network_name: ((network)) 31 | subnetwork_name: ((subnetwork)) 32 | - name: public 33 | type: vip 34 | 35 | instance_groups: 36 | - name: nat 37 | instances: 1 38 | resource_pool: default 39 | networks: 40 | - name: private 41 | default: [dns, gateway] 42 | static_ips: [((internal_ip))] 43 | - name: public 44 | static_ips: [((external_ip))] 45 | jobs: 46 | - name: nat 47 | release: networking 48 | properties: 49 | networking: 50 | nat: 51 | out_interface: eth0 52 | 53 | cloud_provider: 54 | template: 55 | name: google_cpi 56 | release: bosh-google-cpi 
57 | 58 | mbus: "https://mbus:((mbus_bootstrap_password))@((external_ip)):443" 59 | 60 | properties: 61 | google: 62 | project: ((project_id)) 63 | json_key: ((gcp_credentials_json)) 64 | agent: {mbus: "https://mbus:((mbus_bootstrap_password))@0.0.0.0:443"} 65 | blobstore: {provider: local, path: /var/vcap/micro_bosh/data/cache} 66 | ntp: 67 | - 0.pool.ntp.org 68 | - 1.pool.ntp.org 69 | 70 | variables: 71 | - name: mbus_bootstrap_password 72 | type: password 73 | -------------------------------------------------------------------------------- /nimbus-testbed/nimbus_vc70_dual_networks.rb: -------------------------------------------------------------------------------- 1 | require 'rbvmomi' 2 | 3 | oneGB = 1 * 1000 * 1000 # in KB 4 | dcName = 'private' 5 | clusterName = 'private' 6 | 7 | $testbed = Proc.new do 8 | { 9 | "name" => "testbed-test", 10 | "version" => 3, 11 | "network" => [ 12 | { 13 | "name" => "net.0", 14 | "enableDhcp" => true, 15 | "enableStaticIpService" => true 16 | } 17 | ], 18 | "esx" => (0..0).map do | idx | 19 | { 20 | "name" => "esx.#{idx}", 21 | "nics" => 2, # 2 NICs 22 | "networks" => ["public", "nsx::net.0"], 23 | "vc" => "vc.0", 24 | "customBuild" => "ob-15843807", 25 | "dc" => dcName, 26 | "clusterName" => clusterName, 27 | "style" => "fullInstall", 28 | "cpus" => 10, # to match releng-dev size used in hack-nimbus 29 | 'cpuReservation' => 6000, # in Mhz 30 | 'memory' => 64 * 1024, # in MB 31 | 'memoryReservation' => 24 * 1024, # in MB 32 | "fullClone" => true, 33 | "disks" => [ 5 * 1000 * oneGB ], # 5 TB Disk, directors and concourse workers take ~120GB each 34 | } 35 | end, 36 | 37 | "vcs" => [ 38 | { 39 | "name" => "vc.0", 40 | "type" => "vcva", 41 | "customBuild" => "ob-15952498", 42 | "dcName" => [dcName], 43 | "clusters" => [ 44 | { 45 | "name" => clusterName, 46 | "dc" => dcName, 47 | "enableDrs" => true 48 | } 49 | ] 50 | } 51 | ], 52 | 53 | "beforePostBoot" => Proc.new do |runId, testbedSpec, vmList, catApi, logDir| 54 | end, 55 | 
"postBoot" => Proc.new do |runId, testbedSpec, vmList, catApi, logDir| 56 | vc = vmList['vc'].first 57 | vim = RbVmomi::VIM.connect( 58 | host: vc.ip, 59 | user: vc.testbedInfo['vimUsername'], 60 | password: vc.testbedInfo['vimPassword'], 61 | insecure: true 62 | ) 63 | root_folder = vim.serviceInstance.content.rootFolder 64 | dc = root_folder.childEntity.grep(RbVmomi::VIM::Datacenter).find { |x| x.name == dcName } || fail('datacenter not found') 65 | cr = dc.find_compute_resource(clusterName) || dc.hostFolder.children.find(clusterName).first 66 | abort "compute resource not found" unless cr 67 | 68 | VIM = RbVmomi::VIM 69 | 70 | spec = { 71 | :cpuAllocation => { 72 | :limit => -1, 73 | :expandableReservation => true, 74 | :reservation => 0, 75 | :shares => {:level => :normal, :shares => 0} 76 | }, 77 | :memoryAllocation => { 78 | :limit => -1, 79 | :expandableReservation => true, 80 | :reservation => 0, 81 | :shares => {:level => :normal, :shares => 0} 82 | }, 83 | } 84 | cr.resourcePool.CreateResourcePool( 85 | :name => "concourseWorkers", 86 | :spec => spec 87 | ) 88 | cr.resourcePool.CreateResourcePool( 89 | :name => "testVMs", 90 | :spec => spec 91 | ) 92 | 93 | case cr 94 | when VIM::ClusterComputeResource 95 | hosts = cr.host 96 | when VIM::ComputeResource 97 | hosts = [cr] 98 | else 99 | abort "invalid resource" 100 | end 101 | 102 | hosts.each do |host| 103 | hns = host.configManager.networkSystem 104 | 105 | pnics_in_use = [] 106 | pnics_available = [] 107 | 108 | # find available Physical Nic's 109 | hns.networkConfig.vswitch.each do |vs| 110 | pnics_in_use.concat vs.props[:spec].policy.nicTeaming.nicOrder.activeNic 111 | pnics_in_use.concat vs.props[:spec].policy.nicTeaming.nicOrder.standbyNic 112 | end 113 | 114 | hns.networkConfig.pnic.each do |pnic| 115 | pnics_available << pnic.device if !pnics_in_use.include?(pnic.device) 116 | end 117 | 118 | name = 'internal-network' 119 | policy = VIM::HostNetworkPolicy(nicTeaming: 
VIM::HostNicTeamingPolicy(nicOrder: VIM::HostNicOrderPolicy(activeNic: pnics_available))) 120 | portgroup = VIM::HostPortGroupSpec(name: name, vswitchName: name, vlanId: 0, policy: policy) 121 | hostbridge = VIM::HostVirtualSwitchBondBridge(:nicDevice => pnics_available) 122 | vswitchspec = VIM::HostVirtualSwitchSpec(:bridge => hostbridge, :mtu => 1500, :numPorts => 128) 123 | hns.AddVirtualSwitch(vswitchName: name, spec: vswitchspec) 124 | hns.AddPortGroup(portgrp: portgroup) 125 | end 126 | 127 | end 128 | } 129 | end 130 | -------------------------------------------------------------------------------- /runway/README.md: -------------------------------------------------------------------------------- 1 | # Runway hosted Concourse within VMware 2 | On-boarding docs can be found [here](https://confluence.eng.vmware.com/display/RUNWAY/1.+On-Boarding). 3 | 4 | To add new users to the `bosh-core` namespace: 5 | - add their username to `access.json` 6 | - run `./update-namespace.sh` 7 | 8 | To login to the runway concourse instance run: 9 | ``` 10 | fly -t runway@bosh-core login -c https://runway-ci.eng.vmware.com -n bosh-core 11 | ``` 12 | -------------------------------------------------------------------------------- /runway/access.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bosh-core", 3 | "type": "concourse", 4 | "role": { 5 | "owner": { 6 | "user": [ 7 | "mrosecrance", 8 | "stevensonda", 9 | "jpalermo", 10 | "klakin", 11 | "bupton", 12 | "rubenk", 13 | "scoward" 14 | ], 15 | "group": [] 16 | }, 17 | "operator": { 18 | "user": [], 19 | "group": [] 20 | }, 21 | "viewer": { 22 | "user": [], 23 | "group": [] 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /runway/fly-login.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | fly -t runway login -c https://runway-ci.eng.vmware.com -n bosh-core 
4 | -------------------------------------------------------------------------------- /runway/update-namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! command -v runctl &> /dev/null 4 | then 5 | echo "please install the runway cli first:" 6 | echo "https://gitlab.eng.vmware.com/devtools/runway/cli/runctl/tree/master#installation" 7 | exit 1 8 | fi 9 | 10 | 11 | config=~/.runway/config.yml 12 | if [ ! -f "$config" ]; then 13 | echo "configuring runway cli creating: $config" 14 | echo "please enter your vmware LDAP credentials" 15 | read -p "username: " username 16 | read -s -p "password: " password 17 | mkdir -p ~/.runway 18 | echo "auth: $(echo -n "${username}:${password}" | base64)" > $config 19 | echo "" 20 | fi 21 | 22 | echo "Updating namespace" 23 | # bosh-core namespace has id 719 24 | runctl ns update --file access.json 25 | -------------------------------------------------------------------------------- /scripts/generate-director-ca.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | deployments_dir="$( cd "$( dirname "$0" )" && cd .. && pwd )" 6 | 7 | tmp_dir="${deployments_dir}/tmp" 8 | mkdir -p "${tmp_dir}" 9 | 10 | echo "Generating SSL CA Cert..." 11 | yes "" | openssl req -x509 -newkey rsa:4096 -keyout "${tmp_dir}/director-ca.pem" -out "${tmp_dir}/director-ca.pub" -days 9999 -nodes > /dev/null 2>&1 12 | 13 | echo "Your Director CA Key and Cert was generated at ${tmp_dir}/director-ca.pem and ${tmp_dir}/director-ca.pub" 14 | -------------------------------------------------------------------------------- /scripts/generate-jumpbox-ssh-key.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | deployments_dir="$( cd "$( dirname "$0" )" && cd .. 
&& pwd )" 6 | 7 | tmp_dir="${deployments_dir}/tmp" 8 | mkdir -p "${tmp_dir}" 9 | 10 | echo "Generating SSH Key..." 11 | ssh-keygen -f "${tmp_dir}/jumpbox.pem" -N '' -C vcap 12 | mv ${tmp_dir}/jumpbox{.pem.pub,.pub} 13 | 14 | echo "Your Jumpbox SSH key was generated at ${tmp_dir}/jumpbox.pem" 15 | -------------------------------------------------------------------------------- /scripts/open-vcenter-nimbus-ui.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "If the following call to google storage APIs fails, try 'gcloud auth login'" 5 | NIMBUS_VARS=$(gsutil cat gs://bosh-core-concourse-deployment/nimbus-vcenter-vars.yml) 6 | IP=$(yq read <(echo "$NIMBUS_VARS") "vcenter_ip") 7 | USER=$(yq read <(echo "$NIMBUS_VARS") "vcenter_user") 8 | PASS=$(yq read <(echo "$NIMBUS_VARS") "vcenter_password") 9 | echo "Success!" 10 | echo "" 11 | echo "You may need to type 'thisisunsafe', multiple times, to get through the browser security warning" 12 | echo "The username is '$USER', password is '$PASS'" 13 | echo "" 14 | echo "Here's the URL to the vcenter UI: https://$IP/ui" 15 | -------------------------------------------------------------------------------- /scripts/provision-gcloud-for-concourse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | : ${TERRAFORM_SERVICE_ACCOUNT_ID:?} 6 | : ${DIRECTOR_SERVICE_ACCOUNT_ID:?} 7 | : ${PROJECT_ID:?} 8 | : ${CONCOURSE_BUCKET_NAME:?} 9 | ${TRACE:=false} 10 | 11 | if [[ $TRACE = true ]]; then 12 | set -x 13 | fi 14 | 15 | deployments_dir="$( cd "$( dirname "$0" )" && cd .. 
&& pwd )" 16 | 17 | tmp_dir="${deployments_dir}/tmp" 18 | mkdir -p "${tmp_dir}" 19 | 20 | terraform_service_account_email="${TERRAFORM_SERVICE_ACCOUNT_ID}@${PROJECT_ID}.iam.gserviceaccount.com" 21 | director_service_account_email="${DIRECTOR_SERVICE_ACCOUNT_ID}@${PROJECT_ID}.iam.gserviceaccount.com" 22 | 23 | function add_role() { # $1 = service account email, $2 = IAM role to grant on PROJECT_ID 24 | local service_account_email=$1 25 | local role=$2 26 | 27 | echo "${role}" 28 | 29 | gcloud projects add-iam-policy-binding "${PROJECT_ID}" \ 30 | --member "serviceAccount:${service_account_email}" \ 31 | --role "${role}" 32 | } 33 | 34 | echo "Setting up gcloud" 35 | gcloud init 36 | 37 | echo "Creating bucket ${CONCOURSE_BUCKET_NAME}..." 38 | gsutil mb "gs://${CONCOURSE_BUCKET_NAME}" 39 | gsutil versioning set on "gs://${CONCOURSE_BUCKET_NAME}" 40 | 41 | echo "Seeding bucket with empty state files..." 42 | gsutil cp -n <( echo '{}' ) "gs://${CONCOURSE_BUCKET_NAME}/concourse/natbox-state.json" # -n: never clobber existing state; destination quoted like the script's other gsutil calls 43 | gsutil cp -n <( echo '{}' ) "gs://${CONCOURSE_BUCKET_NAME}/concourse/jumpbox-state.json" 44 | gsutil cp -n <( echo '{}' ) "gs://${CONCOURSE_BUCKET_NAME}/director/bosh-state.json" 45 | 46 | echo "Creating Service Account ${terraform_service_account_email}..." 47 | gcloud iam service-accounts create "${TERRAFORM_SERVICE_ACCOUNT_ID}" 48 | gcloud iam service-accounts keys create "${tmp_dir}/${TERRAFORM_SERVICE_ACCOUNT_ID}.key.json" \ 49 | --iam-account "${terraform_service_account_email}" 50 | 51 | echo "Adding roles to ${terraform_service_account_email}..."
52 | add_role "${terraform_service_account_email}" roles/compute.instanceAdmin 53 | add_role "${terraform_service_account_email}" roles/compute.networkAdmin 54 | add_role "${terraform_service_account_email}" roles/compute.storageAdmin 55 | add_role "${terraform_service_account_email}" roles/compute.securityAdmin 56 | add_role "${terraform_service_account_email}" roles/storage.admin 57 | add_role "${terraform_service_account_email}" roles/iam.serviceAccountActor # NOTE(review): roles/iam.serviceAccountActor is deprecated in GCP IAM; roles/iam.serviceAccountUser is the successor -- confirm before migrating 58 | echo "" 59 | 60 | echo "Creating Service Account ${director_service_account_email}..." 61 | gcloud iam service-accounts create "${DIRECTOR_SERVICE_ACCOUNT_ID}" 62 | 63 | echo "Adding roles to ${director_service_account_email}..." 64 | add_role "${director_service_account_email}" roles/compute.instanceAdmin 65 | add_role "${director_service_account_email}" roles/compute.networkAdmin 66 | add_role "${director_service_account_email}" roles/compute.storageAdmin 67 | add_role "${director_service_account_email}" roles/storage.admin 68 | add_role "${director_service_account_email}" roles/iam.serviceAccountActor # NOTE(review): deprecated role, same as the terraform account grant -- confirm before migrating 69 | echo "" 70 | 71 | echo "Success!" 72 | echo "Your Terraform service account key was downloaded to ${tmp_dir}/${TERRAFORM_SERVICE_ACCOUNT_ID}.key.json" 73 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | { pkgs ?
import <nixpkgs> {} }: # restored '<nixpkgs>' lookup path -- the angle-bracket token was stripped by HTML-style rendering, leaving an invalid 'import {}' 2 | 3 | pkgs.mkShell { 4 | buildInputs = [ 5 | pkgs.runctl 6 | ]; 7 | } 8 | -------------------------------------------------------------------------------- /ssh-jumpbox.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | jumpbox_ip=$1 4 | 5 | if [[ -z "${jumpbox_ip}" ]]; then 6 | echo "Provide the jumpbox IP Address" 7 | exit 1 8 | fi 9 | 10 | note="$(lpass show --note --sync=now "bosh-concourse-deployments gcp bosh-core")" 11 | echo "$note" | ruby -r yaml -e 'data = YAML::load(STDIN.read); puts data["bosh_ca_cert"]' > /tmp/ca_cert.pem 12 | echo "$note"| ruby -r yaml -e 'data = YAML::load(STDIN.read); puts data["jumpbox_ssh_key"]' > /tmp/jumpbox.pem 13 | echo "$note"| ruby -r yaml -e 'data = YAML::load(STDIN.read); puts data["director_ssh_key"]' > /tmp/director_jumpbox.pem 14 | chmod 600 /tmp/jumpbox.pem 15 | 16 | # Download director username and password 17 | eval $(echo "$note" \ 18 | | ruby -r yaml -e 'data = YAML::load(STDIN.read); puts "BOSH_ENVIRONMENT=#{data["bosh_environment"]}"; puts "BOSH_CLIENT=#{data["bosh_client_admin"]}"; puts "BOSH_CLIENT_SECRET=#{data["bosh_client_secret_admin"]}"') 19 | 20 | cat > /tmp/bosh.env <