├── .gitignore ├── LICENCE ├── NOTICE ├── README.md ├── ci ├── pipelines │ └── pas-pipeline-tasks │ │ ├── pipeline.yml │ │ └── values.yml └── tasks │ └── validate-pipeline │ ├── task.sh │ └── task.yml ├── docker └── Dockerfile ├── examples ├── pas-pipeline.yml ├── pas-secrets.yml ├── pks-pipeline.yml └── pks-secrets.yml ├── last_tested ├── scripts ├── deployment-backup ├── deployment-backup-cleanup ├── export-cf-metadata ├── export-director-metadata ├── export-pks-metadata └── om-cmd ├── tasks ├── bbr-backup-director │ ├── task.sh │ └── task.yml ├── bbr-backup-pas │ ├── task.sh │ └── task.yml ├── bbr-backup-pks-clusters │ ├── task.sh │ └── task.yml ├── bbr-backup-pks │ ├── task.sh │ └── task.yml ├── bbr-cleanup-director │ ├── task.sh │ └── task.yml ├── bbr-cleanup-pas │ ├── task.sh │ └── task.yml ├── bbr-cleanup-pks-clusters │ ├── task.sh │ └── task.yml ├── bbr-cleanup-pks │ ├── task.sh │ └── task.yml ├── check-opsman-status │ ├── task.sh │ └── task.yml ├── export-om-installation │ ├── task.sh │ └── task.yml ├── extract-bbr-binary │ └── task.yml ├── lock-pks │ ├── task.sh │ └── task.yml └── unlock-pks │ ├── task.sh │ └── task.yml └── testFile /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | secrets* 3 | !*sample* 4 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ 2 | 3 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 4 | 5 | 1. Definitions. 6 | 7 | "License" shall mean the terms and conditions for use, reproduction, and 8 | distribution as defined by Sections 1 through 9 of this document. 9 | 10 | "Licensor" shall mean the copyright owner or entity authorized by the copyright 11 | owner that is granting the License. 
12 | 13 | "Legal Entity" shall mean the union of the acting entity and all other entities 14 | that control, are controlled by, or are under common control with that entity. 15 | For the purposes of this definition, "control" means (i) the power, direct or 16 | indirect, to cause the direction or management of such entity, whether by 17 | contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the 18 | outstanding shares, or (iii) beneficial ownership of such entity. 19 | 20 | "You" (or "Your") shall mean an individual or Legal Entity exercising 21 | permissions granted by this License. 22 | 23 | "Source" form shall mean the preferred form for making modifications, including 24 | but not limited to software source code, documentation source, and 25 | configuration files. 26 | 27 | "Object" form shall mean any form resulting from mechanical transformation or 28 | translation of a Source form, including but not limited to compiled object 29 | code, generated documentation, and conversions to other media types. 30 | 31 | "Work" shall mean the work of authorship, whether in Source or Object form, 32 | made available under the License, as indicated by a copyright notice that is 33 | included in or attached to the work (an example is provided in the Appendix 34 | below). 35 | 36 | "Derivative Works" shall mean any work, whether in Source or Object form, that 37 | is based on (or derived from) the Work and for which the editorial revisions, 38 | annotations, elaborations, or other modifications represent, as a whole, an 39 | original work of authorship. For the purposes of this License, Derivative Works 40 | shall not include works that remain separable from, or merely link (or bind by 41 | name) to the interfaces of, the Work and Derivative Works thereof. 
42 | 43 | "Contribution" shall mean any work of authorship, including the original 44 | version of the Work and any modifications or additions to that Work or 45 | Derivative Works thereof, that is intentionally submitted to Licensor for 46 | inclusion in the Work by the copyright owner or by an individual or Legal 47 | Entity authorized to submit on behalf of the copyright owner. For the purposes 48 | of this definition, "submitted" means any form of electronic, verbal, or 49 | written communication sent to the Licensor or its representatives, including 50 | but not limited to communication on electronic mailing lists, source code 51 | control systems, and issue tracking systems that are managed by, or on behalf 52 | of, the Licensor for the purpose of discussing and improving the Work, but 53 | excluding communication that is conspicuously marked or otherwise designated in 54 | writing by the copyright owner as "Not a Contribution." 55 | 56 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf 57 | of whom a Contribution has been received by Licensor and subsequently 58 | incorporated within the Work. 59 | 60 | 2. Grant of Copyright License. 61 | 62 | Subject to the terms and conditions of this License, each Contributor hereby 63 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, 64 | irrevocable copyright license to reproduce, prepare Derivative Works of, 65 | publicly display, publicly perform, sublicense, and distribute the Work and 66 | such Derivative Works in Source or Object form. 67 | 68 | 3. Grant of Patent License. 
69 | 70 | Subject to the terms and conditions of this License, each Contributor hereby 71 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, 72 | irrevocable (except as stated in this section) patent license to make, have 73 | made, use, offer to sell, sell, import, and otherwise transfer the Work, where 74 | such license applies only to those patent claims licensable by such Contributor 75 | that are necessarily infringed by their Contribution(s) alone or by combination 76 | of their Contribution(s) with the Work to which such Contribution(s) was 77 | submitted. If You institute patent litigation against any entity (including a 78 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a 79 | Contribution incorporated within the Work constitutes direct or contributory 80 | patent infringement, then any patent licenses granted to You under this License 81 | for that Work shall terminate as of the date such litigation is filed. 82 | 83 | 4. Redistribution. 
84 | 85 | You may reproduce and distribute copies of the Work or Derivative Works thereof 86 | in any medium, with or without modifications, and in Source or Object form, 87 | provided that You meet the following conditions: 88 | 89 | You must give any other recipients of the Work or Derivative Works a copy of 90 | this License; and You must cause any modified files to carry prominent notices 91 | stating that You changed the files; and You must retain, in the Source form of 92 | any Derivative Works that You distribute, all copyright, patent, trademark, and 93 | attribution notices from the Source form of the Work, excluding those notices 94 | that do not pertain to any part of the Derivative Works; and If the Work 95 | includes a "NOTICE" text file as part of its distribution, then any Derivative 96 | Works that You distribute must include a readable copy of the attribution 97 | notices contained within such NOTICE file, excluding those notices that do not 98 | pertain to any part of the Derivative Works, in at least one of the following 99 | places: within a NOTICE text file distributed as part of the Derivative Works; 100 | within the Source form or documentation, if provided along with the Derivative 101 | Works; or, within a display generated by the Derivative Works, if and wherever 102 | such third-party notices normally appear. The contents of the NOTICE file are 103 | for informational purposes only and do not modify the License. You may add Your 104 | own attribution notices within Derivative Works that You distribute, alongside 105 | or as an addendum to the NOTICE text from the Work, provided that such 106 | additional attribution notices cannot be construed as modifying the License. 
107 | You may add Your own copyright statement to Your modifications and may provide 108 | additional or different license terms and conditions for use, reproduction, or 109 | distribution of Your modifications, or for any such Derivative Works as a 110 | whole, provided Your use, reproduction, and distribution of the Work otherwise 111 | complies with the conditions stated in this License. 112 | 113 | 5. Submission of Contributions. 114 | 115 | Unless You explicitly state otherwise, any Contribution intentionally submitted 116 | for inclusion in the Work by You to the Licensor shall be under the terms and 117 | conditions of this License, without any additional terms or conditions. 118 | Notwithstanding the above, nothing herein shall supersede or modify the terms 119 | of any separate license agreement you may have executed with Licensor regarding 120 | such Contributions. 121 | 122 | 6. Trademarks. 123 | 124 | This License does not grant permission to use the trade names, trademarks, 125 | service marks, or product names of the Licensor, except as required for 126 | reasonable and customary use in describing the origin of the Work and 127 | reproducing the content of the NOTICE file. 128 | 129 | 7. Disclaimer of Warranty. 130 | 131 | Unless required by applicable law or agreed to in writing, Licensor provides 132 | the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, 133 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, 134 | including, without limitation, any warranties or conditions of TITLE, 135 | NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are 136 | solely responsible for determining the appropriateness of using or 137 | redistributing the Work and assume any risks associated with Your exercise of 138 | permissions under this License. 139 | 140 | 8. Limitation of Liability. 
141 | 142 | In no event and under no legal theory, whether in tort (including negligence), 143 | contract, or otherwise, unless required by applicable law (such as deliberate 144 | and grossly negligent acts) or agreed to in writing, shall any Contributor be 145 | liable to You for damages, including any direct, indirect, special, incidental, 146 | or consequential damages of any character arising as a result of this License 147 | or out of the use or inability to use the Work (including but not limited to 148 | damages for loss of goodwill, work stoppage, computer failure or malfunction, 149 | or any and all other commercial damages or losses), even if such Contributor 150 | has been advised of the possibility of such damages. 151 | 152 | 9. Accepting Warranty or Additional Liability. 153 | 154 | While redistributing the Work or Derivative Works thereof, You may choose to 155 | offer, and charge a fee for, acceptance of support, warranty, indemnity, or 156 | other liability obligations and/or rights consistent with this License. 157 | However, in accepting such obligations, You may act only on Your own behalf and 158 | on Your sole responsibility, not on behalf of any other Contributor, and only 159 | if You agree to indemnify, defend, and hold each Contributor harmless for any 160 | liability incurred by, or claims asserted against, such Contributor by reason 161 | of your accepting any such warranty or additional liability. 162 | 163 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | bbr-pipeline-tasks 2 | 3 | Copyright (c) 2017-Present Pivotal Software, Inc. All Rights Reserved. 4 | 5 | This product is licensed to you under the Apache License, Version 2.0 (the 6 | "License"). 7 | 8 | You may not use this product except in compliance with the License. 
9 | 10 | This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE 12 | file. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BBR PCF Pipeline Tasks 2 | 3 | This is a collection of [Concourse](https://concourse.ci) tasks for backing up a [Pivotal Cloud Foundry](https://pivotal.io/platform) installation using [bbr](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore). The intention of this collection is to provide a set of example Concourse scripts to perform BBR tasks. This is not a supported product, although we will try our best to respond via GitHub issues to any problems faced. 4 | 5 | ### All Foundations 6 | - [export-om-installation](tasks/export-om-installation/task.yml): Export Ops Manager installation settings 7 | - [bbr-backup-director](tasks/bbr-backup-director/task.yml): Run `bbr director backup` 8 | - [bbr-cleanup-director](tasks/bbr-cleanup-director/task.yml): Run `bbr director backup-cleanup` 9 | - [check-opsman-status](tasks/check-opsman-status/task.yml): Check `Apply changes` is not inflight before taking a backup. If it is, the task fails. This should prevent a backup from taking place. Please refer to the [example](examples/) pipelines to see how the task is used. 
10 | 11 | 12 | ### PAS 13 | - [bbr-backup-pas](tasks/bbr-backup-pas/task.yml): Run `bbr deployment backup` for PAS 14 | - [bbr-cleanup-pas](tasks/bbr-cleanup-pas/task.yml): Run `bbr deployment backup-cleanup` for PAS 15 | 16 | ### PKS 17 | - [bbr-backup-pks](tasks/bbr-backup-pks/task.yml): Run `bbr deployment backup` for PKS control plane 18 | - [bbr-cleanup-pks](tasks/bbr-cleanup-pks/task.yml): Run `bbr deployment backup-cleanup` for PKS control plane 19 | - [bbr-backup-pks-clusters](tasks/bbr-backup-pks-clusters/task.yml): Run `bbr deployment --all-deployments backup` for all PKS clusters 20 | - [bbr-cleanup-pks-clusters](tasks/bbr-cleanup-pks-clusters/task.yml): Run `bbr deployment --all-deployments backup-cleanup` for all PKS clusters 21 | - [lock-pks](tasks/lock-pks/task.yml): Lock PKS control plane 22 | - [unlock-pks](tasks/unlock-pks/task.yml): Unlock PKS control plane 23 | 24 | ### Helper 25 | - [extract-bbr-binary](tasks/extract-bbr-binary/task.yml): Extracts the linux bbr binary from tarfile 26 | 27 | --- 28 | 29 | ## Requirements 30 | 31 | ### GitHub Account 32 | 33 | For Concourse to pull the tasks it needs to reach out to GitHub. We use the SSH method to download the tasks from GitHub in the example pipelines and we strongly recommend that the HTTPS method is not used. Concourse typically polls GitHub for any changes to the target Git repo and the HTTPS method is subject to rate limits. The SSH method is not subject to the same rate limits as it authenticates the client against a GitHub user which has much higher limits. 34 | 35 | Please [create](https://help.github.com/en/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent#generating-a-new-ssh-key) and [add](https://help.github.com/en/articles/adding-a-new-ssh-key-to-your-github-account) an SSH key to your GitHub account as this will need to be used in the [pipeline secrets](https://github.com/pivotal-cf/bbr-pcf-pipeline-tasks/blob/master/examples/pks-secrets.yml#L2). 
36 | 37 | ### Networking 38 | 39 | To use any of these tasks, apart from `export-om-installation`, you will need either: 40 | - a Concourse worker with access to your Ops Manager private networks. You can find an example template for deploying an external worker in a different network to your Concourse deployment [here](https://github.com/concourse/concourse-bosh-deployment/blob/master/cluster/external-worker.yml) 41 | - or, provide the `OPSMAN_PRIVATE_KEY` to use an SSH tunnel via the Ops Manager VM. This key **is not required** if your concourse worker **has access** to the Ops Manager **private networks**. Please note, using an SSH tunnel may increase the time taken to drain backup artifacts. Backup artifacts can be very large and using an SSH tunnel will be a significant overhead on network performance. 42 | 43 | ### Disk space 44 | 45 | The backup tasks will run `bbr` commands on your Concourse worker. Ensure that your Concourse workers have enough disk space to accommodate your backup artifacts. 46 | 47 | --- 48 | 49 | ## Example pipelines 50 | 51 | Example pipelines and secrets are provided to show how to use these tasks to back up PAS or PKS. 52 | 53 | ### Triggers 54 | 55 | Running regular backups (at least every 24 hours) and storing multiple copies of backup artifacts in different datacenters is highly recommended. The [time](https://github.com/concourse/time-resource) Concourse resource can be added to the pipeline to trigger backups regularly. 56 | 57 | ### Backup artifact storage 58 | 59 | There are a variety of storage resources such as [S3](https://github.com/concourse/s3-resource) that can be used to move backups to storage. A list of Concourse resources can be found [here](https://concourse.ci/resource-types.html). 60 | 61 | ### HTTP Proxies 62 | 63 | The BBR task for backing up deployments will communicate with the BOSH API. 
64 | 65 | You can proxy this request by setting the `BOSH_ALL_PROXY` environment variable: 66 | 67 | - `BOSH_ALL_PROXY` :: Proxy used to reach the bosh director. Example: 68 | `ssh+socks5://ubuntu@1.22.33.444:22` 69 | 70 | In addition, you may be in the habit of proxying other requests using 71 | the `HTTPS_PROXY` environment variable. This is common if, for 72 | example, you have a concourse deployed on one k8s cluster that is 73 | usually used to test apps deployed to another k8s cluster. 74 | 75 | - `HTTPS_PROXY` :: Proxy for all other requests. Example: 76 | `proxy.example.com` 77 | 78 | You can also set the `NO_PROXY` environment variable with a list of 79 | IPs, IP prefixes, or domains where the proxy should not be used. This 80 | will cause BBR and other utilities to ignore `BOSH_ALL_PROXY` and 81 | `HTTPS_PROXY` for requests to those addresses. BBR uses [standard go 82 | libraries](https://pkg.go.dev/golang.org/x/net/http/httpproxy) for 83 | interpreting `NO_PROXY`, and so will behave similarly to other 84 | utilities. 85 | 86 | A common pattern is to use a proxy for the majority of your network 87 | communication, but to exclude the bosh director. For this use-case, 88 | you can set the `SET_NO_PROXY` environment variable to `true` in your 89 | backup task. This will have the effect of prepending the BOSH director 90 | IP to the `NO_PROXY` list. Otherwise, the `NO_PROXY` environment 91 | variable will not be changed. 
92 | 93 | ```yaml 94 | - task: bbr-backup-pas 95 | file: bbr-pipeline-tasks-repo/tasks/bbr-backup-pas/task.yml 96 | params: 97 | SKIP_SSL_VALIDATION: ((skip-ssl-validation)) 98 | OPSMAN_URL: ((opsman-url)) 99 | OPSMAN_USERNAME: ((opsman-username)) 100 | OPSMAN_PASSWORD: ((opsman-password)) 101 | OPSMAN_PRIVATE_KEY: ((opsman-private-key)) 102 | SET_NO_PROXY: true 103 | ``` 104 | 105 | --- 106 | 107 | ## Semantic Versioning 108 | 109 | The inputs, outputs, params, filename, and filepath of all task files in this repo are part of a semantically versioned API. 110 | See our documentation for a detailed discussion of our semver API. See www.semver.org for an explanation of semantic versioning. 111 | 112 | ### Pinning to a version 113 | 114 | This repository has git tags that can be used to pin to a specific version. For example, here is how to pin to v1.0.0 using `tag_filter`: 115 | 116 | ```yaml 117 | resources: 118 | - name: bbr-pipeline-tasks-repo 119 | type: git 120 | source: 121 | uri: https://github.com/pivotal-cf/bbr-pcf-pipeline-tasks.git 122 | branch: master 123 | tag_filter: v1.0.0 124 | ``` 125 | -------------------------------------------------------------------------------- /ci/pipelines/pas-pipeline-tasks/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #! to set pipeline run: fly set-pipeline -p "pas-pipeline-tasks" -c <(ytt -f "ci/pipelines/pas-pipeline-tasks/pipeline.yml" --data-values-file ./ci/pipelines/pas-pipeline-tasks/values.yml ) 3 | #@ load("@ytt:data", "data") 4 | 5 | #! ************************************ 6 | #! Secrets we need to run this pipeline 7 | #! ************************************ 8 | 9 | secrets: 10 | #! shepherd_account_key was needed on 2023-10-18 to claim shepherd environments. It was created using `shepherd create service-account` after logging in with WS1. 11 | - &shepherd_sa_key ((shepherd.sa_key)) 12 | 13 | #! 
tanzunet_admin_token was needed on 2023-10-18 to get windows-TAS from tanzunet. Generated using tanzunet account in lastpass at Shared-Cryogenics/prod-essential/Tanzunet Non-Admin 14 | - &pivnet_api_token ((tanzunet-account.legacy_api_token)) 15 | 16 | #! github_token was needed on 2023-10-18 to access PRs. Generated by using github account in lastpass at Shared-Cryogenics/infrastructure-root/github-ci-account 17 | - &git_access_token ((github.tas_operability_bot_token)) 18 | 19 | #! github_ssh_key was needed on 2023-10-18 to pull repos. Generated by using github account in lastpass at Shared-Cryogenics/infrastructure-root/github-ci-account 20 | - &github_ssh_key ((github.tas_operability_bot_private_key)) 21 | 22 | - &gcs_credentials 23 | bucket: ((om-backup-artifact.backup-bucket)) 24 | json_key: ((om-backup-artifact.gcs-json-key)) 25 | 26 | - &docker_creds 27 | username: ((docker.username)) 28 | password: ((docker.password)) 29 | 30 | #! ************** 31 | #! End of secrets 32 | #! ************** 33 | 34 | #! ************** 35 | #! Params 36 | #! ************** 37 | opsman_creds: &opsman_credentials 38 | OPSMAN_URL: ((.:pooled-env.ops_manager.url)) 39 | OPSMAN_USERNAME: ((.:pooled-env.ops_manager.username)) 40 | OPSMAN_PASSWORD: ((.:pooled-env.ops_manager.password)) 41 | OPSMAN_PRIVATE_KEY: ((.:pooled-env.ops_manager_private_key)) 42 | 43 | number_of_apply_changes_retries: &number_of_apply_changes_retries 2 44 | number_of_claim_env_retries: &number_of_claim_env_retries 5 45 | number_of_download_retries: &number_of_download_retries 5 46 | number_of_sats_retries: &number_of_sats_retries 10 47 | 48 | 49 | #! ************** 50 | #! End of params 51 | #! 
************** 52 | 53 | jobs: 54 | - name: create-pr-once-a-week 55 | plan: 56 | - in_parallel: 57 | - get: once-a-week 58 | trigger: true 59 | - get: bbr-pipeline-tasks-repo-main 60 | - get: cryogenics-concourse-tasks 61 | - get: image-cryogenics-essentials 62 | - task: create-test-commit 63 | image: image-cryogenics-essentials 64 | config: 65 | platform: linux 66 | inputs: 67 | - name: bbr-pipeline-tasks-repo-main 68 | outputs: 69 | - name: branch_name 70 | - name: source-repo 71 | params: 72 | COMMIT_USERNAME: Tas Operability Bot 73 | COMMIT_USEREMAIL: tas-operability.pdl@broadcom.com 74 | run: 75 | path: /bin/bash 76 | args: 77 | - -c 78 | - | 79 | #!/bin/bash 80 | pushd bbr-pipeline-tasks-repo-main 81 | git config user.name "${COMMIT_USERNAME}" 82 | git config user.email "${COMMIT_USEREMAIL}" 83 | 84 | DATE="$(date +%d-%m-%y)" 85 | BRANCH="trigger-tests-on-$DATE" 86 | git checkout -b $BRANCH 87 | echo "$DATE" > last_tested 88 | echo "$BRANCH" > ../branch_name/name 89 | 90 | git add last_tested 91 | git commit -m "update last_tested on $DATE" 92 | popd 93 | git clone bbr-pipeline-tasks-repo-main source-repo 94 | 95 | - load_var: pr_branch_name 96 | file: branch_name/name 97 | - put: bbr-pipeline-tasks-repo-write 98 | params: 99 | repository: source-repo 100 | branch: "((.:pr_branch_name))" 101 | force: true 102 | - task: create-pull-request 103 | file: cryogenics-concourse-tasks/github-automation/create-pr/task.yml 104 | image: image-cryogenics-essentials 105 | input_mapping: 106 | source-repo: bbr-pipeline-tasks-repo-write 107 | params: 108 | GH_TOKEN: *git_access_token 109 | BASE: main 110 | BRANCH: "((.:pr_branch_name))" 111 | LABELS: ci-trigger 112 | TITLE: ci-((.:pr_branch_name)) 113 | MESSAGE: | 114 | This is an automatically generated Pull Request from the Tas Operability Bot. 
115 | It is used to automatically create a PR against this repo to regularly trigger 116 | CI to test the tasks against new TAS Versions 117 | 118 | - name: lint-pipeline 119 | plan: 120 | - in_parallel: 121 | - get: bbr-pipeline-tasks-repo 122 | trigger: true 123 | - get: cryogenics-concourse-tasks 124 | - get: image-cryogenics-essentials 125 | - task: check-pipeline-for-stray-secrets 126 | image: image-cryogenics-essentials 127 | file: cryogenics-concourse-tasks/pipeline-linting/check-pipeline-for-stray-secrets/task.yml 128 | input_mapping: 129 | pipeline-repo: bbr-pipeline-tasks-repo 130 | params: 131 | PIPELINE_TO_CHECK: ./pipeline-repo/ci/pipelines/pas-pipeline-tasks/pipeline.yml 132 | #@ for v in data.values.tas_versions: 133 | - name: #@ 'claim-env-' + v["version_slug"] 134 | plan: 135 | - in_parallel: 136 | - get: bbr-pipeline-tasks-repo 137 | passed: 138 | - lint-pipeline 139 | trigger: true 140 | version: every 141 | - put: #@ 'tas-' + v["version_slug"] 142 | attempts: *number_of_claim_env_retries 143 | timeout: 6h 144 | params: 145 | action: create 146 | duration: 24h 147 | resource: #@ 'tas-' + v["version_slug"] 148 | timeout: 6h 149 | 150 | - name: #@ 'resize-director-and-control-vm-' + v["version_slug"] 151 | plan: 152 | - in_parallel: 153 | - get: bbr-pipeline-tasks-repo 154 | passed: 155 | - #@ 'claim-env-' + v["version_slug"] 156 | trigger: true 157 | version: every 158 | - get: #@ 'tas-' + v["version_slug"] 159 | passed: 160 | - #@ 'claim-env-' + v["version_slug"] 161 | - get: image-cryogenics-essentials 162 | - task: resize-vms-to-avoid-timeouts 163 | image: image-cryogenics-essentials 164 | config: 165 | platform: linux 166 | inputs: 167 | - name: bbr-pipeline-tasks-repo 168 | - name: #@ 'tas-' + v["version_slug"] 169 | run: 170 | path: /bin/bash 171 | args: 172 | - -c 173 | - | 174 | set -eu 175 | . 
<(smith -l tas-*/metadata om) 176 | CF_GUID="$(om curl -p /api/v0/staged/products | jq -r '.[] | select(.type == "cf").guid')" 177 | CONTROL_JOB_ID=$(om curl -p /api/v0/staged/products/$CF_GUID/jobs | jq -r '.jobs[] | select(.name=="control").guid ') 178 | VM_TYPE="$(om curl -p /api/v0/vm_types | jq -r '[ .vm_types[] | select( .cpu >= 4 and .ephemeral_disk >= 50000 )][0].name')" 179 | UPDATE_CONFIG=$(om curl -p /api/v0/staged/products/$CF_GUID/jobs/$CONTROL_JOB_ID/resource_config | jq ".instance_type.id = \"$VM_TYPE\"") 180 | om curl -x PUT -p /api/v0/staged/products/$CF_GUID/jobs/$CONTROL_JOB_ID/resource_config -d "$UPDATE_CONFIG" 181 | 182 | BOSH_GUID="$(om curl -p /api/v0/staged/products | jq -r '.[] | select(.type == "p-bosh").guid')" 183 | DIRECTOR_JOB_ID=$(om curl -p /api/v0/staged/products/$BOSH_GUID/jobs | jq -r '.jobs[] | select(.name=="director").guid ') 184 | UPDATE_CONFIG=$(om curl -p /api/v0/staged/products/$BOSH_GUID/jobs/$DIRECTOR_JOB_ID/resource_config | jq ".instance_type.id = \"$VM_TYPE\"") 185 | om curl -x PUT -p /api/v0/staged/products/$BOSH_GUID/jobs/$DIRECTOR_JOB_ID/resource_config -d "$UPDATE_CONFIG" 186 | 187 | om apply-changes 188 | 189 | - name: #@ 'validate-sample-pipeline-' + v["version_slug"] 190 | plan: 191 | - in_parallel: 192 | - get: bbr-pipeline-tasks-repo 193 | trigger: true 194 | version: every 195 | passed: 196 | - #@ 'resize-director-and-control-vm-' + v["version_slug"] 197 | - get: #@ 'tas-' + v["version_slug"] 198 | trigger: true 199 | passed: 200 | - #@ 'resize-director-and-control-vm-' + v["version_slug"] 201 | - get: image-bbr-pipeline-tasks 202 | - put: bbr-pipeline-tasks-repo 203 | params: 204 | context: validation-of-sample-pas-pipeline 205 | path: bbr-pipeline-tasks-repo 206 | status: pending 207 | - task: validate-pipeline 208 | image: image-bbr-pipeline-tasks 209 | file: bbr-pipeline-tasks-repo/ci/tasks/validate-pipeline/task.yml 210 | input_mapping: 211 | pipeline: bbr-pipeline-tasks-repo 212 | params: 213 | 
PIPELINE_PATH: examples/pas-pipeline.yml 214 | SECRETS_PATH: examples/pas-secrets.yml 215 | on_failure: 216 | put: bbr-pipeline-tasks-repo 217 | params: 218 | context: validation-of-sample-pas-pipeline 219 | path: bbr-pipeline-tasks-repo 220 | status: failure 221 | on_success: 222 | put: bbr-pipeline-tasks-repo 223 | params: 224 | context: validation-of-sample-pas-pipeline 225 | path: bbr-pipeline-tasks-repo 226 | status: success 227 | 228 | - name: #@ 'export-om-installation-' + v["version_slug"] 229 | serial: true 230 | plan: 231 | - in_parallel: 232 | - get: bbr-pipeline-tasks-repo 233 | trigger: true 234 | version: every 235 | passed: 236 | - #@ 'claim-env-' + v["version_slug"] 237 | - get: #@ 'tas-' + v["version_slug"] 238 | trigger: true 239 | passed: 240 | - #@ 'claim-env-' + v["version_slug"] 241 | - get: image-bbr-pipeline-tasks 242 | - put: bbr-pipeline-tasks-repo 243 | params: 244 | context: export-om-installation 245 | path: bbr-pipeline-tasks-repo 246 | status: pending 247 | - file: #@ 'tas-' + v["version_slug"] + '/metadata' 248 | format: json 249 | load_var: pooled-env 250 | - task: export-om-installation 251 | image: image-bbr-pipeline-tasks 252 | file: bbr-pipeline-tasks-repo/tasks/export-om-installation/task.yml 253 | params: 254 | <<: *opsman_credentials 255 | on_failure: 256 | put: bbr-pipeline-tasks-repo 257 | params: 258 | context: export-om-installation 259 | path: bbr-pipeline-tasks-repo 260 | status: failure 261 | on_success: 262 | put: bbr-pipeline-tasks-repo 263 | params: 264 | context: export-om-installation 265 | path: bbr-pipeline-tasks-repo 266 | status: success 267 | - put: om-backup-artifact 268 | params: 269 | file: om-installation/installation_*.zip 270 | 271 | - name: #@ 'bbr-backup-pas-' + v["version_slug"] 272 | plan: 273 | - in_parallel: 274 | - get: bbr-pipeline-tasks-repo 275 | trigger: true 276 | version: every 277 | passed: 278 | - #@ 'resize-director-and-control-vm-' + v["version_slug"] 279 | - get: bbr-release 280 | - 
get: #@ 'tas-' + v["version_slug"] 281 | trigger: true 282 | passed: 283 | - #@ 'resize-director-and-control-vm-' + v["version_slug"] 284 | - get: image-bbr-pipeline-tasks 285 | - put: bbr-pipeline-tasks-repo 286 | params: 287 | context: bbr-backup-pas 288 | path: bbr-pipeline-tasks-repo 289 | status: pending 290 | - file: #@ 'tas-' + v["version_slug"] + '/metadata' 291 | format: json 292 | load_var: pooled-env 293 | - task: extract-binary 294 | image: image-bbr-pipeline-tasks 295 | file: bbr-pipeline-tasks-repo/tasks/extract-bbr-binary/task.yml 296 | - task: bbr-backup-pas 297 | attempts: *number_of_apply_changes_retries 298 | image: image-bbr-pipeline-tasks 299 | file: bbr-pipeline-tasks-repo/tasks/bbr-backup-pas/task.yml 300 | params: 301 | <<: *opsman_credentials 302 | on_failure: 303 | put: bbr-pipeline-tasks-repo 304 | params: 305 | context: bbr-backup-pas 306 | path: bbr-pipeline-tasks-repo 307 | status: failure 308 | on_success: 309 | put: bbr-pipeline-tasks-repo 310 | params: 311 | context: bbr-backup-pas 312 | path: bbr-pipeline-tasks-repo 313 | status: success 314 | - in_parallel: 315 | - put: pas-backup-bucket 316 | params: 317 | file: pas-backup-artifact/pas-backup_*.tar 318 | - task: bbr-cleanup-pas 319 | image: image-bbr-pipeline-tasks 320 | file: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-pas/task.yml 321 | params: 322 | <<: *opsman_credentials 323 | 324 | - name: #@ 'bbr-backup-director-' + v["version_slug"] 325 | plan: 326 | - in_parallel: 327 | - get: bbr-pipeline-tasks-repo 328 | trigger: true 329 | version: every 330 | passed: 331 | - #@ 'resize-director-and-control-vm-' + v["version_slug"] 332 | - get: bbr-release 333 | - get: #@ 'tas-' + v["version_slug"] 334 | trigger: true 335 | passed: 336 | - #@ 'resize-director-and-control-vm-' + v["version_slug"] 337 | - get: image-bbr-pipeline-tasks 338 | - put: bbr-pipeline-tasks-repo 339 | params: 340 | context: bbr-backup-director 341 | path: bbr-pipeline-tasks-repo 342 | status: pending 343 | - 
file: #@ 'tas-' + v["version_slug"] + '/metadata' 344 | format: json 345 | load_var: pooled-env 346 | - task: extract-binary 347 | image: image-bbr-pipeline-tasks 348 | file: bbr-pipeline-tasks-repo/tasks/extract-bbr-binary/task.yml 349 | - task: bbr-backup-director 350 | image: image-bbr-pipeline-tasks 351 | file: bbr-pipeline-tasks-repo/tasks/bbr-backup-director/task.yml 352 | params: 353 | <<: *opsman_credentials 354 | on_failure: 355 | put: bbr-pipeline-tasks-repo 356 | params: 357 | context: bbr-backup-director 358 | path: bbr-pipeline-tasks-repo 359 | status: failure 360 | on_success: 361 | put: bbr-pipeline-tasks-repo 362 | params: 363 | context: bbr-backup-director 364 | path: bbr-pipeline-tasks-repo 365 | status: success 366 | - in_parallel: 367 | - put: director-backup-bucket 368 | params: 369 | file: director-backup-artifact/director-backup_*.tar 370 | - task: bbr-cleanup-directors 371 | image: image-bbr-pipeline-tasks 372 | file: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-director/task.yml 373 | params: 374 | <<: *opsman_credentials 375 | 376 | - name: #@ 'check-opsman-status-' + v["version_slug"] 377 | serial: true 378 | plan: 379 | - in_parallel: 380 | - get: bbr-pipeline-tasks-repo 381 | trigger: true 382 | version: every 383 | passed: 384 | - #@ 'validate-sample-pipeline-' + v["version_slug"] 385 | - #@ 'export-om-installation-' + v["version_slug"] 386 | - #@ 'bbr-backup-director-' + v["version_slug"] 387 | - #@ 'bbr-backup-pas-' + v["version_slug"] 388 | - get: #@ 'tas-' + v["version_slug"] 389 | trigger: true 390 | passed: 391 | - #@ 'validate-sample-pipeline-' + v["version_slug"] 392 | - #@ 'export-om-installation-' + v["version_slug"] 393 | - #@ 'bbr-backup-director-' + v["version_slug"] 394 | - #@ 'bbr-backup-pas-' + v["version_slug"] 395 | - get: image-cryogenics-essentials 396 | - get: image-bbr-pipeline-tasks 397 | - put: bbr-pipeline-tasks-repo 398 | params: 399 | context: check-opsman-status 400 | path: bbr-pipeline-tasks-repo 401 | status: 
pending 402 | - file: #@ 'tas-' + v["version_slug"] + '/metadata' 403 | format: json 404 | load_var: pooled-env 405 | - task: check-opsman-status 406 | image: image-bbr-pipeline-tasks 407 | file: bbr-pipeline-tasks-repo/tasks/check-opsman-status/task.yml 408 | attempts: *number_of_apply_changes_retries 409 | params: *opsman_credentials 410 | on_failure: 411 | put: bbr-pipeline-tasks-repo 412 | params: 413 | context: check-opsman-status 414 | path: bbr-pipeline-tasks-repo 415 | status: failure 416 | - task: check-opsman-status-during-apply-changes 417 | attempts: *number_of_apply_changes_retries 418 | image: image-cryogenics-essentials 419 | config: 420 | platform: linux 421 | 422 | inputs: 423 | - name: bbr-pipeline-tasks-repo 424 | - name: #@ 'tas-' + v["version_slug"] 425 | params: *opsman_credentials 426 | run: 427 | path: /bin/bash 428 | args: 429 | - -c 430 | - | 431 | set -eu 432 | 433 | source "bbr-pipeline-tasks-repo/scripts/om-cmd" 434 | om_cmd apply-changes &> /dev/null & 435 | 436 | sleep 60 437 | 438 | set +e 439 | output="$(./bbr-pipeline-tasks-repo/tasks/check-opsman-status/task.sh)" 440 | code=$? 441 | set -e 442 | 443 | test $code -ne 0 444 | echo "$output" 445 | grep "\"Apply Changes\" is in flight." 
<<< "$output" 446 | 447 | om_cmd apply-changes --reattach 448 | 449 | on_failure: 450 | put: bbr-pipeline-tasks-repo 451 | params: 452 | context: check-opsman-status 453 | path: bbr-pipeline-tasks-repo 454 | status: failure 455 | on_success: 456 | put: bbr-pipeline-tasks-repo 457 | params: 458 | context: check-opsman-status 459 | path: bbr-pipeline-tasks-repo 460 | status: success 461 | 462 | - name: #@ 'unclaim-env-' + v["version_slug"] 463 | plan: 464 | - get: bbr-pipeline-tasks-repo 465 | passed: 466 | - #@ 'check-opsman-status-' + v["version_slug"] 467 | - get: #@ 'tas-' + v["version_slug"] 468 | passed: 469 | - #@ 'check-opsman-status-' + v["version_slug"] 470 | trigger: true 471 | - put: #@ 'tas-' + v["version_slug"] 472 | params: 473 | action: release 474 | resource: #@ 'tas-' + v["version_slug"] 475 | #@ end 476 | 477 | - name: merge-pr 478 | plan: 479 | - get: bbr-pipeline-tasks-repo 480 | trigger: true 481 | passed: 482 | #@ for v in data.values.tas_versions: 483 | - #@ 'unclaim-env-' + v["version_slug"] 484 | #@ end 485 | - put: bbr-pipeline-tasks-repo 486 | params: 487 | merge: true 488 | 489 | resource_types: 490 | - name: pivnet 491 | type: registry-image 492 | source: 493 | <<: *docker_creds 494 | repository: pivotalcf/pivnet-resource 495 | tag: latest-final 496 | - name: pull-request 497 | type: registry-image 498 | source: 499 | <<: *docker_creds 500 | repository: cryogenics/pr-queue-resource 501 | 502 | - name: shepherd 503 | source: 504 | repository: us-west2-docker.pkg.dev/shepherd-268822/shepherd2/concourse-resource 505 | tag: v1 506 | type: registry-image 507 | 508 | - name: gcs 509 | type: registry-image 510 | source: 511 | <<: *docker_creds 512 | repository: frodenas/gcs-resource 513 | 514 | resources: 515 | - name: image-bbr-pipeline-tasks 516 | source: 517 | <<: *docker_creds 518 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 519 | tag: final 520 | type: registry-image 521 | 522 | - name: cryogenics-concourse-tasks 523 | type: 
git 524 | icon: github 525 | source: 526 | uri: git@github.com:pivotal/cryogenics-concourse-tasks.git 527 | private_key: *github_ssh_key 528 | branch: main 529 | 530 | - name: bbr-pipeline-tasks-repo-main 531 | type: git 532 | icon: github 533 | source: 534 | uri: git@github.com:pivotal-cf/bbr-pcf-pipeline-tasks 535 | private_key: *github_ssh_key 536 | branch: main 537 | 538 | - name: bbr-pipeline-tasks-repo-write 539 | type: git 540 | icon: github 541 | source: 542 | uri: git@github.com:pivotal-cf/bbr-pcf-pipeline-tasks 543 | private_key: *github_ssh_key 544 | commit_filter: 545 | exclude: 546 | - ;resource comment; This resource is used exclusively for pushing new changes 547 | 548 | - name: image-cryogenics-essentials 549 | type: registry-image 550 | source: 551 | <<: *docker_creds 552 | repository: cryogenics/essentials 553 | 554 | - name: bbr-pipeline-tasks-repo 555 | type: pull-request 556 | icon: source-pull 557 | source: 558 | base_branch: main 559 | repository: pivotal-cf/bbr-pcf-pipeline-tasks 560 | disable_forks: true 561 | access_token: *git_access_token 562 | autosync_pr: true 563 | ignore_paths: 564 | - docker/* 565 | - README.md 566 | - tasks/bbr-backup-pks/* 567 | - tasks/bbr-backup-pks-clusters/* 568 | - tasks/bbr-cleanup-pks/* 569 | - tasks/bbr-cleanup-pks-clusters/* 570 | - tasks/lock-pks/* 571 | - tasks/unlock-pks/* 572 | 573 | - name: om-backup-artifact 574 | type: gcs 575 | source: 576 | <<: *gcs_credentials 577 | regexp: installation_(.*).zip 578 | 579 | - name: pas-backup-bucket 580 | type: gcs 581 | source: 582 | <<: *gcs_credentials 583 | regexp: pas-backup_(.*).tar 584 | 585 | - name: director-backup-bucket 586 | type: gcs 587 | source: 588 | <<: *gcs_credentials 589 | regexp: director-backup_(.*).tar 590 | 591 | - name: bbr-release 592 | type: pivnet 593 | source: 594 | api_token: *pivnet_api_token 595 | product_slug: p-bosh-backup-and-restore 596 | 597 | - name: once-a-week 598 | type: time 599 | source: 600 | days: [Monday] 601 | 602 | 
#@ for v in data.values.tas_versions: 603 | - name: #@ 'tas-' + v["version_slug"] 604 | type: shepherd 605 | icon: pool 606 | source: 607 | url: https://v2.shepherd.run 608 | service-account-key: *shepherd_sa_key 609 | compatibility-mode: environments-app 610 | lease: 611 | namespace: tas-operability 612 | pool: 613 | namespace: official 614 | name: #@ v["pool_name"] 615 | #@ end 616 | -------------------------------------------------------------------------------- /ci/pipelines/pas-pipeline-tasks/values.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Automatically generated file. DO NOT EDIT. 3 | tas_versions: 4 | - {'version_slug': '2-11', 'pool_name': 'tas-2_11' , 'pivnet_ert_windows': '^2\.11\.\d+((-alpha|-beta|-rc)\.\d+)?(\+LTS-T)?$' } 5 | - {'version_slug': '2-13', 'pool_name': 'tas-2_13' , 'pivnet_ert_windows': '^2\.13\.\d+((-alpha|-beta|-rc)\.\d+)?(\+LTS-T)?$' } 6 | - {'version_slug': '4-0', 'pool_name': 'tas_four' , 'pivnet_ert_windows': '^4\.0\.\d+((-alpha|-beta|-rc)\.\d+)?(\+LTS-T)?$' } 7 | - {'version_slug': '5-0', 'pool_name': 'tas-5_0' , 'pivnet_ert_windows': '^5\.0\.\d+((-alpha|-beta|-rc)\.\d+)?(\+LTS-T)?$' } 8 | - {'version_slug': '6-0', 'pool_name': 'tas-6_0' , 'pivnet_ert_windows': '^6\.0\.\d+((-alpha|-beta|-rc)\.\d+)?(\+LTS-T)?$' } 9 | -------------------------------------------------------------------------------- /ci/tasks/validate-pipeline/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | pushd pipeline 6 | bosh int "$PIPELINE_PATH" -l "$SECRETS_PATH" --var-errs --var-errs-unused 7 | fly validate-pipeline -c "$PIPELINE_PATH" -l "$SECRETS_PATH" 8 | popd 9 | -------------------------------------------------------------------------------- /ci/tasks/validate-pipeline/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 
| type: registry-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | - name: pipeline 13 | 14 | run: 15 | path: bbr-pipeline-tasks-repo/ci/tasks/validate-pipeline/task.sh 16 | 17 | params: 18 | PIPELINE_PATH: 19 | SECRETS_PATH: 20 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG REGISTRY=docker.io 2 | ARG BASE_IMAGE=ubuntu:latest 3 | # as of 2024/03/28, comment taken from https://hub.docker.com/_/ubuntu h1: "What's in this image?" 4 | # The ubuntu:latest tag points to the "latest LTS", since that's the version recommended for general use. The ubuntu:rolling tag points to the latest release (regardless of LTS status). 5 | # additionally the FROM is parametrized so we can provide the base image layer without the implicit docker pull at build time.This avoid docker rate limitation in CI 6 | ARG BASE_IMAGE=${REGISTRY}/${BASE_IMAGE} 7 | FROM $BASE_IMAGE 8 | 9 | RUN apt-get update && apt-get install -y \ 10 | openssh-client \ 11 | curl \ 12 | wget \ 13 | jq \ 14 | netcat-openbsd \ 15 | && rm -rf /var/lib/apt/lists/* 16 | 17 | RUN ["/bin/bash", "-c", "set -o pipefail && curl -s https://api.github.com/repos/pivotal-cf/om/releases/latest \ 18 | | jq -e -r '.assets[] | select(.name | contains(\"om-linux\")) | select(.name | contains(\"tar.gz\") | not) | .browser_download_url' \ 19 | | wget -qi - -O /bin/om && chmod +x /bin/om"] 20 | 21 | RUN ["/bin/bash", "-c", "set -o pipefail && curl -s https://api.github.com/repos/cloudfoundry/bosh-cli/releases/latest \ 22 | | jq -e -r '.assets[] | .browser_download_url' \ 23 | | grep linux \ 24 | | wget -qi - -O /bin/bosh && chmod +x /bin/bosh"] 25 | 26 | RUN ["/bin/bash", "-c", "set -o pipefail && curl -s https://api.github.com/repos/concourse/concourse/releases/latest \ 27 | | jq -e -r '.assets[] | 
select(.name | contains(\"linux-amd64.tgz\")) | select(.name | contains(\"fly\")) | select(.name | contains(\"sha\") | not) | .browser_download_url' \ 28 | | wget -qi - -O fly.tar.gz && tar xvf fly.tar.gz -C /bin && rm fly.tar.gz && chmod +x /bin/fly"] 29 | -------------------------------------------------------------------------------- /examples/pas-pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | opsman_credentials: &opsman_credentials 3 | SKIP_SSL_VALIDATION: ((opsman.skip-ssl-validation)) 4 | OPSMAN_URL: ((opsman.url)) 5 | OPSMAN_USERNAME: ((opsman.username)) 6 | OPSMAN_PASSWORD: ((opsman.password)) 7 | OPSMAN_PRIVATE_KEY: ((opsman.private-key)) 8 | 9 | s3_credentials: &s3_credentials 10 | bucket: ((storage.backup-bucket)) 11 | region_name: ((storage.region)) 12 | access_key_id: ((aws_credentials.access_key_id)) 13 | secret_access_key: ((aws_credentials.secret_access_key)) 14 | endpoint: ((storage.endpoint)) 15 | 16 | jobs: 17 | - name: export-om-installation 18 | serial: true 19 | plan: 20 | - in_parallel: 21 | - get: bbr-pipeline-tasks-repo 22 | - task: export-om-installation 23 | file: bbr-pipeline-tasks-repo/tasks/export-om-installation/task.yml 24 | params: 25 | SKIP_SSL_VALIDATION: ((opsman.skip-ssl-validation)) 26 | OPSMAN_URL: ((opsman.url)) 27 | OPSMAN_USERNAME: ((opsman.username)) 28 | OPSMAN_PASSWORD: ((opsman.password)) 29 | - put: om-backup-artifact 30 | params: 31 | file: om-installation/installation_*.zip 32 | 33 | - name: bbr-backup-pas 34 | serial: true 35 | plan: 36 | - in_parallel: 37 | - get: bbr-pipeline-tasks-repo 38 | - get: bbr-release 39 | - task: check-opsman-status 40 | file: bbr-pipeline-tasks-repo/tasks/check-opsman-status/task.yml 41 | params: 42 | <<: *opsman_credentials 43 | - task: extract-binary 44 | file: bbr-pipeline-tasks-repo/tasks/extract-bbr-binary/task.yml 45 | - task: bbr-backup-pas 46 | file: bbr-pipeline-tasks-repo/tasks/bbr-backup-pas/task.yml 47 | params: 48 
| <<: *opsman_credentials 49 | on_failure: 50 | task: bbr-cleanup-pas 51 | file: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-pas/task.yml 52 | params: 53 | <<: *opsman_credentials 54 | - put: pas-backup-bucket 55 | params: 56 | file: pas-backup-artifact/pas-backup_*.tar 57 | 58 | - name: bbr-backup-director 59 | serial: true 60 | plan: 61 | - in_parallel: 62 | - get: bbr-pipeline-tasks-repo 63 | - get: bbr-release 64 | - task: check-opsman-status 65 | file: bbr-pipeline-tasks-repo/tasks/check-opsman-status/task.yml 66 | params: 67 | <<: *opsman_credentials 68 | - task: extract-binary 69 | file: bbr-pipeline-tasks-repo/tasks/extract-bbr-binary/task.yml 70 | - task: bbr-backup-director 71 | file: bbr-pipeline-tasks-repo/tasks/bbr-backup-director/task.yml 72 | params: 73 | <<: *opsman_credentials 74 | on_failure: 75 | task: bbr-cleanup-director 76 | file: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-director/task.yml 77 | params: 78 | <<: *opsman_credentials 79 | - put: director-backup-bucket 80 | params: 81 | file: director-backup-artifact/director-backup_*.tar 82 | 83 | resource_types: 84 | - name: pivnet 85 | type: docker-image 86 | source: 87 | repository: pivotalcf/pivnet-resource 88 | tag: latest-final 89 | 90 | resources: 91 | - name: bbr-pipeline-tasks-repo 92 | type: git 93 | source: 94 | uri: https://github.com/pivotal-cf/bbr-pcf-pipeline-tasks.git 95 | branch: main 96 | - name: om-backup-artifact 97 | type: s3 98 | source: 99 | <<: *s3_credentials 100 | regexp: installation_(.*).zip 101 | - name: pas-backup-bucket 102 | type: s3 103 | source: 104 | <<: *s3_credentials 105 | regexp: pas-backup_(.*).tar 106 | - name: director-backup-bucket 107 | type: s3 108 | source: 109 | <<: *s3_credentials 110 | regexp: director-backup_(.*).tar 111 | - name: bbr-release 112 | type: pivnet 113 | source: 114 | api_token: ((pivnet.api_token)) 115 | product_slug: p-bosh-backup-and-restore 116 | -------------------------------------------------------------------------------- 
/examples/pas-secrets.yml: -------------------------------------------------------------------------------- 1 | --- 2 | pivnet.api_token: example-pivnet-token 3 | 4 | # Ops Manager credentials 5 | opsman.skip-ssl-validation: false 6 | opsman.url: https://pcf.example.com 7 | opsman.username: example-user 8 | opsman.password: example-password 9 | opsman.private-key: | # optional, Ops Manager VM SSH private key 10 | -----BEGIN EXAMPLE PRIVATE KEY----- 11 | ... 12 | -----END EXAMPLE PRIVATE KEY----- 13 | 14 | # S3-compatible bucket credentials 15 | storage.backup-bucket: thebucket 16 | storage.region: eu-west-1 17 | storage.endpoint: s3-eu-west-1.amazonaws.com 18 | aws_credentials.access_key_id: example-key-id 19 | aws_credentials.secret_access_key: example-secret-access-key 20 | -------------------------------------------------------------------------------- /examples/pks-pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | opsman_credentials: &opsman_credentials 3 | SKIP_SSL_VALIDATION: ((skip-ssl-validation)) 4 | OPSMAN_URL: ((opsman-url)) 5 | OPSMAN_USERNAME: ((opsman-username)) 6 | OPSMAN_PASSWORD: ((opsman-password)) 7 | OPSMAN_PRIVATE_KEY: ((opsman-private-key)) 8 | 9 | s3_credentials: &s3_credentials 10 | bucket: ((storage-backup-bucket)) 11 | region_name: ((storage-region)) 12 | access_key_id: ((storage-access-key-id)) 13 | secret_access_key: ((storage-secret-access-key)) 14 | endpoint: ((storage-endpoint)) 15 | 16 | resource_types: 17 | - name: pivnet 18 | type: docker-image 19 | source: 20 | repository: pivotalcf/pivnet-resource 21 | tag: latest-final 22 | 23 | resources: 24 | - name: bbr-pipeline-tasks-repo 25 | type: git 26 | source: 27 | uri: git@github.com:pivotal-cf/bbr-pcf-pipeline-tasks.git 28 | private_key: ((git-private-key)) 29 | branch: main 30 | tag_filter: ((bbr-pipeline-tasks-repo-version)) 31 | 32 | - name: bbr-release 33 | type: pivnet 34 | source: 35 | api_token: ((pivnet-api-token)) 36 | 
product_slug: p-bosh-backup-and-restore 37 | 38 | - name: om-backup-bucket 39 | type: s3 40 | source: 41 | <<: *s3_credentials 42 | regexp: installation_(.*).zip 43 | 44 | - name: director-backup-bucket 45 | type: s3 46 | source: 47 | <<: *s3_credentials 48 | regexp: director-backup_(.*).tar 49 | 50 | - name: pks-backup-bucket 51 | type: s3 52 | source: 53 | <<: *s3_credentials 54 | regexp: pks-backup_(.*).tar 55 | 56 | - name: pks-clusters-backup-bucket 57 | type: s3 58 | source: 59 | <<: *s3_credentials 60 | regexp: pks-clusters-backup_(.*).tar 61 | 62 | jobs: 63 | - name: export-om-installation 64 | serial: true 65 | plan: 66 | - in_parallel: 67 | - get: bbr-release 68 | - get: bbr-pipeline-tasks-repo 69 | - task: export-om-installation 70 | file: bbr-pipeline-tasks-repo/tasks/export-om-installation/task.yml 71 | params: 72 | SKIP_SSL_VALIDATION: ((skip-ssl-validation)) 73 | OPSMAN_URL: ((opsman-url)) 74 | OPSMAN_USERNAME: ((opsman-username)) 75 | OPSMAN_PASSWORD: ((opsman-password)) 76 | - put: om-backup-bucket 77 | params: 78 | file: om-installation/installation_*.zip 79 | 80 | - name: bbr-backup-pks-foundation 81 | serial: true 82 | plan: 83 | - in_parallel: 84 | - get: bbr-release 85 | - get: bbr-pipeline-tasks-repo 86 | - task: check-opsman-status 87 | file: bbr-pipeline-tasks-repo/tasks/check-opsman-status/task.yml 88 | params: 89 | <<: *opsman_credentials 90 | - task: lock-pks 91 | file: bbr-pipeline-tasks-repo/tasks/lock-pks/task.yml 92 | params: 93 | <<: *opsman_credentials 94 | - task: extract-binary 95 | file: bbr-pipeline-tasks-repo/tasks/extract-bbr-binary/task.yml 96 | - in_parallel: 97 | - task: bbr-backup-director 98 | file: bbr-pipeline-tasks-repo/tasks/bbr-backup-director/task.yml 99 | params: 100 | <<: *opsman_credentials 101 | on_failure: 102 | task: bbr-cleanup-director 103 | file: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-director/task.yml 104 | params: 105 | <<: *opsman_credentials 106 | - task: bbr-backup-pks 107 | file: 
bbr-pipeline-tasks-repo/tasks/bbr-backup-pks/task.yml 108 | params: 109 | <<: *opsman_credentials 110 | on_failure: 111 | task: bbr-cleanup-pks 112 | file: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-pks/task.yml 113 | params: 114 | <<: *opsman_credentials 115 | - task: bbr-backup-pks-clusters 116 | file: bbr-pipeline-tasks-repo/tasks/bbr-backup-pks-clusters/task.yml 117 | params: 118 | <<: *opsman_credentials 119 | on_failure: 120 | task: bbr-cleanup-pks-clusters 121 | file: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-pks-clusters/task.yml 122 | params: 123 | <<: *opsman_credentials 124 | - in_parallel: 125 | - put: director-backup-bucket 126 | params: 127 | file: director-backup-artifact/director-backup_*.tar 128 | - put: pks-backup-bucket 129 | params: 130 | file: pks-backup-artifact/pks-backup_*.tar 131 | - put: pks-clusters-backup-bucket 132 | params: 133 | file: pks-clusters-backup-artifact/pks-clusters-backup_*.tar 134 | ensure: 135 | task: unlock-pks 136 | file: bbr-pipeline-tasks-repo/tasks/unlock-pks/task.yml 137 | params: 138 | <<: *opsman_credentials 139 | -------------------------------------------------------------------------------- /examples/pks-secrets.yml: -------------------------------------------------------------------------------- 1 | --- 2 | git-private-key: | # GitHub user private key to pull bbr-pcf-pipeline-tasks repo 3 | -----BEGIN EXAMPLE PRIVATE KEY----- 4 | ... 5 | -----END EXAMPLE PRIVATE KEY----- 6 | pivnet-api-token: example-pivnet-token 7 | 8 | # Ops Manager credentials 9 | skip-ssl-validation: false 10 | opsman-url: https://pcf.example.com 11 | opsman-username: example-user 12 | opsman-password: example-password 13 | opsman-private-key: | # optional, Ops Manager VM SSH private key 14 | -----BEGIN EXAMPLE PRIVATE KEY----- 15 | ... 
16 | -----END EXAMPLE PRIVATE KEY----- 17 | 18 | # S3-compatible bucket credentials 19 | storage-backup-bucket: some-bucket-name 20 | storage-endpoint: s3-region-1.amazonaws.com 21 | storage-region: region-1 22 | storage-access-key-id: some-key-id 23 | storage-secret-access-key: some-access-key 24 | 25 | # bbr-pcf-pipeline-tasks pinned release version 26 | bbr-pipeline-tasks-repo-version: example-tag-v0.0.0 -------------------------------------------------------------------------------- /last_tested: -------------------------------------------------------------------------------- 1 | 19-08-24 2 | -------------------------------------------------------------------------------- /scripts/deployment-backup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ../binary/bbr deployment --target "$BOSH_ENVIRONMENT" \ 4 | --username "$BOSH_CLIENT" \ 5 | --deployment "$DEPLOYMENT_NAME" \ 6 | --ca-cert "$BOSH_CA_CERT_PATH" \ 7 | backup --with-manifest -------------------------------------------------------------------------------- /scripts/deployment-backup-cleanup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./binary/bbr deployment --target "$BOSH_ENVIRONMENT" \ 4 | --username "$BOSH_CLIENT" \ 5 | --deployment "$DEPLOYMENT_NAME" \ 6 | --ca-cert "$BOSH_CA_CERT_PATH" \ 7 | backup-cleanup 8 | -------------------------------------------------------------------------------- /scripts/export-cf-metadata: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | # Get CF deployment guid 7 | om_cmd curl -p /api/v0/deployed/products > deployed_products.json 8 | DEPLOYMENT_NAME=$(jq -r '.[] | select(.type == "cf") | .guid' "deployed_products.json") 9 | export DEPLOYMENT_NAME 10 | -------------------------------------------------------------------------------- 
/scripts/export-director-metadata: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | function cleanup_socks_proxy { 7 | kill "$ssh_socks_proxy_pid" 8 | } 9 | 10 | # shellcheck disable=SC1090 11 | source "$(dirname "${BASH_SOURCE[0]}")/om-cmd" 12 | 13 | BOSH_CA_CERT_PATH="${PWD}/bosh.crt" 14 | 15 | if om_cmd curl -p /api/v0/deployed/director/credentials/uaa_bbr_client_credentials > /dev/null; then 16 | echo "Retreving BBR client credentials for BOSH Director" 17 | om_cmd curl -p /api/v0/deployed/director/credentials/uaa_bbr_client_credentials > bbr_client.json 18 | BOSH_CLIENT="$(jq -r .credential.value.identity bbr_client.json)" 19 | BOSH_CLIENT_SECRET="$(jq -r .credential.value.password bbr_client.json)" 20 | 21 | bosh_product_guid="$(om_cmd curl -p /api/v0/deployed/products | jq -r '.[] | select(.type=="p-bosh") | .guid')" 22 | BOSH_ENVIRONMENT="$(om_cmd curl -p "/api/v0/deployed/products/${bosh_product_guid}/static_ips" | jq -r .[0].ips[0])" 23 | 24 | om_cmd curl -p /api/v0/certificate_authorities | jq -r '.certificate_authorities | last.cert_pem' > "$BOSH_CA_CERT_PATH" 25 | else 26 | echo "Retreving Ops Manager client credentials for BOSH Director" 27 | om_cmd curl -p /api/v0/deployed/director/manifest > director_manifest.json 28 | 29 | BOSH_CLIENT="ops_manager" 30 | BOSH_CLIENT_SECRET=$(jq -r '.jobs[] | select(.name == "bosh") | .properties.uaa.clients.ops_manager.secret' director_manifest.json) 31 | BOSH_ENVIRONMENT=$(jq -r '.jobs[] | select(.name == "bosh") | .properties.director.address' director_manifest.json) 32 | 33 | jq -r '.jobs[] | select(.name == "bosh") | .properties.director.config_server.ca_cert' director_manifest.json > "${BOSH_CA_CERT_PATH}" 34 | fi 35 | 36 | # Get BOSH Director private key 37 | om_cmd curl -p /api/v0/deployed/director/credentials/bbr_ssh_credentials > bbr_keys.json 38 | BOSH_PRIVATE_KEY=$(jq -r '.credential.value.private_key_pem' 
bbr_keys.json) 39 | 40 | if [ ! -z ${OPSMAN_PRIVATE_KEY:+x} ]; then 41 | echo -e "$OPSMAN_PRIVATE_KEY" > "${PWD}/ssh.key" 42 | chmod 0600 "${PWD}/ssh.key" 43 | opsman_private_key_path="${PWD}/ssh.key" 44 | opsman_host="$(basename "$OPSMAN_URL")" 45 | ssh -4 -D 5000 -NC "ubuntu@${opsman_host}" -i "${opsman_private_key_path}" -o ServerAliveInterval=60 -o StrictHostKeyChecking=no & 46 | ssh_socks_proxy_pid="$!" 47 | trap cleanup_socks_proxy EXIT 48 | export BOSH_ALL_PROXY=socks5://localhost:5000 49 | echo "Using BOSH_ALL_PROXY" 50 | echo "sleeping for 10 secs to establish connection" 51 | sleep 10s 52 | fi 53 | 54 | # Set NO_PROXY for BOSH Director 55 | if [ ! -z ${SET_NO_PROXY:+x} ] && [ $SET_NO_PROXY = true ]; then 56 | export NO_PROXY="${BOSH_ENVIRONMENT},${NO_PROXY:=${no_proxy:=}}" 57 | echo "exporting NO_PROXY=${NO_PROXY}" 58 | fi 59 | 60 | export BOSH_CLIENT 61 | export BOSH_CLIENT_SECRET 62 | export BOSH_CA_CERT_PATH 63 | export BOSH_CA_CERT=$BOSH_CA_CERT_PATH 64 | export BOSH_ENVIRONMENT 65 | export BOSH_USERNAME="bbr" 66 | export BOSH_PRIVATE_KEY 67 | -------------------------------------------------------------------------------- /scripts/export-pks-metadata: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | # Get CF deployment guid 7 | om_cmd curl -p /api/v0/deployed/products > deployed_products.json 8 | DEPLOYMENT_NAME=$(jq -r '.[] | select(.type == "pivotal-container-service") | .guid' "deployed_products.json") 9 | export DEPLOYMENT_NAME 10 | 11 | 12 | om_cmd curl -p /api/v0/deployed/products/${DEPLOYMENT_NAME}/uaa_client_credentials > bosh_team_creds.json 13 | BOSH_TEAM_CLIENT="$(jq -r .uaa_client_name bosh_team_creds.json)" 14 | BOSH_TEAM_CLIENT_SECRET="$(jq -r .uaa_client_secret bosh_team_creds.json)" 15 | -------------------------------------------------------------------------------- /scripts/om-cmd: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | shopt -s expand_aliases 4 | 5 | skip_ssl=${SKIP_SSL_VALIDATION:-false} 6 | skip_ssl_flag="" 7 | 8 | if ${skip_ssl}; then 9 | skip_ssl_flag="--skip-ssl-validation" 10 | fi 11 | 12 | if [ -z "${CLIENT_ID:-}" ] ; then 13 | echo "Using Ops Manager credentials" 14 | alias om_cmd='om ${skip_ssl_flag} --target "${OPSMAN_URL}" --username "${OPSMAN_USERNAME}" --password "${OPSMAN_PASSWORD}"' 15 | else 16 | echo "Using Client ID credentials" 17 | alias om_cmd='om ${skip_ssl_flag} --target "${OPSMAN_URL}" --client-id "${CLIENT_ID}" --client-secret "${CLIENT_SECRET}"' 18 | fi 19 | -------------------------------------------------------------------------------- /tasks/bbr-backup-director/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | # shellcheck disable=SC1090 6 | source "$( dirname "$0" )/../../scripts/export-director-metadata" 7 | 8 | current_date="$( date +"%Y-%m-%d-%H-%M-%S" )" 9 | 10 | pushd director-backup-artifact 11 | ../binary/bbr director --host "${BOSH_ENVIRONMENT}" \ 12 | --username "$BOSH_USERNAME" \ 13 | --private-key-path <(echo "${BOSH_PRIVATE_KEY}") \ 14 | backup 15 | 16 | tar -cvf "director-backup_${current_date}.tar" --remove-files -- */* 17 | # shellcheck disable=SC2086 18 | popd 19 | -------------------------------------------------------------------------------- /tasks/bbr-backup-director/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | # Directory containing the bbr executable 13 | - name: binary 14 | 15 | outputs: 16 | # Contains `director-backup.tar` artifact created by `bbr director backup` 17 
| - name: director-backup-artifact 18 | 19 | run: 20 | path: bbr-pipeline-tasks-repo/tasks/bbr-backup-director/task.sh 21 | 22 | params: 23 | # The Ops Manager URL, e.g. https://pcf.example.com 24 | OPSMAN_URL: 25 | 26 | # If true, SSL validation will be skipped when connecting to Ops Manager API 27 | SKIP_SSL_VALIDATION: false 28 | 29 | # Client credentials for Ops Manager API. If empty, user credentials will be used 30 | CLIENT_ID: 31 | CLIENT_SECRET: 32 | 33 | # User credentials for Ops Manager API 34 | OPSMAN_USERNAME: 35 | OPSMAN_PASSWORD: 36 | 37 | # The SSH private key for the Ops Manager VM 38 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 39 | OPSMAN_PRIVATE_KEY: 40 | -------------------------------------------------------------------------------- /tasks/bbr-backup-pas/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | scripts="$( dirname "$0" )/../../scripts" 6 | 7 | # shellcheck disable=SC1090 8 | source "${scripts}/export-director-metadata" 9 | # shellcheck disable=SC1090 10 | source "${scripts}/export-cf-metadata" 11 | 12 | current_date="$( date +"%Y-%m-%d-%H-%M-%S" )" 13 | 14 | pushd pas-backup-artifact 15 | # shellcheck disable=SC1090 16 | source "../${scripts}/deployment-backup" 17 | tar -cvf "pas-backup_${current_date}.tar" --remove-files -- */* 18 | popd 19 | 20 | -------------------------------------------------------------------------------- /tasks/bbr-backup-pas/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | # Directory containing the bbr executable 13 | - name: binary 14 | 15 | outputs: 16 | # Contains `pas-backup.tar` artifact created by `bbr deployment backup` 
17 | - name: pas-backup-artifact 18 | 19 | run: 20 | path: bbr-pipeline-tasks-repo/tasks/bbr-backup-pas/task.sh 21 | 22 | params: 23 | # The Ops Manager URL, e.g. https://pcf.example.com 24 | OPSMAN_URL: 25 | 26 | # If true, SSL validation will be skipped when connecting to Ops Manager API 27 | SKIP_SSL_VALIDATION: false 28 | 29 | # Client credentials for Ops Manager API. If empty, user credentials will be used 30 | CLIENT_ID: 31 | CLIENT_SECRET: 32 | 33 | # User credentials for Ops Manager API 34 | OPSMAN_USERNAME: 35 | OPSMAN_PASSWORD: 36 | 37 | # The SSH private key for the Ops Manager VM 38 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 39 | OPSMAN_PRIVATE_KEY: 40 | 41 | # Config NO_PROXY for BOSH cli and OM 42 | # Setting SET_NO_PROXY: true results in NO_PROXY being exported 43 | # with the BOSH Director IP 44 | SET_NO_PROXY: 45 | -------------------------------------------------------------------------------- /tasks/bbr-backup-pks-clusters/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | scripts="$( dirname "$0" )/../../scripts" 6 | 7 | # shellcheck disable=SC1090 8 | source "${scripts}/export-director-metadata" 9 | # shellcheck disable=SC1090 10 | source "${scripts}/export-pks-metadata" 11 | 12 | current_date="$( date +"%Y-%m-%d-%H-%M-%S" )" 13 | 14 | pushd pks-clusters-backup-artifact 15 | # shellcheck disable=SC1090 16 | 17 | ../binary/bbr deployment --target "$BOSH_ENVIRONMENT" \ 18 | --username "$BOSH_TEAM_CLIENT" \ 19 | --password "$BOSH_TEAM_CLIENT_SECRET" \ 20 | --ca-cert "$BOSH_CA_CERT_PATH" \ 21 | --all-deployments \ 22 | backup --with-manifest 23 | 24 | tar -cvf "pks-clusters-backup_${current_date}.tar" --remove-files -- */* 25 | popd 26 | -------------------------------------------------------------------------------- /tasks/bbr-backup-pks-clusters/task.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | # Directory containing the bbr executable 13 | - name: binary 14 | 15 | outputs: 16 | # Contains `pks-clusters-backup.tar` artifact created by `bbr deployment --all-deployments backup` 17 | - name: pks-clusters-backup-artifact 18 | 19 | run: 20 | path: bbr-pipeline-tasks-repo/tasks/bbr-backup-pks-clusters/task.sh 21 | 22 | params: 23 | # The Ops Manager URL, e.g. https://pcf.example.com 24 | OPSMAN_URL: 25 | 26 | # If true, SSL validation will be skipped when connecting to Ops Manager API 27 | SKIP_SSL_VALIDATION: false 28 | 29 | # Client credentials for Ops Manager API. If empty, user credentials will be used 30 | CLIENT_ID: 31 | CLIENT_SECRET: 32 | 33 | # User credentials for Ops Manager API 34 | OPSMAN_USERNAME: 35 | OPSMAN_PASSWORD: 36 | 37 | # The SSH private key for the Ops Manager VM 38 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 39 | OPSMAN_PRIVATE_KEY: 40 | 41 | # Config NO_PROXY for BOSH cli and OM 42 | # Setting SET_NO_PROXY: true results in NO_PROXY being exported 43 | # with the BOSH Director IP 44 | SET_NO_PROXY: 45 | -------------------------------------------------------------------------------- /tasks/bbr-backup-pks/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | scripts="$( dirname "$0" )/../../scripts" 6 | 7 | # shellcheck disable=SC1090 8 | source "${scripts}/export-director-metadata" 9 | # shellcheck disable=SC1090 10 | source "${scripts}/export-pks-metadata" 11 | 12 | current_date="$( date +"%Y-%m-%d-%H-%M-%S" )" 13 | 14 | pushd pks-backup-artifact 15 | # shellcheck disable=SC1090 16 | source 
"../${scripts}/deployment-backup" 17 | tar -cvf "pks-backup_${current_date}.tar" --remove-files -- */* 18 | popd 19 | -------------------------------------------------------------------------------- /tasks/bbr-backup-pks/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | # Directory containing the bbr executable 13 | - name: binary 14 | 15 | outputs: 16 | # Contains `pks-backup.tar` artifact created by `bbr deployment backup` 17 | - name: pks-backup-artifact 18 | 19 | run: 20 | path: bbr-pipeline-tasks-repo/tasks/bbr-backup-pks/task.sh 21 | 22 | params: 23 | # The Ops Manager URL, e.g. https://pcf.example.com 24 | OPSMAN_URL: 25 | 26 | # If true, SSL validation will be skipped when connecting to Ops Manager API 27 | SKIP_SSL_VALIDATION: false 28 | 29 | # Client credentials for Ops Manager API. 
If empty, user credentials will be used 30 | CLIENT_ID: 31 | CLIENT_SECRET: 32 | 33 | # User credentials for Ops Manager API 34 | OPSMAN_USERNAME: 35 | OPSMAN_PASSWORD: 36 | 37 | # The SSH private key for the Ops Manager VM 38 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 39 | OPSMAN_PRIVATE_KEY: 40 | 41 | # Config NO_PROXY for BOSH cli and OM 42 | # Setting SET_NO_PROXY: true results in NO_PROXY being exported 43 | # with the BOSH Director IP 44 | SET_NO_PROXY: 45 | -------------------------------------------------------------------------------- /tasks/bbr-cleanup-director/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | # shellcheck disable=SC1090 6 | source "$(dirname "$0")/../../scripts/export-director-metadata" 7 | 8 | ./binary/bbr director --host "${BOSH_ENVIRONMENT}" \ 9 | --username "$BOSH_USERNAME" \ 10 | --private-key-path <(echo "${BOSH_PRIVATE_KEY}") \ 11 | backup-cleanup 12 | -------------------------------------------------------------------------------- /tasks/bbr-cleanup-director/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | # Directory containing the bbr executable 13 | - name: binary 14 | 15 | run: 16 | path: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-director/task.sh 17 | 18 | params: 19 | # The Ops Manager URL, e.g. https://pcf.example.com 20 | OPSMAN_URL: 21 | 22 | # If true, SSL validation will be skipped when connecting to Ops Manager API 23 | SKIP_SSL_VALIDATION: false 24 | 25 | # Client credentials for Ops Manager API. 
If empty, user credentials will be used 26 | CLIENT_ID: 27 | CLIENT_SECRET: 28 | 29 | # User credentials for Ops Manager API 30 | OPSMAN_USERNAME: 31 | OPSMAN_PASSWORD: 32 | 33 | # The SSH private key for the Ops Manager VM 34 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 35 | OPSMAN_PRIVATE_KEY: 36 | -------------------------------------------------------------------------------- /tasks/bbr-cleanup-pas/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | scripts="$(dirname "$0")/../../scripts" 6 | 7 | 8 | # shellcheck disable=SC1090 9 | source "$scripts/export-director-metadata" 10 | # shellcheck disable=SC1090 11 | source "$scripts/export-cf-metadata" 12 | 13 | # shellcheck disable=SC1090 14 | source "$scripts/deployment-backup-cleanup" -------------------------------------------------------------------------------- /tasks/bbr-cleanup-pas/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | # Directory containing the bbr executable 13 | - name: binary 14 | 15 | run: 16 | path: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-pas/task.sh 17 | 18 | params: 19 | # The Ops Manager URL, e.g. https://pcf.example.com 20 | OPSMAN_URL: 21 | 22 | # If true, SSL validation will be skipped when connecting to Ops Manager API 23 | SKIP_SSL_VALIDATION: false 24 | 25 | # Client credentials for Ops Manager API. 
If empty, user credentials will be used 26 | CLIENT_ID: 27 | CLIENT_SECRET: 28 | 29 | # User credentials for Ops Manager API 30 | OPSMAN_USERNAME: 31 | OPSMAN_PASSWORD: 32 | 33 | # The SSH private key for the Ops Manager VM 34 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 35 | OPSMAN_PRIVATE_KEY: 36 | 37 | # Config NO_PROXY for BOSH cli and OM 38 | # Setting SET_NO_PROXY: true results in NO_PROXY being exported 39 | # with the BOSH Director IP 40 | SET_NO_PROXY: 41 | -------------------------------------------------------------------------------- /tasks/bbr-cleanup-pks-clusters/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | scripts="$(dirname "$0")/../../scripts" 6 | 7 | 8 | # shellcheck disable=SC1090 9 | source "$scripts/export-director-metadata" 10 | # shellcheck disable=SC1090 11 | source "$scripts/export-pks-metadata" 12 | 13 | # shellcheck disable=SC1090 14 | ./binary/bbr deployment --target "$BOSH_ENVIRONMENT" \ 15 | --username "$BOSH_TEAM_CLIENT" \ 16 | --password "$BOSH_TEAM_CLIENT_SECRET" \ 17 | --ca-cert "$BOSH_CA_CERT_PATH" \ 18 | --all-deployments \ 19 | backup-cleanup 20 | -------------------------------------------------------------------------------- /tasks/bbr-cleanup-pks-clusters/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | # Directory containing the bbr executable 13 | - name: binary 14 | 15 | run: 16 | path: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-pks-clusters/task.sh 17 | 18 | params: 19 | # The Ops Manager URL, e.g. 
https://pcf.example.com 20 | OPSMAN_URL: 21 | 22 | # If true, SSL validation will be skipped when connecting to Ops Manager API 23 | SKIP_SSL_VALIDATION: false 24 | 25 | # Client credentials for Ops Manager API. If empty, user credentials will be used 26 | CLIENT_ID: 27 | CLIENT_SECRET: 28 | 29 | # User credentials for Ops Manager API 30 | OPSMAN_USERNAME: 31 | OPSMAN_PASSWORD: 32 | 33 | # The SSH private key for the Ops Manager VM 34 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 35 | OPSMAN_PRIVATE_KEY: 36 | 37 | # Config NO_PROXY for BOSH cli and OM 38 | # Setting SET_NO_PROXY: true results in NO_PROXY being exported 39 | # with the BOSH Director IP 40 | SET_NO_PROXY: 41 | -------------------------------------------------------------------------------- /tasks/bbr-cleanup-pks/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | scripts="$(dirname "$0")/../../scripts" 6 | 7 | 8 | # shellcheck disable=SC1090 9 | source "$scripts/export-director-metadata" 10 | # shellcheck disable=SC1090 11 | source "$scripts/export-pks-metadata" 12 | 13 | # shellcheck disable=SC1090 14 | source "$scripts/deployment-backup-cleanup" -------------------------------------------------------------------------------- /tasks/bbr-cleanup-pks/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | # Directory containing the bbr executable 13 | - name: binary 14 | 15 | run: 16 | path: bbr-pipeline-tasks-repo/tasks/bbr-cleanup-pks/task.sh 17 | 18 | params: 19 | # The Ops Manager URL, e.g. 
https://pcf.example.com 20 | OPSMAN_URL: 21 | 22 | # If true, SSL validation will be skipped when connecting to Ops Manager API 23 | SKIP_SSL_VALIDATION: false 24 | 25 | # Client credentials for Ops Manager API. If empty, user credentials will be used 26 | CLIENT_ID: 27 | CLIENT_SECRET: 28 | 29 | # User credentials for Ops Manager API 30 | OPSMAN_USERNAME: 31 | OPSMAN_PASSWORD: 32 | 33 | # The SSH private key for the Ops Manager VM 34 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 35 | OPSMAN_PRIVATE_KEY: 36 | 37 | # Config NO_PROXY for BOSH cli and OM 38 | # Setting SET_NO_PROXY: true results in NO_PROXY being exported 39 | # with the BOSH Director IP 40 | SET_NO_PROXY: 41 | -------------------------------------------------------------------------------- /tasks/check-opsman-status/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | scripts="$(dirname "$0")/../../scripts" 6 | 7 | # shellcheck source=../../scripts/om-cmd 8 | source "${scripts}/om-cmd" > /dev/null 9 | 10 | status="$(om_cmd installations --format json | jq .[0].status)" 11 | trimmed_status="$(xargs <<< "$status")" 12 | 13 | if [ "$trimmed_status" == "running" ]; then 14 | echo "\"Apply Changes\" is in flight." | tee /dev/stderr 15 | exit 1 16 | fi 17 | 18 | echo "No \"Apply Changes\" in flight." 19 | -------------------------------------------------------------------------------- /tasks/check-opsman-status/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | 13 | outputs: 14 | 15 | run: 16 | path: bbr-pipeline-tasks-repo/tasks/check-opsman-status/task.sh 17 | 18 | params: 19 | # The Ops Manager URL, e.g. 
https://pcf.example.com 20 | OPSMAN_URL: 21 | 22 | # If true, SSL validation will be skipped when connecting to Ops Manager API 23 | SKIP_SSL_VALIDATION: false 24 | 25 | # Client credentials for Ops Manager API. If empty, user credentials will be used 26 | CLIENT_ID: 27 | CLIENT_SECRET: 28 | 29 | # User credentials for Ops Manager API 30 | OPSMAN_USERNAME: 31 | OPSMAN_PASSWORD: 32 | 33 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 34 | OPSMAN_PRIVATE_KEY: 35 | -------------------------------------------------------------------------------- /tasks/export-om-installation/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | # shellcheck disable=SC1090 6 | source "$(dirname "$0")/../../scripts/om-cmd" 7 | 8 | current_date="$( date +"%Y-%m-%d-%H-%M-%S" )" 9 | 10 | om_cmd --request-timeout 7200 export-installation --output-file "om-installation/installation_${current_date}.zip" 11 | -------------------------------------------------------------------------------- /tasks/export-om-installation/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | 13 | outputs: 14 | # Contains exported Ops Manager installation settings in `installation.zip` 15 | - name: om-installation 16 | 17 | run: 18 | path: bbr-pipeline-tasks-repo/tasks/export-om-installation/task.sh 19 | 20 | params: 21 | # The Ops Manager URL, e.g. https://pcf.example.com 22 | OPSMAN_URL: 23 | 24 | # If true, SSL validation will be skipped when connecting to Ops Manager API 25 | SKIP_SSL_VALIDATION: false 26 | 27 | # Client credentials for Ops Manager API. 
If empty, user credentials will be used 28 | CLIENT_ID: 29 | CLIENT_SECRET: 30 | 31 | # User credentials for Ops Manager API 32 | OPSMAN_USERNAME: 33 | OPSMAN_PASSWORD: 34 | -------------------------------------------------------------------------------- /tasks/extract-bbr-binary/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | # Contains the latest release of bbr from GitHub 12 | - name: bbr-release 13 | 14 | outputs: 15 | # Directory containing the bbr executable to use in other tasks 16 | - name: binary 17 | 18 | run: 19 | path: bash 20 | args: 21 | - -c 22 | - | 23 | tar -xvf bbr-release/bbr*.tar 24 | cp releases/bbr binary/ 25 | -------------------------------------------------------------------------------- /tasks/lock-pks/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | ensure_pksapi_stopped() { 6 | output=$(bosh -d "$DEPLOYMENT_NAME" ssh -c 'sudo /var/vcap/bosh/bin/monit summary' | grep pks-api) 7 | until [[ $output == *"not monitored"* && $output != *"stop pending"* ]]; do 8 | echo "waiting" 9 | sleep 1 10 | output=$(bosh -d "$DEPLOYMENT_NAME" ssh -c 'sudo /var/vcap/bosh/bin/monit summary' | grep pks-api) 11 | done 12 | } 13 | # timeout is a command and executed as a subprocess, so ensure_pksapi_stopped must be exported 14 | export -f ensure_pksapi_stopped 15 | 16 | scripts="$(dirname "$0")/../../scripts" 17 | 18 | # shellcheck disable=SC1090 19 | source "$scripts/export-director-metadata" 20 | # shellcheck disable=SC1090 21 | source "$scripts/export-pks-metadata" 22 | 23 | bosh -d "$DEPLOYMENT_NAME" ssh pivotal-container-service -c "sudo /var/vcap/bosh/bin/monit stop pks-api" 24 | 25 | TIMEOUT=60 26 | 27 | if timeout "$TIMEOUT" bash -c 
ensure_pksapi_stopped 28 | then 29 | echo "PKS API has been stopped" 30 | else 31 | echo "Timed out stopping PKS API after $TIMEOUT seconds" 32 | exit 1 33 | fi 34 | -------------------------------------------------------------------------------- /tasks/lock-pks/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | 13 | run: 14 | path: bbr-pipeline-tasks-repo/tasks/lock-pks/task.sh 15 | 16 | params: 17 | # The Ops Manager URL, e.g. https://pcf.example.com 18 | OPSMAN_URL: 19 | 20 | # If true, SSL validation will be skipped when connecting to Ops Manager API 21 | SKIP_SSL_VALIDATION: false 22 | 23 | # Client credentials for Ops Manager API. If empty, user credentials will be used 24 | CLIENT_ID: 25 | CLIENT_SECRET: 26 | 27 | # User credentials for Ops Manager API 28 | OPSMAN_USERNAME: 29 | OPSMAN_PASSWORD: 30 | 31 | # The SSH private key for the Ops Manager VM 32 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 33 | OPSMAN_PRIVATE_KEY: 34 | 35 | # Config NO_PROXY for BOSH cli and OM 36 | # Setting SET_NO_PROXY: true results in NO_PROXY being exported 37 | # with the BOSH Director IP 38 | SET_NO_PROXY: 39 | -------------------------------------------------------------------------------- /tasks/unlock-pks/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | ensure_pksapi_started() { 6 | output=$(bosh -d "$DEPLOYMENT_NAME" ssh -c 'sudo /var/vcap/bosh/bin/monit summary' | grep pks-api) 7 | until [[ $output == *"running"* ]]; do 8 | echo "waiting" 9 | sleep 1 10 | output=$(bosh -d "$DEPLOYMENT_NAME" ssh -c 'sudo /var/vcap/bosh/bin/monit summary' | grep pks-api) 11 | done 12 | } 13 | # 
timeout is a command and executed as a subprocess, so ensure_pksapi_started must be exported 14 | export -f ensure_pksapi_started 15 | 16 | scripts="$(dirname "$0")/../../scripts" 17 | 18 | # shellcheck disable=SC1090 19 | source "$scripts/export-director-metadata" 20 | # shellcheck disable=SC1090 21 | source "$scripts/export-pks-metadata" 22 | 23 | bosh -d "$DEPLOYMENT_NAME" ssh pivotal-container-service -c "sudo /var/vcap/bosh/bin/monit start pks-api" 24 | 25 | TIMEOUT=60 26 | 27 | if timeout "$TIMEOUT" bash -c ensure_pksapi_started 28 | then 29 | echo "PKS API has been started" 30 | else 31 | echo "Timed out starting PKS API after $TIMEOUT seconds" 32 | exit 1 33 | fi 34 | -------------------------------------------------------------------------------- /tasks/unlock-pks/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfplatformrecovery/bbr-pcf-pipeline-tasks 8 | tag: final 9 | 10 | inputs: 11 | - name: bbr-pipeline-tasks-repo 12 | 13 | run: 14 | path: bbr-pipeline-tasks-repo/tasks/unlock-pks/task.sh 15 | 16 | params: 17 | # The Ops Manager URL, e.g. https://pcf.example.com 18 | OPSMAN_URL: 19 | 20 | # If true, SSL validation will be skipped when connecting to Ops Manager API 21 | SKIP_SSL_VALIDATION: false 22 | 23 | # Client credentials for Ops Manager API. 
If empty, user credentials will be used 24 | CLIENT_ID: 25 | CLIENT_SECRET: 26 | 27 | # User credentials for Ops Manager API 28 | OPSMAN_USERNAME: 29 | OPSMAN_PASSWORD: 30 | 31 | # The SSH private key for the Ops Manager VM 32 | # If provided, a SSH tunnel through the Ops Manager VM is created and used by bbr 33 | OPSMAN_PRIVATE_KEY: 34 | 35 | # Config NO_PROXY for BOSH cli and OM 36 | # Setting SET_NO_PROXY: true results in NO_PROXY being exported 37 | # with the BOSH Director IP 38 | SET_NO_PROXY: 39 | -------------------------------------------------------------------------------- /testFile: -------------------------------------------------------------------------------- 1 | Test diff for PR pipeline purposes 2 | --------------------------------------------------------------------------------