├── .github
│   └── workflows
│       ├── codeql.yml
│       ├── golangci-lint.yml
│       └── release.yml
├── .gitignore
├── .golangci.yml
├── .goreleaser.yml
├── LICENSE
├── Makefile
├── README.md
├── cmd
│   └── cyclone
│       └── main.go
├── go.mod
├── go.sum
├── pkg
│   ├── backup.go
│   ├── backup_clone.go
│   ├── backup_types.go
│   ├── image.go
│   ├── logger.go
│   ├── main.go
│   ├── secrets.go
│   ├── security_group.go
│   ├── server.go
│   ├── share.go
│   ├── utils.go
│   ├── version.go
│   └── volume.go
└── renovate.json
/.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL Advanced" 13 | 14 | on: 15 | push: 16 | branches: [ "master" ] 17 | pull_request: 18 | branches: [ "master" ] 19 | schedule: 20 | - cron: '42 15 * * 6' 21 | 22 | jobs: 23 | analyze: 24 | name: Analyze (${{ matrix.language }}) 25 | # Runner size impacts CodeQL analysis time. To learn more, please see: 26 | # - https://gh.io/recommended-hardware-resources-for-running-codeql 27 | # - https://gh.io/supported-runners-and-hardware-resources 28 | # - https://gh.io/using-larger-runners (GitHub.com only) 29 | # Consider using larger runners or machines with greater resources for possible analysis time improvements. 30 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} 31 | permissions: 32 | # required for all workflows 33 | security-events: write 34 | 35 | # required to fetch internal or private CodeQL packs 36 | packages: read 37 | 38 | # only required for workflows in private repositories 39 | actions: read 40 | contents: read 41 | 42 | strategy: 43 | fail-fast: false 44 | matrix: 45 | include: 46 | - language: actions 47 | build-mode: none 48 | - language: go 49 | build-mode: autobuild 50 | # CodeQL supports the following values for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' 51 | # Use 'c-cpp' to analyze code written in C, C++ or both 52 | # Use 'java-kotlin' to analyze code written in Java, Kotlin or both 53 | # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both 54 | # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, 55 | # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. 56 | # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how 57 | # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages 58 | steps: 59 | - name: Checkout repository 60 | uses: actions/checkout@v4 61 | 62 | # Add any setup steps before running the `github/codeql-action/init` action. 63 | # This includes steps like installing compilers or runtimes (`actions/setup-node` 64 | # or others). This is typically only required for manual builds.
65 | # - name: Setup runtime (example) 66 | # uses: actions/setup-example@v1 67 | 68 | # Initializes the CodeQL tools for scanning. 69 | - name: Initialize CodeQL 70 | uses: github/codeql-action/init@v3 71 | with: 72 | languages: ${{ matrix.language }} 73 | build-mode: ${{ matrix.build-mode }} 74 | # If you wish to specify custom queries, you can do so here or in a config file. 75 | # By default, queries listed here will override any specified in a config file. 76 | # Prefix the list here with "+" to use these queries and those in the config file. 77 | 78 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 79 | # queries: security-extended,security-and-quality 80 | 81 | # If the analyze step fails for one of the languages you are analyzing with 82 | # "We were unable to automatically build your code", modify the matrix above 83 | # to set the build mode to "manual" for that language. Then modify this step 84 | # to build your code. 85 | # ℹ️ Command-line programs to run using the OS shell. 86 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 87 | - if: matrix.build-mode == 'manual' 88 | shell: bash 89 | run: | 90 | echo 'If you are using a "manual" build mode for one or more of the' \ 91 | 'languages you are analyzing, replace this with the commands to build' \ 92 | 'your code, for example:' 93 | echo ' make bootstrap' 94 | echo ' make release' 95 | exit 1 96 | 97 | - name: Perform CodeQL Analysis 98 | uses: github/codeql-action/analyze@v3 99 | with: 100 | category: "/language:${{matrix.language}}" 101 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | name: Golangci-lint 2 | on: 3 | push: 4 | branches: 5 | - master 6 | pull_request: 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | golangci-lint: 13 | name: Lint 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Setup Go 17 | uses: actions/setup-go@v5 18 | with: 19 | go-version: '1.24' 20 | 21 | - name: Checkout 22 | uses: actions/checkout@v4 23 | 24 | - name: Golangci-lint 25 | uses: golangci/golangci-lint-action@v8 26 | with: 27 | version: 'v2.1' 28 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # This GitHub action will publish assets for release when a tag is created 2 | # that matches the pattern "v*" (i.e. v0.1.0).
3 | # 4 | # Based on the configuration provided at: 5 | # https://github.com/hashicorp/terraform-provider-scaffolding 6 | name: Release 7 | 8 | on: 9 | push: 10 | tags: 11 | - 'v*' 12 | 13 | # to allow the action to create a release 14 | permissions: 15 | contents: write 16 | 17 | jobs: 18 | goreleaser: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v4 23 | 24 | - name: Unshallow 25 | run: git fetch --prune --unshallow 26 | 27 | - name: Set up Go 28 | uses: actions/setup-go@v5 29 | with: 30 | go-version: '1.24' 31 | 32 | - name: Run GoReleaser 33 | uses: goreleaser/goreleaser-action@v6 34 | with: 35 | version: v2 36 | args: release --clean 37 | env: 38 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin 2 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | run: 4 | timeout: 5m 5 | 6 | linters: 7 | default: none 8 | enable: 9 | - asciicheck 10 | - bodyclose 11 | - copyloopvar 12 | - dogsled 13 | - errcheck 14 | - exhaustive 15 | - godot 16 | - goheader 17 | - gomodguard 18 | - goprintffuncname 19 | - govet 20 | - ineffassign 21 | - misspell 22 | - nakedret 23 | - nolintlint 24 | - prealloc 25 | - staticcheck 26 | - unconvert 27 | - unused 28 | - whitespace 29 | # - cyclop 30 | # - dupl 31 | # - durationcheck 32 | # - errname 33 | # - errorlint 34 | # - exhaustivestruct 35 | # - forbidigo 36 | # - forcetypeassert 37 | # - funlen 38 | # - gochecknoglobals 39 | # - gochecknoinits 40 | # - gocognit 41 | # - goconst 42 | # - gocritic 43 | # - gocyclo 44 | # - godox 45 | # - goerr113 46 | # - gofumpt 47 | # - golint 48 | # - gomnd 49 | # - gomoddirectives 50 | # - gosec (gas) 51 | # - gosimple (megacheck) 52 | # - ifshort 53 | # - importas 54 | # - interfacer 55 | # - lll 56 | # - makezero 57 | # - maligned 58 | # - nestif 59 | # - nilerr 60 | # - nlreturn 61 | # - noctx 62 | # - paralleltest 63 | # - predeclared 64 | # - promlinter 65 | # - revive 66 | # - scopelint 67 | # - sqlclosecheck 68 | # - tagliatelle 69 | # - testpackage 70 | # - thelper 71 | # - tparallel 72 | # - unparam 73 | # - wastedassign 74 | # - wrapcheck 75 | # - wsl 76 | exclusions: 77 | generated: lax 78 | presets: 79 | - comments 80 | - common-false-positives 81 | - legacy 82 | - std-error-handling 83 | #rules: 84 | # - linters: 85 | # - staticcheck 86 | # text: "SA1019: (x509.EncryptPEMBlock|strings.Title)" 87 | paths: 88 | - third_party$ 89 | - builtin$ 90 | - examples$ 91 | formatters: 92 | enable: 93 | - gofmt 94 | - goimports 95 | - gci 96 | exclusions: 97 | generated: lax 98 | paths: 99 | - third_party$ 100 | - builtin$ 101 | - examples$ 102 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # Visit https://goreleaser.com for documentation on how to customize this 2 | # behavior. 
3 | version: 2 4 | before: 5 | hooks: 6 | # this is just an example and not a requirement for provider building/publishing 7 | - go mod tidy 8 | builds: 9 | - main: ./cmd/cyclone 10 | env: 11 | # goreleaser does not work with CGO, it could also complicate 12 | # usage by users in CI/CD systems like Terraform Cloud where 13 | # they are unable to install libraries. 14 | - CGO_ENABLED=0 15 | mod_timestamp: '{{ .CommitTimestamp }}' 16 | flags: 17 | - -trimpath 18 | ldflags: 19 | - '-s -w -X github.com/sapcc/cyclone/pkg.Version={{.Version}}' 20 | goos: 21 | - freebsd 22 | - windows 23 | - linux 24 | - darwin 25 | goarch: 26 | - amd64 27 | - '386' 28 | - arm 29 | - arm64 30 | ignore: 31 | - goos: darwin 32 | goarch: '386' 33 | binary: '{{ .ProjectName }}' 34 | archives: 35 | - format: zip 36 | name_template: '{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}' 37 | checksum: 38 | name_template: '{{ .ProjectName }}-{{ .Version }}-SHA256SUMS' 39 | algorithm: sha256 40 | release: 41 | # Visit your project's GitHub Releases page to publish this release. 42 | draft: true 43 | changelog: 44 | disable: true 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PKG:=github.com/sapcc/cyclone 2 | APP_NAME:=cyclone 3 | PWD:=$(shell pwd) 4 | UID:=$(shell id -u) 5 | VERSION:=$(shell git describe --tags --always --dirty="-dev" | sed "s/^v//") 6 | LDFLAGS:=-X $(PKG)/pkg.Version=$(VERSION) -w -s 7 | 8 | export CGO_ENABLED:=0 9 | 10 | build: fmt vet 11 | GOOS=linux go build -mod=mod -trimpath -ldflags="$(LDFLAGS)" -o bin/$(APP_NAME) ./cmd/$(APP_NAME) 12 | GOOS=darwin go build -mod=mod -trimpath -ldflags="$(LDFLAGS)" -o bin/$(APP_NAME)_darwin ./cmd/$(APP_NAME) 13 | GOOS=windows go build -mod=mod -trimpath -ldflags="$(LDFLAGS)" -o bin/$(APP_NAME).exe ./cmd/$(APP_NAME) 14 | 15 | docker: 16 | docker pull golang:latest 17 | docker run -ti --rm -e GOCACHE=/tmp -v $(PWD):/$(APP_NAME) -u $(UID):$(UID) --workdir /$(APP_NAME) golang:latest make 18 | 19 | fmt: 20 | gofmt -s -w cmd pkg 21 | 22 | vet: 23 | go vet -mod=mod ./cmd/... ./pkg/... 24 | 25 | static: 26 | staticcheck ./cmd/... ./pkg/... 27 | 28 | mod: 29 | go mod tidy 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cyclone 2 | 3 | Clone OpenStack entities easily. 4 | 5 | ## Why? 6 | 7 | In modern clusters compute instances are treated as cattle, but there are exceptions when a compute instance is a pet that needs care, especially when you need to migrate or clone it to a new OpenStack region or availability zone. 8 | 9 | Here comes cyclone (**C**loud **Clone** or cclone) to help you with this task. It takes care of all volumes attached to a VM and clones them, performing all required intermediate type conversions. 10 | 11 | ## Help 12 | 13 | By default Glance image data is streamed through cyclone, and the traffic is consumed on the execution side. To enable the Glance V2 [web-download](https://docs.openstack.org/glance/latest/admin/interoperable-image-import.html#image-import-methods) method, set the `--image-web-download` flag. This method allows Glance to download an image using a remote URL. It is not recommended to use the **web-download** method for images bigger than 1-10 GiB, since the Glance service will try to download the image to its intermediate local storage, which may cause an insufficient disk space error. 14 | 15 | A remote URL can be generated using a Swift [Temporary URL](https://docs.openstack.org/swift/latest/api/temporary_url_middleware.html).
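For illustration, such a temporary URL could be generated with the `swift` CLI from python-swiftclient; the secret key, container, and object names below are placeholders:

```sh
# one-time step: set a temp URL secret key on the account
$ swift post -m "Temp-URL-Key:mysecretkey"
# print a signed GET path for the image object, valid for one hour;
# combine the printed path with the Swift endpoint host to get the remote URL
$ swift tempurl GET 3600 /v1/AUTH_myproject/mycontainer/myimage.raw mysecretkey
```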
16 | 17 | A volume migration between regions is performed by converting the volume to an image, migrating the image to the target region, and then converting the image back to a volume. 18 | 19 | A volume migration within the same region is performed using a [Volume Transfer](https://docs.openstack.org/cinder/latest/cli/cli-manage-volumes.html#transfer-a-volume) method. 20 | 21 | By default the tool uses the same credentials from environment variables for the source and destination projects, but for the destination you can define a different region, domain and project name. It is also possible to override destination credentials via OpenStack environment variables with the `TO_` prefix or via CLI parameters.
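For example, assuming the `TO_` prefix simply wraps the standard `OS_` variables, a destination override might look like this (all values are placeholders):

```sh
$ source openrc-of-the-source-project
$ export TO_OS_REGION_NAME=eu-de-2
$ export TO_OS_PROJECT_NAME=destination-project-name
$ export TO_OS_PASSWORD=destination-password
```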
22 | 23 | ~> **Note:** Be aware of the quota, especially the source project quota, when cloning a volume. It requires up to 2x the source volume size in Cinder (Block Storage) quota. If the `--clone-via-snapshot` flag is specified, the quota requirement increases up to 3x the source volume size. 24 | 25 | ~> **Note:** Cloning a volume within the same region but different availability zones requires extra Swift storage quota. If you are unable to use Swift in this case, you can specify the `--clone-via-snapshot` flag. 26 | 27 | ~> **Note:** It is strongly recommended to shut down the VM before you start a migration of a VM or its volumes. 28 | 29 | ~> **Note:** By default cyclone writes all OpenStack request/response logs into a `cyclone` directory located in the system temporary directory. Specify the `-d` or `--debug` flag if you want to see these logs in the console output. 30 | 31 | ```sh 32 | Clone OpenStack entities easily 33 | 34 | Usage: 35 | cyclone [command] 36 | 37 | Available Commands: 38 | backup 39 | completion Generate the autocompletion script for the specified shell 40 | help Help about any command 41 | image Clone an image 42 | secret Clone a secret 43 | security-group Clone a security group 44 | server Clone a server 45 | share Clone a share 46 | version Print version information 47 | volume Clone a volume 48 | 49 | Flags: 50 | -d, --debug print out request and response objects 51 | -h, --help help for cyclone 52 | --image-web-download use Glance web-download image import method 53 | -k, --insecure Allow insecure server connections (use if you understand the risks) 54 | -n, --no assume "no" to all questions 55 | --timeout-backup string timeout to wait for a backup status (default "24h") 56 | --timeout-image string timeout to wait for an image status (default "24h") 57 | --timeout-secret string timeout to wait for a secret status (default "24h") 58 | --timeout-security-group string timeout to wait for a security group status (default "24h") 59 | --timeout-server string timeout to wait for a server status (default "24h") 60 | --timeout-share string timeout to wait for a share status (default "24h") 61 | --timeout-share-replica string timeout to wait for a share replica status (default "24h") 62 | --timeout-share-snapshot string timeout to wait for a share snapshot status (default "24h") 63 | --timeout-snapshot string timeout to wait for a snapshot status (default "24h") 64 | --timeout-volume string timeout to wait for a volume status (default "24h") 65 | --to-application-credential-id string destination application credential ID 66 | --to-application-credential-name string destination application credential name 67 | --to-application-credential-secret string destination application credential secret 68 | --to-auth-url string destination auth URL (if not provided, detected automatically from the source auth URL and destination region) 69 | --to-domain string destination domain name 70 | --to-password string destination username password 71 | --to-project string destination project name 72 | --to-region string destination region 73 | --to-username string destination username 74 | -y, --yes assume "yes" to all questions 75 | 76 | Use "cyclone [command] --help" for more information about a command. 77 | ``` 78 | 79 | ## Examples 80 | 81 | ### Clone an image between regions 82 | 83 | ```sh 84 | $ source openrc-of-the-source-project 85 | $ cyclone image 77c125f1-2c7b-473e-a56b-28a9a0bc4787 --to-region eu-de-2 --to-project destination-project-name --to-image-name image-from-source-project-name --image-web-download 86 | ``` 87 | 88 | ~> **Note:** Please ensure that your OpenStack user has sufficient permissions (e.g. the `image_admin` and `swiftoperator` user roles) before initiating the above command. 89 | 90 | ### Clone an image between regions using download/upload method 91 | 92 | ```sh 93 | $ source openrc-of-the-source-project 94 | $ cyclone image 77c125f1-2c7b-473e-a56b-28a9a0bc4787 --to-region eu-de-2 --to-project destination-project-name --to-image-name image-from-source-project-name 95 | ``` 96 | 97 | ### Clone a bootable volume between regions 98 | 99 | ```sh 100 | $ source openrc-of-the-source-project 101 | $ cyclone volume c4c18329-b124-4a23-8546-cf1ca502ef95 --to-region eu-de-2 --to-project destination-project-name --to-volume-name volume-from-source-project-name 102 | ``` 103 | 104 | ### Clone a volume within the same project, but different availability zones 105 | 106 | ```sh 107 | $ source openrc-of-the-source-project 108 | $ cyclone volume 97d682ae-840f-461f-b956-98af30533a22 --to-az eu-de-2a 109 | ``` 110 | 111 | ### Clone a server with all attached volumes to a specific availability zone 112 | 113 | ```sh 114 | $ source openrc-of-the-source-project 115 | $ cyclone server 6eb76733-95b7-4867-9f83-a6ab19804e2f --to-az eu-de-2a --to-key-name my-nova-keypair 116 | ``` 117 | 118 | ### Clone a server with local storage to a server with bootable Cinder storage 119 | 120 | `--bootable-volume 16` will create a 16 GiB bootable volume from the source VM snapshot and create a new VM using this volume. 121 | 122 | ```sh 123 | $ source openrc-of-the-source-project 124 | $ cyclone server 6eb76733-95b7-4867-9f83-a6ab19804e2f --bootable-volume 16 --to-key-name my-nova-keypair 125 | ``` 126 | 127 | ### Clone a server with a local disk or a bootable volume only 128 | 129 | The `--bootable-disk-only` flag allows you to clone a VM with only its local disk or bootable volume, ignoring all secondary attached volumes. 130 | 131 | ```sh 132 | $ source openrc-of-the-source-project 133 | $ cyclone server 6eb76733-95b7-4867-9f83-a6ab19804e2f --bootable-disk-only --to-key-name my-nova-keypair 134 | ``` 135 | 136 | ### Clone a server with a bootable volume to a server with a local disk 137 | 138 | The `--local-disk` flag allows you to clone a VM with a Cinder bootable volume to a VM with a local disk. 139 | 140 | ```sh 141 | $ source openrc-of-the-source-project 142 | $ cyclone server 6eb76733-95b7-4867-9f83-a6ab19804e2f --local-disk --to-key-name my-nova-keypair 143 | ``` 144 | 145 | ### Clone only server artifacts 146 | 147 | The `--skip-server-creation` flag clones only the images or volumes that are used by or attached to the source server; the destination server won't be created. 148 | The example below converts the server's local bootable disk to a bootable block storage volume, which can be attached to a server later. 149 | 150 | ```sh 151 | $ source openrc-of-the-source-project 152 | $ cyclone server 6eb76733-95b7-4867-9f83-a6ab19804e2f --bootable-volume 64 --skip-server-creation 153 | ``` 154 | 155 | ### Upload a local image file into a backup 156 | 157 | Properties must be defined when a backup is supposed to be restored to a bootable volume.
158 | 159 | ```sh 160 | $ source openrc-of-the-source-project 161 | $ cyclone backup upload my-file.vmdk --to-container-name swift-backup-container --volume-size=160 --threads=16 \ 162 | -p hw_vif_model=VirtualVmxnet3 \ 163 | -p vmware_ostype=sles12_64Guest \ 164 | -p hypervisor_type=vmware \ 165 | -p min_ram=1008 \ 166 | -p vmware_disktype=streamOptimized \ 167 | -p disk_format=vmdk \ 168 | -p hw_video_ram=16 \ 169 | -p vmware_adaptertype=paraVirtual \ 170 | -p container_format=bare \ 171 | -p min_disk=10 \ 172 | -p architecture=x86_64 \ 173 | -p hw_disk_bus=scsi 174 | ``` 175 | 176 | ### Upload a remote Glance image into a backup 177 | 178 | ```sh 179 | $ source openrc-of-the-source-project 180 | $ cyclone backup upload my-glance-image --to-container-name swift-backup-container --volume-size=160 --threads=16 181 | ``` 182 | 183 | ### Transfer a big volume from one region to another 184 | 185 | ~> **Note:** The `cyclone backup upload` command produces high traffic and CPU/RAM usage. It's recommended to run it inside a VM located in the target region. 186 | 187 | In this case you need to convert a volume to an image first: 188 | 189 | ```sh 190 | $ source openrc-of-the-source-project 191 | $ cyclone volume to-image my-cinder-volume --to-image-name my-glance-image 192 | ``` 193 | 194 | then transfer it over multiple parallel connections to a target backup resource with a subsequent volume restore action: 195 | 196 | ```sh 197 | $ source openrc-of-the-source-project 198 | $ cyclone backup upload my-glance-image --to-container-name swift-backup-container --to-region my-region-1 \ 199 | --volume-size=160 --threads=16 --restore-volume 200 | ``` 201 | 202 | It's strongly recommended to run the `cyclone backup upload` command inside a VM located in the source or the target region. 203 | 204 | ### Create a new volume from an existing backup 205 | 206 | ```sh 207 | $ source openrc-of-the-source-project 208 | $ cyclone backup restore my-backup 209 | ``` 210 | 211 | ### Clone an existing backup to another region 212 | 213 | ~> **Note:** The `cyclone backup clone` command produces high traffic. It's recommended to run it inside a VM located in the source or the target region. 214 | 215 | ```sh 216 | $ source openrc-of-the-source-project 217 | $ cyclone backup clone my-backup --to-region my-region-1 --threads=16 218 | ``` 219 | 220 | ### Manila shares support 221 | 222 | The Manila share type must support replicas, i.e. the `replication_type` extra spec must be set:
223 | 224 | ```sh 225 | $ openstack share type show default -c optional_extra_specs -f json | jq '.optional_extra_specs.replication_type' 226 | "dr" 227 | ``` 228 | 229 | #### Clone a Manila share to a new share in a new availability zone 230 | 231 | ```sh 232 | $ source openrc-of-the-source-project 233 | $ cyclone share my-share --to-share-name my-new-share --to-az my-region-1b 234 | ``` 235 | 236 | #### Move an existing Manila share to a new availability zone 237 | 238 | ```sh 239 | $ source openrc-of-the-source-project 240 | $ cyclone share move my-share --to-az my-region-1b 241 | ``` 242 | 243 | ## Build 244 | 245 | ```sh 246 | $ make 247 | # or within the docker container 248 | $ make docker 249 | ``` 250 | -------------------------------------------------------------------------------- /cmd/cyclone/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/sapcc/cyclone/pkg" 5 | ) 6 | 7 | func main() { 8 | pkg.Execute() 9 | } 10 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/sapcc/cyclone 2 | 3 | go 1.24 4 | 5 | require ( 6 | github.com/google/uuid v1.6.0 7 | github.com/gophercloud/gophercloud/v2 v2.7.0 8 | github.com/gophercloud/utils/v2 v2.0.0-20250506092640-af27464b6166 9 | github.com/klauspost/compress v1.18.0 10 | github.com/machinebox/progress v0.2.0 11 | github.com/majewsky/schwift/v2 v2.0.0 12 | github.com/sapcc/go-bits v0.0.0-20250519145235-ff15acf81904 13 | github.com/sapcc/swift-http-import v0.0.0-20250519153110-b7bf408ce850 14 | github.com/spf13/cobra v1.9.1 15 | github.com/spf13/viper v1.20.1 16 | github.com/xhit/go-str2duration/v2 v2.1.0 17 | golang.org/x/term v0.32.0 18 | ) 19 | 20 | require ( 21 | github.com/cactus/go-statsd-client/v5 v5.1.0 // indirect 22 | github.com/fsnotify/fsnotify v1.8.0 // indirect 23 | github.com/go-viper/mapstructure/v2 v2.2.1 // indirect 24 | github.com/gofrs/uuid/v5 v5.3.2 // indirect 25 | github.com/google/go-github/v68 v68.0.0 // indirect 26 | github.com/google/go-github/v72 v72.0.0 // indirect 27 | github.com/google/go-querystring v1.1.0 // indirect 28 | github.com/hashicorp/hcl v1.0.0 // indirect 29 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 30 | github.com/jpillora/longestcommon v0.0.0-20161227235612-adb9d91ee629 // indirect 31 | github.com/magiconair/properties v1.8.7 // indirect 32 | github.com/matryer/is v1.4.1 // indirect 33 | github.com/mitchellh/mapstructure v1.5.0 // indirect 34 | github.com/pelletier/go-toml/v2 v2.2.3 // indirect 35 | github.com/sagikazarmark/locafero v0.7.0 // indirect 36 | github.com/sagikazarmark/slog-shim v0.1.0 // indirect 37 | github.com/sapcc/go-api-declarations v1.15.0 // indirect 38 | github.com/sourcegraph/conc v0.3.0 // indirect 39 | github.com/spf13/afero v1.12.0 // indirect 40 | github.com/spf13/cast v1.7.1 // indirect 41 | github.com/spf13/pflag v1.0.6 // indirect 42 | github.com/subosito/gotenv v1.6.0 // indirect 43 | github.com/ulikunitz/xz v0.5.12 // indirect 44 | go.uber.org/atomic v1.11.0 // indirect 45 | go.uber.org/multierr v1.9.0 // indirect 46 | golang.org/x/crypto v0.38.0 // indirect 47 | golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect 48 | golang.org/x/net v0.40.0 // indirect 49 | golang.org/x/oauth2 v0.30.0 // indirect 50 | golang.org/x/sys v0.33.0 // indirect 51 | golang.org/x/text v0.25.0 // indirect 52 | gopkg.in/ini.v1 v1.67.0 
// indirect 53 | gopkg.in/yaml.v2 v2.4.0 // indirect 54 | gopkg.in/yaml.v3 v3.0.1 // indirect 55 | pault.ag/go/debian v0.18.0 // indirect 56 | pault.ag/go/topsort v0.1.1 // indirect 57 | ) 58 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/cactus/go-statsd-client/v5 v5.1.0 h1:sbbdfIl9PgisjEoXzvXI1lwUKWElngsjJKaZeC021P4= 2 | github.com/cactus/go-statsd-client/v5 v5.1.0/go.mod h1:COEvJ1E+/E2L4q6QE5CkjWPi4eeDw9maJBMIuMPBZbY= 3 | github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 4 | github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 8 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 10 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 11 | github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 12 | github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= 13 | github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= 14 | github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 15 | github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= 16 | github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= 17 | github.com/gofrs/uuid/v5 v5.3.1 h1:aPx49MwJbekCzOyhZDjJVb0hx3A0KLjlbLx6p2gY0p0= 18 | github.com/gofrs/uuid/v5 v5.3.1/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= 19 | github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0= 20 | github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= 21 | github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 22 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 23 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 24 | github.com/google/go-github/v68 v68.0.0 h1:ZW57zeNZiXTdQ16qrDiZ0k6XucrxZ2CGmoTvcCyQG6s= 25 | github.com/google/go-github/v68 v68.0.0/go.mod h1:K9HAUBovM2sLwM408A18h+wd9vqdLOEqTUCbnRIcx68= 26 | github.com/google/go-github/v72 v72.0.0 h1:FcIO37BLoVPBO9igQQ6tStsv2asG4IPcYFi655PPvBM= 27 | github.com/google/go-github/v72 v72.0.0/go.mod h1:WWtw8GMRiL62mvIquf1kO3onRHeWWKmK01qdCY8c5fg= 28 | github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= 29 | github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= 30 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 31 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 32 | github.com/gophercloud/gophercloud/v2 v2.5.0 h1:DubPfC43gsZiGZ9LT1IJflVMm+0rck0ejoPsH8D5rqk= 33 | github.com/gophercloud/gophercloud/v2 v2.5.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= 34 | 
github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E= 35 | github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= 36 | github.com/gophercloud/utils/v2 v2.0.0-20250106080359-c8704d65b4fa h1:Hc3Dc0KeW5ejxPKbKd1HNvzLUJXT3YLXq6nz2dogrws= 37 | github.com/gophercloud/utils/v2 v2.0.0-20250106080359-c8704d65b4fa/go.mod h1:z7jvB7v6jhL8w58JMuaPV7pGp07PTvOwIL3Vk2stf3M= 38 | github.com/gophercloud/utils/v2 v2.0.0-20250506092640-af27464b6166 h1:U82pil2tMV/BkeWmy3eKT9mTmodSfwOw+Czl/s5eBrw= 39 | github.com/gophercloud/utils/v2 v2.0.0-20250506092640-af27464b6166/go.mod h1:WypkO28BNmRXS8EPyW6I58L3qrBUy3w60schxwx7ZVc= 40 | github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= 41 | github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 42 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 43 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 44 | github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 45 | github.com/jpillora/longestcommon v0.0.0-20161227235612-adb9d91ee629 h1:1dSBUfGlorLAua2CRx0zFN7kQsTpE2DQSmr7rrTNgY8= 46 | github.com/jpillora/longestcommon v0.0.0-20161227235612-adb9d91ee629/go.mod h1:mb5nS4uRANwOJSZj8rlCWAfAcGi72GGMIXx+xGOjA7M= 47 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= 48 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 49 | github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 50 | github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 51 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 52 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 53 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 54 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 55 | github.com/machinebox/progress v0.2.0 h1:7z8+w32Gy1v8S6VvDoOPPBah3nLqdKjr3GUly18P8Qo= 56 | github.com/machinebox/progress v0.2.0/go.mod h1:hl4FywxSjfmkmCrersGhmJH7KwuKl+Ueq9BXkOny+iE= 57 | github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= 58 | github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= 59 | github.com/majewsky/schwift/v2 v2.0.0 h1:Rgzv/18yMAej3bBZWoxYmS2lZMiCKD6P451dU8TyQtE= 60 | github.com/majewsky/schwift/v2 v2.0.0/go.mod h1:qqN4N7s2+jhwebBgxFZm/e2NOqDzNphwb7SnAIB5G+4= 61 | github.com/matryer/is v1.4.1 h1:55ehd8zaGABKLXQUe2awZ99BD/PTc2ls+KV/dXphgEQ= 62 | github.com/matryer/is v1.4.1/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= 63 | github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= 64 | github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 65 | github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= 66 | github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= 67 | github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= 68 | github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= 69 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 70 | 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 71 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 72 | github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= 73 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 74 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 75 | github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= 76 | github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= 77 | github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= 78 | github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= 79 | github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= 80 | github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= 81 | github.com/sapcc/go-api-declarations v1.13.2 h1:dPYYsjwKGObSAm6+K+dYCiLQWunYuWkywlZnuXfjsmk= 82 | github.com/sapcc/go-api-declarations v1.13.2/go.mod h1:83R3hTANhuRXt/pXDby37IJetw8l7DG41s33Tp9NXxI= 83 | github.com/sapcc/go-api-declarations v1.15.0 h1:41i1YnHaUq6HGzXtuCd5NqjJsWl8Pav3MSfMlPOYJuQ= 84 | github.com/sapcc/go-api-declarations v1.15.0/go.mod h1:WIT/BvApD9klcDb9aQbNgOr6sx0hGwd8NUnmGhNVuQQ= 85 | github.com/sapcc/go-bits v0.0.0-20250214102218-9d3698f376b3 h1:BfQBVOSy60b+fbM8Bz13/xZkya24CbDEMph8tcPE6yU= 86 | github.com/sapcc/go-bits v0.0.0-20250214102218-9d3698f376b3/go.mod h1:WQfpJRe9wMLwrQqmamuK2Mf4BSj3YE0vIl1ryUOIzXk= 87 | github.com/sapcc/go-bits v0.0.0-20250519145235-ff15acf81904 h1:9E9GAKwZBuwQVY0LprUWt44cFMbUUslMyJqm8Mn56l4= 88 | github.com/sapcc/go-bits v0.0.0-20250519145235-ff15acf81904/go.mod h1:LqJWSwArb4faPLSyPE2B8guOf0f/taCOZRS1g4qQWHA= 89 | github.com/sapcc/swift-http-import v0.0.0-20250110100603-5251131bfffe h1:sxNWv2cQQ+O8Tgn/BeVO5qTSdJ5/dyocSXmHhyTgqRQ= 90 | github.com/sapcc/swift-http-import v0.0.0-20250110100603-5251131bfffe/go.mod h1:UkkQ4uh+QFMVUZekDN92CTp8I2flLsbig/S/RXab+rU= 91 | github.com/sapcc/swift-http-import v0.0.0-20250519153110-b7bf408ce850 h1:2t7VjRpqbEWe6m9s8WIu6ot93gEAQsr85OMhZoDw2wU= 92 | github.com/sapcc/swift-http-import v0.0.0-20250519153110-b7bf408ce850/go.mod h1:DYK9DRBSyqF+YgNakYID6oiWOlXymD0bVzYCOX0ANjk= 93 | github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= 94 | github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= 95 | github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= 96 | github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= 97 | github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= 98 | github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= 99 | github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= 100 | github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= 101 | github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= 102 | github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= 103 | github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= 104 | github.com/spf13/cast v1.7.1/go.mod 
h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= 105 | github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= 106 | github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= 107 | github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= 108 | github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= 109 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 110 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 111 | github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= 112 | github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 113 | github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= 114 | github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= 115 | github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= 116 | github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= 117 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 118 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 119 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 120 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 121 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 122 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 123 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 124 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 125 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 126 | github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= 127 | github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 128 | github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= 129 | github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= 130 | github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= 131 | github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= 132 | go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= 133 | go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= 134 | go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= 135 | go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= 136 | golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= 137 | golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= 138 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 139 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= 140 | golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= 141 | golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= 142 | golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= 143 | golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod 
h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= 144 | golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= 145 | golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= 146 | golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= 147 | golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 148 | golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= 149 | golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= 150 | golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= 151 | golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= 152 | golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= 153 | golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= 154 | golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= 155 | golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 156 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 157 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 158 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= 159 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 160 | golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= 161 | golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= 162 | golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= 163 | golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= 164 | golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= 165 | golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= 166 | golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= 167 | golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= 168 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 169 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 170 | golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= 171 | golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= 172 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 173 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 174 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 175 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 176 | gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= 177 | gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= 178 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 179 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 180 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 181 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 182 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 183 | pault.ag/go/debian v0.18.0 h1:nr0iiyOU5QlG1VPnhZLNhnCcHx58kukvBJp+dvaM6CQ= 184 | pault.ag/go/debian 
v0.18.0/go.mod h1:JFl0XWRCv9hWBrB5MDDZjA5GSEs1X3zcFK/9kCNIUmE= 185 | pault.ag/go/topsort v0.1.1 h1:L0QnhUly6LmTv0e3DEzbN2q6/FGgAcQvaEw65S53Bg4= 186 | pault.ag/go/topsort v0.1.1/go.mod h1:r1kc/L0/FZ3HhjezBIPaNVhkqv8L0UJ9bxRuHRVZ0q4= 187 | -------------------------------------------------------------------------------- /pkg/backup.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/md5" 7 | "crypto/sha256" 8 | "encoding/hex" 9 | "encoding/json" 10 | "fmt" 11 | "io" 12 | "os" 13 | "runtime" 14 | "strconv" 15 | "strings" 16 | "sync" 17 | "time" 18 | 19 | "github.com/google/uuid" 20 | "github.com/gophercloud/gophercloud/v2" 21 | "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/backups" 22 | "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes" 23 | "github.com/gophercloud/gophercloud/v2/openstack/image/v2/imagedata" 24 | "github.com/gophercloud/gophercloud/v2/openstack/image/v2/images" 25 | "github.com/gophercloud/gophercloud/v2/openstack/objectstorage/v1/containers" 26 | "github.com/gophercloud/gophercloud/v2/openstack/objectstorage/v1/objects" 27 | backups_utils "github.com/gophercloud/utils/v2/openstack/blockstorage/v3/backups" 28 | images_utils "github.com/gophercloud/utils/v2/openstack/image/v2/images" 29 | "github.com/klauspost/compress/zlib" 30 | "github.com/machinebox/progress" 31 | "github.com/spf13/cobra" 32 | "github.com/spf13/viper" 33 | ) 34 | 35 | var ( 36 | waitForBackupSec float64 37 | ) 38 | 39 | const ( 40 | backupChunk = 52428800 // 50 MiB 41 | sha256chunk = 32768 // 32 KiB 42 | compressionLevel = 6 // comply with the default python zlib level 6 43 | backupTimeFormat = "20060102150405" 44 | ) 45 | 46 | type chunk struct { 47 | wg *sync.WaitGroup 48 | i int 49 | path string 50 | containerName string 51 | objClient *gophercloud.ServiceClient 52 | reader *progress.Reader 53 | meta *metadata 54 | sha256meta *sha256file 55 | contChan chan bool 56 | limitChan chan struct{} 57 | errChan chan error 58 | } 59 | 60 | func createBackupSpeed(ctx context.Context, client *gophercloud.ServiceClient, backup *backups.Backup) { 61 | if client != nil { 62 | container, err := containers.Get(ctx, client, backup.Container, nil).Extract() 63 | if err != nil { 64 | log.Printf("Failed to detect a backup container size: %s", err) 65 | return 66 | } 67 | t := backup.UpdatedAt.Sub(backup.CreatedAt) 68 | log.Printf("Time to create a backup: %s", t) 69 | size := float64(container.BytesUsed) / (1024 * 1024) 70 | log.Printf("Size of the backup: %.2f MiB", size) 71 | log.Printf("Speed of the backup creation: %.2f MiB/sec", size/t.Seconds()) 72 | } 73 | } 74 | 75 | func waitForBackup(ctx context.Context, client *gophercloud.ServiceClient, id string, secs float64) (*backups.Backup, error) { 76 | var backup *backups.Backup 77 | var err error 78 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) { 79 | backup, err = backups.Get(ctx, client, id).Extract() 80 | if err != nil { 81 | return false, err 82 | } 83 | 84 | log.Printf("Intermediate backup status: %s", backup.Status) 85 | if backup.Status == "available" { 86 | return true, nil 87 | } 88 | 89 | if strings.Contains(backup.Status, "error") { 90 | return false, fmt.Errorf("intermediate backup status is %q", backup.Status) 91 | } 92 | 93 | // continue status checks 94 | return false, nil 95 | }) 96 | 97 | return backup, err 98 | } 99 | 100 | // calculate sha256 hashes in parallel.
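// This appears to follow the cinder-backup chunked-driver conventions: each
// 50 MiB backup chunk (backupChunk) is split into 32 KiB blocks (sha256chunk),
// every block is hashed by its own goroutine, and the per-block digests are
// stored under the chunk index i in sha256meta, to be serialized later into
// the backup's sha256 metadata object.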
101 | func calcSha256Hash(myChunk []byte, sha256meta *sha256file, i int, done chan struct{}) { 102 | var length = len(myChunk) 103 | var hashes int 104 | if n, mod := length/sha256chunk, length%sha256chunk; mod > 0 { 105 | hashes = n + 1 106 | } else { 107 | hashes = n 108 | } 109 | 110 | h := make([][32]byte, hashes) 111 | sha256calc := func(j int, wg *sync.WaitGroup) { 112 | defer wg.Done() 113 | start := j * sha256chunk 114 | end := start + sha256chunk 115 | if end > length { 116 | end = length 117 | } 118 | h[j] = sha256.Sum256(myChunk[start:end]) 119 | } 120 | 121 | wg := &sync.WaitGroup{} 122 | for j := 0; j < hashes; j++ { 123 | wg.Add(1) 124 | go sha256calc(j, wg) 125 | } 126 | 127 | wg.Wait() 128 | 129 | sha256meta.Lock() 130 | sha256meta.Sha256s[i] = h 131 | sha256meta.Unlock() 132 | 133 | close(done) 134 | } 135 | 136 | // calculate md5 hashes. 137 | func calcMd5Hash(myChunk []byte, meta *metadata, i int, done chan struct{}, chunkPath string) { 138 | hash := md5.Sum(myChunk) 139 | object := backupChunkEntry{ 140 | chunkPath: { 141 | "compression": "zlib", 142 | "length": len(myChunk), 143 | "md5": hex.EncodeToString(hash[:]), 144 | "offset": (i - 1) * backupChunk, 145 | }, 146 | } 147 | meta.Lock() 148 | meta.Objects[i] = object 149 | meta.Unlock() 150 | 151 | close(done) 152 | } 153 | 154 | func (c *chunk) process(ctx context.Context) { 155 | defer func() { 156 | c.wg.Done() 157 | // release the queue 158 | <-c.limitChan 159 | }() 160 | 161 | myChunk, err := io.ReadAll(io.LimitReader(c.reader, backupChunk)) 162 | if err != nil { 163 | if err != io.EOF { 164 | c.errChan <- fmt.Errorf("failed to read file: %s", err) 165 | return 166 | } 167 | } 168 | if len(myChunk) == 0 { 169 | // stop further reading, no data 170 | c.contChan <- false 171 | return 172 | } else if err == io.EOF { 173 | // EOF, but we still need to process some data 174 | c.contChan <- false 175 | } else { 176 | // allow next go routine to process the input 177 | c.contChan <- true 178 | } 179 | 180 | chunkPath := fmt.Sprintf("%s-%05d", c.path, c.i) 181 | 182 | // calculate md5 hash while we upload chunks 183 | md5done := make(chan struct{}) 184 | go calcMd5Hash(myChunk, c.meta, c.i, md5done, chunkPath) 185 | 186 | // calculate sha256 hash while we upload chunks 187 | sha256done := make(chan struct{}) 188 | go calcSha256Hash(myChunk, c.sha256meta, c.i, sha256done) 189 | 190 | rb := new(bytes.Buffer) 191 | zf, err := zlib.NewWriterLevel(rb, compressionLevel) 192 | if err != nil { 193 | c.errChan <- fmt.Errorf("failed to set zlib %d compression level: %s", compressionLevel, err) 194 | return 195 | } 196 | _, err = zf.Write(myChunk) 197 | if err != nil { 198 | c.errChan <- fmt.Errorf("failed to write zlib compressed data: %s", err) 199 | return 200 | } 201 | err = zf.Close() 202 | if err != nil { 203 | c.errChan <- fmt.Errorf("failed to flush and close zlib compressed data: %s", err) 204 | return 205 | } 206 | // free up the compressor 207 | zf.Reset(nil) 208 | 209 | // TODO: check if the remote object exists 210 | // upload and retry when upload fails 211 | var retries = 5 212 | var sleep = 15 * time.Second 213 | for j := 0; j < retries; j++ { 214 | uploadOpts := objects.CreateOpts{ 215 | // this is needed for retries 216 | // bytes.Buffer doesn't have UnreadAll method 217 | Content: bytes.NewReader(rb.Bytes()), 218 | } 219 | err = objects.Create(ctx, c.objClient, c.containerName, chunkPath, uploadOpts).Err 220 | if err != nil { 221 | log.Printf("failed to upload %s/%s data in %d retry: %s: sleeping for %0.f 
seconds", c.containerName, chunkPath, j, err, sleep.Seconds()) 222 | time.Sleep(sleep) 223 | continue 224 | } 225 | break 226 | } 227 | // free up the buffer 228 | rb.Reset() 229 | 230 | if err != nil { 231 | c.errChan <- fmt.Errorf("failed to upload %s/%s data: %s", c.containerName, chunkPath, err) 232 | return 233 | } 234 | 235 | <-md5done 236 | <-sha256done 237 | 238 | //nolint:ineffassign 239 | myChunk = nil 240 | } 241 | 242 | func uploadBackup(ctx context.Context, srcImgClient, srcObjClient, dstObjClient, dstVolClient *gophercloud.ServiceClient, backupName, containerName, imageID, az string, properties map[string]string, size int, threads uint) (*backups.Backup, error) { 243 | imageData, err := getSourceData(ctx, srcImgClient, srcObjClient, imageID) 244 | if err != nil { 245 | return nil, err 246 | } 247 | defer imageData.readCloser.Close() 248 | 249 | if len(properties) > 0 { 250 | imageData.properties = properties 251 | } 252 | 253 | if size == 0 { 254 | if imageData.minDisk == 0 { 255 | return nil, fmt.Errorf("target volume size cannot be zero") 256 | } 257 | size = imageData.minDisk 258 | } 259 | 260 | if imageData.minDisk > size { 261 | return nil, fmt.Errorf("cannot create a backup with the size less than the source image min_disk=%d > %d", imageData.minDisk, size) 262 | } 263 | 264 | progressReader := progress.NewReader(imageData.readCloser) 265 | go func() { 266 | var s int64 267 | for p := range progress.NewTicker(context.Background(), progressReader, imageData.size, 1*time.Second) { 268 | s = p.N() - s 269 | speed := s / (1024 * 1024) 270 | s = p.N() 271 | log.Printf("Progress: %d/%d (%.2f%%), speed: %d MiB/sec, remaining: %s", p.N(), p.Size(), p.Percent(), speed, p.Remaining().Round(time.Second)) 272 | } 273 | }() 274 | 275 | var volumeID, backupID string 276 | // generate a new volume UUID 277 | if v, err := uuid.NewUUID(); err != nil { 278 | return nil, fmt.Errorf("failed to generate a new volume UUID: %s", err) 279 | } else { 280 | volumeID = v.String() 281 | } 282 | 283 | // generate a new backup UUID 284 | if v, err := uuid.NewUUID(); err != nil { 285 | return nil, fmt.Errorf("failed to generate a new backup UUID: %s", err) 286 | } else { 287 | backupID = v.String() 288 | } 289 | 290 | path := fmt.Sprintf("volume_%s/%s/az_%s_backup_%s", volumeID, time.Now().UTC().Format(backupTimeFormat), az, backupID) 291 | sha256meta := &sha256file{ 292 | VolumeID: volumeID, 293 | BackupID: backupID, 294 | ChunkSize: sha256chunk, 295 | CreatedAt: time.Now().UTC(), 296 | Version: "1.0.0", 297 | Sha256s: make(map[int][][32]byte), 298 | } 299 | 300 | volMeta := volumeMeta{ 301 | Version: 2, 302 | VolumeBaseMeta: volumeBaseMeta{ 303 | Bootable: len(imageData.properties) > 0, 304 | }, 305 | VolumeGlanceMetadata: imageData.properties, 306 | } 307 | jd, err := json.Marshal(volMeta) 308 | if err != nil { 309 | return nil, fmt.Errorf("failed to marshal meta") 310 | } 311 | 312 | meta := &metadata{ 313 | CreatedAt: sha256meta.CreatedAt, 314 | Version: sha256meta.Version, 315 | VolumeID: sha256meta.VolumeID, 316 | VolumeMeta: string(jd), 317 | Objects: make(map[int]backupChunkEntry), 318 | } 319 | 320 | // create container 321 | _, err = containers.Create(ctx, dstObjClient, containerName, nil).Extract() 322 | if err != nil { 323 | return nil, fmt.Errorf("failed to create a %q container: %s", containerName, err) 324 | } 325 | 326 | var i int 327 | errChan := make(chan error, 1) 328 | contChan := make(chan bool, 1) 329 | limitChan := make(chan struct{}, threads) 330 | wg := &sync.WaitGroup{} 331 
| 332 | err = func() error { 333 | // start 334 | contChan <- true 335 | for { 336 | select { 337 | case err := <-errChan: 338 | return err 339 | case do := <-contChan: 340 | if !do { 341 | return nil 342 | } 343 | i++ 344 | wg.Add(1) 345 | // consume the queue 346 | limitChan <- struct{}{} 347 | c := &chunk{ 348 | wg, 349 | i, 350 | path, 351 | containerName, 352 | dstObjClient, 353 | progressReader, 354 | meta, 355 | sha256meta, 356 | contChan, 357 | limitChan, 358 | errChan, 359 | } 360 | go c.process(ctx) 361 | } 362 | } 363 | }() 364 | if err != nil { 365 | return nil, err 366 | } 367 | 368 | log.Printf("Uploading the rest and the metadata") 369 | wg.Wait() 370 | imageData.readCloser.Close() 371 | 372 | // run garbage collector before processing the potential memory consuming JSON marshalling 373 | runtime.GC() 374 | 375 | // write _sha256file 376 | buf, err := json.MarshalIndent(sha256meta, "", " ") 377 | if err != nil { 378 | return nil, fmt.Errorf("failed to marshal sha256meta: %s", err) 379 | } 380 | sha256meta = nil 381 | 382 | createOpts := objects.CreateOpts{ 383 | Content: bytes.NewReader(buf), 384 | } 385 | p := path + "_sha256file" 386 | err = objects.Create(ctx, dstObjClient, containerName, p, createOpts).Err 387 | if err != nil { 388 | return nil, fmt.Errorf("failed to upload %s/%s data: %s", containerName, p, err) 389 | } 390 | // free up the heap 391 | //nolint:ineffassign 392 | buf = nil 393 | runtime.GC() 394 | 395 | // write _metadata 396 | buf, err = json.MarshalIndent(meta, "", " ") 397 | if err != nil { 398 | return nil, fmt.Errorf("failed to marshal meta: %s", err) 399 | } 400 | meta = nil 401 | 402 | createOpts = objects.CreateOpts{ 403 | Content: bytes.NewReader(buf), 404 | } 405 | p = path + "_metadata" 406 | err = objects.Create(ctx, dstObjClient, containerName, p, createOpts).Err 407 | if err != nil { 408 | return nil, fmt.Errorf("failed to upload %s/%s data: %s", containerName, p, err) 409 | } 410 | // free up the heap 411 | //nolint:ineffassign 412 | buf = nil 413 | runtime.GC() 414 | 415 | // import the backup 416 | service := "cinder.backup.drivers.swift.SwiftBackupDriver" 417 | backupImport := backups.ImportBackup{ 418 | ID: backupID, 419 | DisplayName: &backupName, 420 | VolumeID: volumeID, 421 | AvailabilityZone: &az, 422 | UpdatedAt: time.Now().UTC(), 423 | ServiceMetadata: &path, 424 | Size: &size, 425 | ObjectCount: &i, 426 | Container: &containerName, 427 | Service: &service, 428 | CreatedAt: time.Now().UTC(), 429 | DataTimestamp: time.Now().UTC(), 430 | } 431 | 432 | backupURL, err := json.Marshal(backupImport) 433 | if err != nil { 434 | return nil, fmt.Errorf("failed to marshal backupURL: %s", err) 435 | } 436 | 437 | options := backups.ImportOpts{ 438 | BackupService: service, 439 | BackupURL: backupURL, 440 | } 441 | importResponse, err := backups.Import(ctx, dstVolClient, options).Extract() 442 | if err != nil { 443 | return nil, fmt.Errorf("failed to import the backup: %s", err) 444 | } 445 | 446 | backupObj, err := waitForBackup(ctx, dstVolClient, importResponse.ID, waitForBackupSec) 447 | if err != nil { 448 | return nil, fmt.Errorf("failed to wait for backup status: %s", err) 449 | } 450 | 451 | measureTime("Backup upload time: %s") 452 | 453 | return backupObj, nil 454 | } 455 | 456 | func backupToVolume(ctx context.Context, dstVolClient *gophercloud.ServiceClient, backupObj *backups.Backup, volumeName, volumeType, az string) (*volumes.Volume, error) { 457 | reauthClient(ctx, dstVolClient, "backupToVolume") 458 | 459 | // create a volume 
from a backup 460 | dstVolClient.Microversion = "3.47" 461 | volOpts := volumes.CreateOpts{ 462 | Name: volumeName, 463 | Size: backupObj.Size, 464 | Description: fmt.Sprintf("a volume restored from a %s backup", backupObj.ID), 465 | AvailabilityZone: az, 466 | BackupID: backupObj.ID, 467 | VolumeType: volumeType, 468 | } 469 | 470 | newVolume, err := volumes.Create(ctx, dstVolClient, volOpts, nil).Extract() 471 | if err != nil { 472 | return nil, fmt.Errorf("failed to create a source volume from a backup: %s", err) 473 | } 474 | 475 | newVolume, err = waitForVolume(ctx, dstVolClient, newVolume.ID, waitForVolumeSec) 476 | if err != nil { 477 | return nil, fmt.Errorf("failed to wait for a volume: %s", err) 478 | } 479 | 480 | return newVolume, nil 481 | } 482 | 483 | type imageSource struct { 484 | readCloser io.ReadCloser 485 | size int64 486 | properties map[string]string 487 | minDisk int 488 | } 489 | 490 | func getSourceData(ctx context.Context, srcImgClient, srcObjClient *gophercloud.ServiceClient, imageID string) (*imageSource, error) { 491 | // read file 492 | file, err := os.Open(imageID) 493 | if err == nil { 494 | if fi, err := file.Stat(); err == nil { 495 | return &imageSource{file, fi.Size(), nil, 0}, nil 496 | } else { 497 | log.Printf("Failed to get %q filename size: %s", imageID, err) 498 | } 499 | return &imageSource{file, 0, nil, 0}, nil 500 | } 501 | 502 | log.Printf("Cannot read %q file: %s: fallback to Swift URL as a source", imageID, err) 503 | // read Glance image metadata 504 | image, err := images.Get(ctx, srcImgClient, imageID).Extract() 505 | if err != nil { 506 | return nil, fmt.Errorf("error getting the source image: %s", err) 507 | } 508 | properties := expandImageProperties(image.Properties) 509 | 510 | if srcObjClient != nil { 511 | // read Glance image Swift source 512 | resp := objects.Download(ctx, srcObjClient, fmt.Sprintf("glance_%s", imageID), imageID, nil) 513 | if resp.Err == nil { 514 | if size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64); size > 0 { 515 | return &imageSource{ 516 | resp.Body, 517 | size, 518 | properties, 519 | image.MinDiskGigabytes, 520 | }, nil 521 | } else if err != nil { 522 | log.Printf("Failed to detect %q image size: %s: fallback to %d", imageID, err, image.SizeBytes) 523 | } else { 524 | log.Printf("Failed to detect %q image size: %d is <= 0: fallback to %d", imageID, size, image.SizeBytes) 525 | } 526 | return &imageSource{ 527 | resp.Body, 528 | image.SizeBytes, 529 | properties, 530 | image.MinDiskGigabytes, 531 | }, nil 532 | } 533 | log.Printf("Cannot read Swift URL as a source: %s, fallback to Glance as a source", resp.Err) 534 | } 535 | 536 | // read Glance image 537 | readCloser, err := imagedata.Download(ctx, srcImgClient, imageID).Extract() 538 | if err != nil { 539 | return nil, fmt.Errorf("error getting the source image reader: %s", err) 540 | } 541 | 542 | return &imageSource{ 543 | readCloser, 544 | image.SizeBytes, 545 | properties, 546 | image.MinDiskGigabytes, 547 | }, nil 548 | } 549 | 550 | // BackupCmd represents the backup command. 
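// A typical invocation (hypothetical values; the flags are defined in
// initBackupCmdFlags below) uploads a local image file or a Glance image
// into a Swift-backed Cinder backup and optionally restores it into a
// volume:
//
//	cyclone backup upload ./image.raw \
//	  --to-container-name backups \
//	  --to-backup-name my-backup \
//	  --volume-size 10 \
//	  --threads 4 \
//	  --restore-volume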
551 | var BackupCmd = &cobra.Command{
552 | 	Use: "backup",
553 | }
554 | 
555 | var BackupUploadCmd = &cobra.Command{
556 | 	Use:   "upload <image>",
557 | 	Args:  cobra.ExactArgs(1),
558 | 	Short: "Upload an image into a backup",
559 | 	PreRunE: func(cmd *cobra.Command, args []string) error {
560 | 		if err := parseTimeoutArgs(); err != nil {
561 | 			return err
562 | 		}
563 | 		return viper.BindPFlags(cmd.Flags())
564 | 	},
565 | 	RunE: func(cmd *cobra.Command, args []string) error {
566 | 		image := args[0]
567 | 
568 | 		toVolumeName := viper.GetString("to-volume-name")
569 | 		toBackupName := viper.GetString("to-backup-name")
570 | 		toContainerName := viper.GetString("to-container-name")
571 | 		size := viper.GetUint("volume-size")
572 | 		threads := viper.GetUint("threads")
573 | 		toAZ := viper.GetString("to-az")
574 | 		toVolumeType := viper.GetString("to-volume-type")
575 | 		restoreVolume := viper.GetBool("restore-volume")
576 | 		properties := viper.GetStringMapString("property")
577 | 
578 | 		if threads == 0 {
579 | 			return fmt.Errorf("the number of threads cannot be zero")
580 | 		}
581 | 
582 | 		if toContainerName == "" {
583 | 			return fmt.Errorf("swift container name cannot be empty")
584 | 		}
585 | 
586 | 		// source and destination parameters
587 | 		loc, err := getSrcAndDst("")
588 | 		if err != nil {
589 | 			return err
590 | 		}
591 | 
592 | 		srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src)
593 | 		if err != nil {
594 | 			return fmt.Errorf("failed to create a source OpenStack client: %s", err)
595 | 		}
596 | 
597 | 		srcObjectClient, err := newObjectStorageV1Client(srcProvider, loc.Src.Region)
598 | 		if err != nil {
599 | 			// don't fail, will use the Glance client instead
600 | 			log.Printf("Failed to create source object storage client: %s", err)
601 | 		}
602 | 
603 | 		srcImageClient, err := newGlanceV2Client(srcProvider, loc.Src.Region)
604 | 		if err != nil {
605 | 			return fmt.Errorf("failed to create source image client: %s", err)
606 | 		}
607 | 
608 | 		// resolve image name to an ID
609 | 		if v, err := images_utils.IDFromName(cmd.Context(), srcImageClient, image); err == nil {
610 | 			image = v
611 | 		} else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok {
612 | 			return err
613 | 		}
614 | 
615 | 		dstProvider, err := newOpenStackClient(cmd.Context(), loc.Dst)
616 | 		if err != nil {
617 | 			return fmt.Errorf("failed to create a destination OpenStack client: %s", err)
618 | 		}
619 | 
620 | 		dstVolumeClient, err := newBlockStorageV3Client(dstProvider, loc.Dst.Region)
621 | 		if err != nil {
622 | 			return fmt.Errorf("failed to create destination volume client: %s", err)
623 | 		}
624 | 
625 | 		dstObjectClient, err := newObjectStorageV1Client(dstProvider, loc.Dst.Region)
626 | 		if err != nil {
627 | 			return fmt.Errorf("failed to create destination object storage client, detailed image clone statistics will be unavailable: %s", err)
628 | 		}
629 | 
630 | 		err = checkAvailabilityZone(cmd.Context(), dstVolumeClient, "", &toAZ, &loc)
631 | 		if err != nil {
632 | 			return err
633 | 		}
634 | 
635 | 		defer measureTime()
636 | 
637 | 		backup, err := uploadBackup(cmd.Context(), srcImageClient, srcObjectClient, dstObjectClient, dstVolumeClient, toBackupName, toContainerName, image, toAZ, properties, int(size), threads)
638 | 		if err != nil {
639 | 			return err
640 | 		}
641 | 
642 | 		log.Printf("Target backup name is %q (id: %q)", backup.Name, backup.ID)
643 | 
644 | 		if !restoreVolume {
645 | 			return nil
646 | 		}
647 | 
648 | 		// reauth before the long-running task
649 | 		dstVolumeClient.TokenID = ""
650 | 		dstVolume, err := backupToVolume(cmd.Context(), dstVolumeClient, backup, toVolumeName, toVolumeType, toAZ)
651 | 		if err != nil {
652 | 			return err
653 | 		}
654 | 
655 | 		log.Printf("Target volume name is %q (id: %q)", dstVolume.Name, dstVolume.ID)
656 | 
657 | 		return nil
658 | 
659 | 	},
660 | }
661 | 
662 | var BackupRestoreCmd = &cobra.Command{
663 | 	Use:   "restore <backup>",
664 | 	Args:  cobra.ExactArgs(1),
665 | 	Short: "Restore a backup into a volume",
666 | 	PreRunE: func(cmd *cobra.Command, args []string) error {
667 | 		if err := parseTimeoutArgs(); err != nil {
668 | 			return err
669 | 		}
670 | 		return viper.BindPFlags(cmd.Flags())
671 | 	},
672 | 	RunE: func(cmd *cobra.Command, args []string) error {
673 | 		backup := args[0]
674 | 
675 | 		toVolumeName := viper.GetString("to-volume-name")
676 | 		size := viper.GetUint("volume-size")
677 | 		toAZ := viper.GetString("to-az")
678 | 		toVolumeType := viper.GetString("to-volume-type")
679 | 
680 | 		// source and destination parameters
681 | 		loc, err := getSrcAndDst("")
682 | 		if err != nil {
683 | 			return err
684 | 		}
685 | 
686 | 		dstProvider, err := newOpenStackClient(cmd.Context(), loc.Dst)
687 | 		if err != nil {
688 | 			return fmt.Errorf("failed to create a destination OpenStack client: %s", err)
689 | 		}
690 | 
691 | 		dstVolumeClient, err := newBlockStorageV3Client(dstProvider, loc.Dst.Region)
692 | 		if err != nil {
693 | 			return fmt.Errorf("failed to create destination volume client: %s", err)
694 | 		}
695 | 
696 | 		err = checkAvailabilityZone(cmd.Context(), dstVolumeClient, "", &toAZ, &loc)
697 | 		if err != nil {
698 | 			return err
699 | 		}
700 | 
701 | 		// resolve backup name to an ID
702 | 		if v, err := backups_utils.IDFromName(cmd.Context(), dstVolumeClient, backup); err == nil {
703 | 			backup = v
704 | 		} else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok {
705 | 			return err
706 | 		}
707 | 
708 | 		backupObj, err := waitForBackup(cmd.Context(), dstVolumeClient, backup, waitForBackupSec)
709 | 		if err != nil {
710 | 			return fmt.Errorf("failed to wait for backup status: %s", err)
711 | 		}
712 | 
713 | 		if backupObj.Size == 0 {
714 | 			return fmt.Errorf("target volume size must be specified")
715 | 		}
716 | 
717 | 		if size > 0 {
718 | 			if int(size) < backupObj.Size {
719 | 				return fmt.Errorf("target volume size must not be less than %d", backupObj.Size)
720 | 			}
721 | 			backupObj.Size = int(size)
722 | 		}
723 | 
724 | 		defer measureTime()
725 | 
726 | 		dstVolume, err := backupToVolume(cmd.Context(), dstVolumeClient, backupObj, toVolumeName, toVolumeType, toAZ)
727 | 		if err != nil {
728 | 			return err
729 | 		}
730 | 
731 | 		log.Printf("Target volume name is %q (id: %q)", dstVolume.Name, dstVolume.ID)
732 | 
733 | 		return nil
734 | 	},
735 | }
736 | 
737 | func init() {
738 | 	initBackupCmdFlags()
739 | 	BackupCmd.AddCommand(BackupUploadCmd)
740 | 	BackupCmd.AddCommand(BackupRestoreCmd)
741 | 	BackupCmd.AddCommand(BackupCloneCmd)
742 | 	RootCmd.AddCommand(BackupCmd)
743 | }
744 | 
745 | func initBackupCmdFlags() {
746 | 	BackupUploadCmd.Flags().StringP("to-container-name", "", "", "destination backup Swift container name")
747 | 	BackupUploadCmd.Flags().StringP("to-az", "", "", "destination availability zone")
748 | 	BackupUploadCmd.Flags().UintP("threads", "t", 1, "number of parallel threads")
749 | 	BackupUploadCmd.Flags().BoolP("restore-volume", "", false, "restore a volume after upload")
750 | 	BackupUploadCmd.Flags().StringP("to-volume-name", "", "", "target volume name")
751 | 	BackupUploadCmd.Flags().StringP("to-backup-name", "", "", "target backup name")
752 | 	BackupUploadCmd.Flags().StringP("to-volume-type", "", "", "destination volume type")
753 | 	BackupUploadCmd.Flags().UintP("volume-size", "b", 0, "target volume size (must not be less than the original image virtual size)")
754 | 	BackupUploadCmd.Flags().StringToStringP("property", "p", nil, "image property for the target volume")
755 | 
756 | 	BackupRestoreCmd.Flags().StringP("to-volume-name", "", "", "destination volume name")
757 | 	BackupRestoreCmd.Flags().StringP("to-az", "", "", "destination availability zone")
758 | 	BackupRestoreCmd.Flags().StringP("to-volume-type", "", "", "destination volume type")
759 | 	BackupRestoreCmd.Flags().UintP("volume-size", "b", 0, "target volume size")
760 | 
761 | 	BackupCloneCmd.Flags().UintP("threads", "t", 1, "number of parallel threads")
762 | 	BackupCloneCmd.Flags().StringP("to-backup-name", "", "", "destination backup name")
763 | 	BackupCloneCmd.Flags().StringP("to-container-name", "", "", "destination backup container name")
764 | }
765 | 
--------------------------------------------------------------------------------
/pkg/backup_clone.go:
--------------------------------------------------------------------------------
1 | package pkg
2 | 
3 | import (
4 | 	"context"
5 | 	"encoding/json"
6 | 	"fmt"
7 | 	"os"
8 | 	"os/signal"
9 | 	"path/filepath"
10 | 	"regexp"
11 | 	"sync"
12 | 	"syscall"
13 | 
14 | 	"github.com/google/uuid"
15 | 	"github.com/gophercloud/gophercloud/v2"
16 | 	"github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/backups"
17 | 	backups_utils "github.com/gophercloud/utils/v2/openstack/blockstorage/v3/backups"
18 | 	"github.com/majewsky/schwift/v2/gopherschwift"
19 | 	"github.com/sapcc/go-bits/logg"
20 | 	"github.com/sapcc/go-bits/secrets"
21 | 	"github.com/sapcc/swift-http-import/pkg/actors"
22 | 	"github.com/sapcc/swift-http-import/pkg/objects"
23 | 	"github.com/spf13/cobra"
24 | 	"github.com/spf13/viper"
25 | )
26 | 
27 | func prepareSwiftConfig(ctx context.Context, srcObjectClient, dstObjectClient *gophercloud.ServiceClient, srcContainerName, dstContainerName, prefix string, threads uint) (*objects.Configuration, error) {
28 | 	srcSchwift, err := gopherschwift.Wrap(srcObjectClient, &gopherschwift.Options{
29 | 		UserAgent: srcObjectClient.UserAgent.Join(),
30 | 	})
31 | 	if err != nil {
32 | 		return nil, err
33 | 	}
34 | 	srcContainer, err := srcSchwift.Container(srcContainerName).EnsureExists(ctx)
35 | 	if err != nil {
36 | 		return nil, err
37 | 	}
38 | 
39 | 	dstSchwift, err := gopherschwift.Wrap(dstObjectClient, &gopherschwift.Options{
40 | 		UserAgent: dstObjectClient.UserAgent.Join(),
41 | 	})
42 | 	if err != nil {
43 | 		return nil, err
44 | 	}
45 | 	dstContainer, err := dstSchwift.Container(dstContainerName).EnsureExists(ctx)
46 | 	if err != nil {
47 | 		return nil, err
48 | 	}
49 | 
50 | 	source := objects.SwiftLocation{
51 | 		Account:          srcSchwift,
52 | 		Container:        srcContainer,
53 | 		ContainerName:    secrets.FromEnv(srcContainerName),
54 | 		ObjectNamePrefix: secrets.FromEnv(filepath.Dir(prefix) + "/"),
55 | 	}
56 | 
57 | 	target := objects.SwiftLocation{
58 | 		Account:          dstSchwift,
59 | 		Container:        dstContainer,
60 | 		ContainerName:    secrets.FromEnv(dstContainerName),
61 | 		ObjectNamePrefix: secrets.FromEnv(filepath.Dir(prefix) + "/"),
62 | 	}
63 | 
64 | 	// TODO: fail when the target file exists
65 | 	rx, err := regexp.Compile(fmt.Sprintf("%s.*", filepath.Base(prefix)))
66 | 	if err != nil {
67 | 		return nil, fmt.Errorf("failed to compile a regexp: %v", err)
68 | 	}
69 | 	config := &objects.Configuration{
70 | 		Jobs: []*objects.Job{
71 | 			{
72 | 				Source: objects.SourceUnmarshaler{
73 | 					Source: &source,
74 | 				},
75 | 				Target: &target,
76 | 				Matcher: objects.Matcher{
77 | 					IncludeRx: rx,
78 | 				},
79 | 			},
80 | 		},
81 | 	}
82 | 
config.WorkerCounts.Transfer = threads 83 | 84 | return config, nil 85 | } 86 | 87 | func transferObjects(config *objects.Configuration) (int, actors.Stats) { 88 | //setup the Report actor 89 | reportChan := make(chan actors.ReportEvent) 90 | report := &actors.Report{ 91 | Input: reportChan, 92 | Statsd: config.Statsd, 93 | StartTime: startTime, 94 | } 95 | wgReport := &sync.WaitGroup{} 96 | 97 | //receive SIGINT/SIGTERM signals 98 | sigs := make(chan os.Signal, 1) 99 | signal.Notify(sigs, os.Interrupt, syscall.SIGTERM) 100 | 101 | //setup a context that shuts down all pipeline actors when one of the signals above is received 102 | ctx, cancelFunc := context.WithCancel(context.Background()) 103 | defer cancelFunc() 104 | go func() { 105 | <-sigs 106 | logg.Error("Interrupt received! Shutting down...") 107 | cancelFunc() 108 | }() 109 | 110 | actors.Start(ctx, report, wgReport) 111 | 112 | //do the work 113 | runPipeline(ctx, config, reportChan) 114 | 115 | //shutdown Report actor 116 | close(reportChan) 117 | wgReport.Wait() 118 | 119 | return report.ExitCode, report.Stats() 120 | } 121 | 122 | func runPipeline(ctx context.Context, config *objects.Configuration, report chan<- actors.ReportEvent) { 123 | //start the pipeline actors 124 | var wg sync.WaitGroup 125 | var wgTransfer sync.WaitGroup 126 | queue1 := make(chan objects.File, 10) //will be closed by scraper when it's done 127 | queue2 := make(chan actors.FileInfoForCleaner, 10) //will be closed by us when all transferors are done 128 | actors.Start(ctx, &actors.Scraper{ 129 | Jobs: config.Jobs, 130 | Output: queue1, 131 | Report: report, 132 | }, &wg) 133 | 134 | for i := uint(0); i < config.WorkerCounts.Transfer; i++ { 135 | actors.Start(ctx, &actors.Transferor{ 136 | Input: queue1, 137 | Output: queue2, 138 | Report: report, 139 | }, &wg, &wgTransfer) 140 | } 141 | 142 | actors.Start(ctx, &actors.Cleaner{ 143 | Input: queue2, 144 | Report: report, 145 | }, &wg) 146 | 147 | //wait for transfer phase to finish 148 | wgTransfer.Wait() 149 | //signal to cleaner to start its work 150 | close(queue2) 151 | //wait for remaining workers to finish 152 | wg.Wait() 153 | 154 | // signal.Reset(os.Interrupt, syscall.SIGTERM) 155 | } 156 | 157 | func cloneBackup(ctx context.Context, srcVolumeClient, srcObjectClient, dstVolumeClient, dstObjectClient *gophercloud.ServiceClient, srcBackup *backups.Backup, toBackupName string, toContainerName string, threads uint) (*backups.Backup, error) { 158 | backupExport, err := backups.Export(ctx, srcVolumeClient, srcBackup.ID).Extract() 159 | if err != nil { 160 | return nil, fmt.Errorf("failed to export a %q backup: %s", srcBackup.ID, err) 161 | } 162 | 163 | backupRecord := backups.ImportBackup{} 164 | err = json.Unmarshal(backupExport.BackupURL, &backupRecord) 165 | if err != nil { 166 | return nil, fmt.Errorf("failed to unmarshal a %q backup record: %s", srcBackup.ID, err) 167 | } 168 | 169 | if backupRecord.ObjectCount == nil { 170 | return nil, fmt.Errorf("backup record contains nil object_count") 171 | } 172 | if backupRecord.Container == nil { 173 | return nil, fmt.Errorf("backup record contains nil container") 174 | } 175 | 176 | if toContainerName == "" { 177 | toContainerName = *backupRecord.Container 178 | } 179 | description := fmt.Sprintf("cloned from %q backup (%q project ID)", srcBackup.ID, backupRecord.ProjectID) 180 | backupRecord.DisplayDescription = &description 181 | if toBackupName != "" { 182 | backupRecord.DisplayName = &toBackupName 183 | } 184 | 185 | // TODO: recursive 186 | 
config, err := prepareSwiftConfig(ctx, srcObjectClient, dstObjectClient, *backupRecord.Container, toContainerName, *backupRecord.ServiceMetadata, threads)
187 | 	if err != nil {
188 | 		return nil, err
189 | 	}
190 | 
191 | 	ret, stats := transferObjects(config)
192 | 	if ret != 0 {
193 | 		return nil, fmt.Errorf("error while transferring objects")
194 | 	}
195 | 
196 | 	expectedObjectsCount := uint64(*backupRecord.ObjectCount) + 2 // + metadata and sha256file
197 | 	// TODO: stats.FilesFound vs stats.FilesTransferred
198 | 	if stats.FilesFound != expectedObjectsCount {
199 | 		return nil, fmt.Errorf("error while transferring objects: the number of transferred files doesn't match the number of files in the backup record: %d != %d", stats.FilesFound, expectedObjectsCount)
200 | 	}
201 | 
202 | 	// generate a new backup UUID
203 | 	if v, err := uuid.NewUUID(); err != nil {
204 | 		return nil, fmt.Errorf("failed to generate a new backup UUID: %s", err)
205 | 	} else {
206 | 		backupRecord.ID = v.String()
207 | 	}
208 | 	backupRecord.Container = &toContainerName
209 | 
210 | 	backupURL, err := json.Marshal(backupRecord)
211 | 	if err != nil {
212 | 		return nil, fmt.Errorf("failed to marshal a backup record: %s", err)
213 | 	}
214 | 
215 | 	backupImport := backups.ImportOpts{
216 | 		BackupService: backupExport.BackupService,
217 | 		BackupURL:     backupURL,
218 | 	}
219 | 
220 | 	importResponse, err := backups.Import(ctx, dstVolumeClient, backupImport).Extract()
221 | 	if err != nil {
222 | 		return nil, fmt.Errorf("failed to import a backup: %s", err)
223 | 	}
224 | 
225 | 	return waitForBackup(ctx, dstVolumeClient, importResponse.ID, waitForBackupSec)
226 | }
227 | 
228 | // BackupCloneCmd represents the backup clone command.
229 | var BackupCloneCmd = &cobra.Command{
230 | 	Use:   "clone <backup>",
231 | 	Args:  cobra.ExactArgs(1),
232 | 	Short: "Clone a backup",
233 | 	PreRunE: func(cmd *cobra.Command, args []string) error {
234 | 		if err := parseTimeoutArgs(); err != nil {
235 | 			return err
236 | 		}
237 | 		return viper.BindPFlags(cmd.Flags())
238 | 	},
239 | 	RunE: func(cmd *cobra.Command, args []string) error {
240 | 		// clone backup
241 | 		backup := args[0]
242 | 
243 | 		toName := viper.GetString("to-backup-name")
244 | 		toContainerName := viper.GetString("to-container-name")
245 | 		threads := viper.GetUint("threads")
246 | 
247 | 		if threads == 0 {
248 | 			return fmt.Errorf("the number of threads cannot be zero")
249 | 		}
250 | 
251 | 		if toContainerName == "" {
252 | 			return fmt.Errorf("swift container name cannot be empty")
253 | 		}
254 | 
255 | 		// source and destination parameters
256 | 		loc, err := getSrcAndDst("")
257 | 		if err != nil {
258 | 			return err
259 | 		}
260 | 
261 | 		srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src)
262 | 		if err != nil {
263 | 			return fmt.Errorf("failed to create a source OpenStack client: %s", err)
264 | 		}
265 | 
266 | 		srcVolumeClient, err := newBlockStorageV3Client(srcProvider, loc.Src.Region)
267 | 		if err != nil {
268 | 			return fmt.Errorf("failed to create source volume client: %s", err)
269 | 		}
270 | 
271 | 		srcObjectClient, err := newObjectStorageV1Client(srcProvider, loc.Src.Region)
272 | 		if err != nil {
273 | 			return fmt.Errorf("failed to create source object storage client: %s", err)
274 | 		}
275 | 
276 | 		// resolve backup name to an ID
277 | 		if v, err := backups_utils.IDFromName(cmd.Context(), srcVolumeClient, backup); err == nil {
278 | 			backup = v
279 | 		} else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok {
280 | 			return err
281 | 		}
282 | 
283 | 		dstProvider, err := newOpenStackClient(cmd.Context(), loc.Dst)
284 | 		if err != nil {
285 | 			return fmt.Errorf("failed to create a destination OpenStack client: %s", err)
286 | 		}
287 | 
288 | 		dstVolumeClient, err := newBlockStorageV3Client(dstProvider, loc.Dst.Region)
289 | 		if err != nil {
290 | 			return fmt.Errorf("failed to create destination volume client: %s", err)
291 | 		}
292 | 
293 | 		dstObjectClient, err := newObjectStorageV1Client(dstProvider, loc.Dst.Region)
294 | 		if err != nil {
295 | 			return fmt.Errorf("failed to create destination object storage client: %s", err)
296 | 		}
297 | 
298 | 		srcBackup, err := waitForBackup(cmd.Context(), srcVolumeClient, backup, waitForBackupSec)
299 | 		if err != nil {
300 | 			return fmt.Errorf("failed to wait for a %q backup: %s", backup, err)
301 | 		}
302 | 
303 | 		if srcBackup.IsIncremental {
304 | 			return fmt.Errorf("incremental backups are not supported")
305 | 		}
306 | 
307 | 		defer measureTime()
308 | 
309 | 		dstBackup, err := cloneBackup(cmd.Context(), srcVolumeClient, srcObjectClient, dstVolumeClient, dstObjectClient, srcBackup, toName, toContainerName, threads)
310 | 		if err != nil {
311 | 			return err
312 | 		}
313 | 
314 | 		log.Printf("Migrated target backup name is %q (id: %q)", dstBackup.Name, dstBackup.ID)
315 | 
316 | 		return nil
317 | 	},
318 | }
319 | 
--------------------------------------------------------------------------------
/pkg/backup_types.go:
--------------------------------------------------------------------------------
1 | package pkg
2 | 
3 | import (
4 | 	"encoding/hex"
5 | 	"encoding/json"
6 | 	"runtime"
7 | 	"sort"
8 | 	"sync"
9 | 	"time"
10 | 
11 | 	"github.com/gophercloud/gophercloud/v2"
12 | )
13 | 
14 | type sha256file struct {
15 | 	sync.Mutex
16 | 	BackupDescription *string
17 | 	BackupID          string
18 | 	BackupName        *string
19 | 	ChunkSize         int
20 | 	CreatedAt         time.Time
21 | 	Version           string
22 | 	VolumeID          string
23 | 	// using a map here, because concurrency may mix up the actual order;
24 | 	// the actual order will be restored during JSON marshalling
25 | 	Sha256s map[int][][32]byte
26 | }
27 | 
28 | func (r *sha256file) MarshalJSON() ([]byte, error) {
29 | 	type s struct {
30 | 		BackupDescription *string  `json:"backup_description"`
31 | 		BackupID          string   `json:"backup_id"`
32 | 		BackupName        *string  `json:"backup_name"`
33 | 		ChunkSize         int      `json:"chunk_size"`
34 | 		Version           string   `json:"version"`
35 | 		VolumeID          string   `json:"volume_id"`
36 | 		CreatedAt         string   `json:"created_at"`
37 | 		Sha256s           []string `json:"sha256s"`
38 | 	}
39 | 
40 | 	// restore the actual order of hash sums
41 | 	keys := make([]int, 0, len(r.Sha256s))
42 | 	for k := range r.Sha256s {
43 | 		keys = append(keys, k)
44 | 	}
45 | 	sort.Ints(keys)
46 | 
47 | 	var str []string
48 | 	for _, k := range keys {
49 | 		for _, v := range r.Sha256s[k] {
50 | 			str = append(str, hex.EncodeToString(v[:]))
51 | 		}
52 | 		r.Sha256s[k] = nil
53 | 	}
54 | 	r.Sha256s = nil
55 | 
56 | 	// clean the r.Sha256s memory
57 | 	runtime.GC()
58 | 
59 | 	return json.Marshal(s{
60 | 		r.BackupDescription,
61 | 		r.BackupID,
62 | 		r.BackupName,
63 | 		r.ChunkSize,
64 | 		r.Version,
65 | 		r.VolumeID,
66 | 		r.CreatedAt.Format(gophercloud.RFC3339ZNoT),
67 | 		str,
68 | 	})
69 | }
70 | 
71 | type backupChunkEntry map[string]map[string]interface{}
72 | 
73 | type metadata struct {
74 | 	sync.Mutex
75 | 	BackupDescription *string
76 | 	BackupID          string
77 | 	BackupName        *string
78 | 	CreatedAt         time.Time
79 | 	ParentID          *string
80 | 	Version           string
81 | 	VolumeID          string
82 | 	VolumeMeta        string
83 | 	// using a map here, because concurrency may mix up the actual order;
84 | 	// the actual order will be restored during JSON marshalling
85 | 	Objects map[int]backupChunkEntry
86 | }
87 | 
88 | func (r *metadata) MarshalJSON() ([]byte, error) {
89 | 	type s struct {
90 | 		BackupDescription *string            `json:"backup_description"`
91 | 		BackupID          string             `json:"backup_id"`
92 | 		BackupName        *string            `json:"backup_name"`
93 | 		ParentID          *string            `json:"parent_id"`
94 | 		Version           string             `json:"version"`
95 | 		VolumeID          string             `json:"volume_id"`
96 | 		VolumeMeta        string             `json:"volume_meta"`
97 | 		CreatedAt         string             `json:"created_at"`
98 | 		Objects           []backupChunkEntry `json:"objects"`
99 | 	}
100 | 
101 | 	// restore the actual order of objects
102 | 	keys := make([]int, 0, len(r.Objects))
103 | 	for k := range r.Objects {
104 | 		keys = append(keys, k)
105 | 	}
106 | 	sort.Ints(keys)
107 | 
108 | 	obj := make([]backupChunkEntry, 0, len(keys))
109 | 	for _, k := range keys {
110 | 		obj = append(obj, r.Objects[k])
111 | 		r.Objects[k] = nil
112 | 	}
113 | 	r.Objects = nil
114 | 
115 | 	// clean the r.Objects memory
116 | 	runtime.GC()
117 | 
118 | 	return json.Marshal(s{
119 | 		r.BackupDescription,
120 | 		r.BackupID,
121 | 		r.BackupName,
122 | 		r.ParentID,
123 | 		r.Version,
124 | 		r.VolumeID,
125 | 		r.VolumeMeta,
126 | 		r.CreatedAt.Format(gophercloud.RFC3339ZNoT),
127 | 		obj,
128 | 	})
129 | }
130 | 
131 | type volumeMeta struct {
132 | 	VolumeBaseMeta       volumeBaseMeta    `json:"volume-base-metadata"`
133 | 	Version              int               `json:"version"`
134 | 	VolumeGlanceMetadata map[string]string `json:"volume-glance-metadata"`
135 | }
136 | 
137 | func (r *volumeMeta) MarshalJSON() ([]byte, error) {
138 | 	if r.VolumeGlanceMetadata == nil {
139 | 		r.VolumeGlanceMetadata = make(map[string]string)
140 | 	}
141 | 	type vm volumeMeta // marshal via an alias type to avoid infinite recursion into this method
142 | 	return json.Marshal(vm(*r))
143 | }
144 | type volumeBaseMeta struct {
145 | 	MigrationStatus           *string    `json:"migration_status"`
146 | 	ProviderID                *string    `json:"provider_id"`
147 | 	AvailabilityZone          string     `json:"availability_zone"`
148 | 	TerminatedAt              *time.Time `json:"-"`
149 | 	UpdatedAt                 time.Time  `json:"-"`
150 | 	ProviderGeometry          *string    `json:"provider_geometry"`
151 | 	ReplicationExtendedStatus *string    `json:"replication_extended_status"`
152 | 	ReplicationStatus         *string    `json:"replication_status"`
153 | 	SnapshotID                *string    `json:"snapshot_id"`
154 | 	EC2ID                     *string    `json:"ec2_id"`
155 | 	DeletedAt                 *time.Time `json:"-"`
156 | 	ID                        string     `json:"id"`
157 | 	Size                      int        `json:"size"`
158 | 	UserID                    string     `json:"user_id"`
159 | 	DisplayDescription        *string    `json:"display_description"`
160 | 	ClusterName               *string    `json:"cluster_name"`
161 | 	ProjectID                 string     `json:"project_id"`
162 | 	LaunchedAt                time.Time  `json:"-"`
163 | 	ScheduledAt               time.Time  `json:"-"`
164 | 	Status                    string     `json:"status"`
165 | 	VolumeTypeID              string     `json:"volume_type_id"`
166 | 	Multiattach               bool       `json:"multiattach"`
167 | 	Deleted                   bool       `json:"deleted"`
168 | 	ServiceUUID               string     `json:"service_uuid"`
169 | 	ProviderLocation          *string    `json:"provider_location"`
170 | 	Host                      string     `json:"host"`
171 | 	ConsistencygroupID        *string    `json:"consistencygroup_id"`
172 | 	SourceVolID               *string    `json:"source_volid"`
173 | 	ProviderAuth              *string    `json:"provider_auth"`
174 | 	PreviousStatus            string     `json:"previous_status"`
175 | 	DisplayName               string     `json:"display_name"`
176 | 	Bootable                  bool       `json:"bootable"`
177 | 	CreatedAt                 time.Time  `json:"-"`
178 | 	AttachStatus              string     `json:"attach_status"`
179 | 	NameID                    *string    `json:"_name_id"`
180 | 	EncryptionKeyID           *string    `json:"encryption_key_id"`
181 | 	ReplicationDriverData     *string    `json:"replication_driver_data"`
182 | 	GroupID                   *string    `json:"group_id"`
183 | 	SharedTargets             bool       `json:"shared_targets"`
184 | }
185 | 
186 | func (r *volumeBaseMeta) MarshalJSON() ([]byte, error) {
187 | 	type t 
volumeBaseMeta 188 | type s struct { 189 | t 190 | UpdatedAt string `json:"updated_at"` 191 | LaunchedAt string `json:"launched_at"` 192 | ScheduledAt string `json:"scheduled_at"` 193 | CreatedAt string `json:"created_at"` 194 | TerminatedAt *string `json:"terminated_at"` 195 | DeletedAt *string `json:"deleted_at"` 196 | } 197 | 198 | v := s{ 199 | t(*r), 200 | r.UpdatedAt.Format(gophercloud.RFC3339ZNoT), 201 | r.LaunchedAt.Format(gophercloud.RFC3339ZNoT), 202 | r.ScheduledAt.Format(gophercloud.RFC3339ZNoT), 203 | r.CreatedAt.Format(gophercloud.RFC3339ZNoT), 204 | nil, 205 | nil, 206 | } 207 | 208 | if r.TerminatedAt != nil { 209 | s := r.TerminatedAt.Format(gophercloud.RFC3339ZNoT) 210 | v.TerminatedAt = &s 211 | } 212 | 213 | if r.DeletedAt != nil { 214 | s := r.DeletedAt.Format(gophercloud.RFC3339ZNoT) 215 | v.DeletedAt = &s 216 | } 217 | 218 | return json.Marshal(v) 219 | } 220 | -------------------------------------------------------------------------------- /pkg/image.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "math/rand" 8 | "net/http" 9 | "time" 10 | 11 | "github.com/gophercloud/gophercloud/v2" 12 | "github.com/gophercloud/gophercloud/v2/openstack/image/v2/imagedata" 13 | "github.com/gophercloud/gophercloud/v2/openstack/image/v2/imageimport" 14 | "github.com/gophercloud/gophercloud/v2/openstack/image/v2/images" 15 | "github.com/gophercloud/gophercloud/v2/openstack/image/v2/tasks" 16 | "github.com/gophercloud/gophercloud/v2/openstack/objectstorage/v1/containers" 17 | "github.com/gophercloud/gophercloud/v2/openstack/objectstorage/v1/objects" 18 | "github.com/gophercloud/gophercloud/v2/pagination" 19 | images_utils "github.com/gophercloud/utils/v2/openstack/image/v2/images" 20 | "github.com/machinebox/progress" 21 | "github.com/spf13/cobra" 22 | "github.com/spf13/viper" 23 | ) 24 | 25 | var ( 26 | waitForImageSec float64 27 | swiftTempURLTTL int = 10 // 10 seconds is enough 28 | imageWebDownload bool 29 | ) 30 | 31 | var imageWaitStatuses = []string{ 32 | string(images.ImageStatusSaving), 33 | string(images.ImageStatusQueued), 34 | string(images.ImageStatusImporting), 35 | } 36 | 37 | func createImageSpeed(image *images.Image) { 38 | t := image.UpdatedAt.Sub(image.CreatedAt) 39 | log.Printf("Time to create an image: %s", t) 40 | size := float64(image.SizeBytes / (1024 * 1024)) 41 | log.Printf("Size of the image: %.2f Mb", size) 42 | log.Printf("Speed of the image creation: %.2f Mb/sec", size/t.Seconds()) 43 | } 44 | 45 | func waitForImageTask(ctx context.Context, client, swiftClient *gophercloud.ServiceClient, id string, srcSizeBytes int64, secs float64) (*images.Image, error) { 46 | // initial image status 47 | img, err := images.Get(ctx, client, id).Extract() 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | updateStatus := func(task tasks.Task) (bool, error) { 53 | var err error 54 | if task.Status == string(tasks.TaskStatusSuccess) { 55 | // update image status 56 | img, err = images.Get(ctx, client, id).Extract() 57 | if err != nil { 58 | return false, err 59 | } 60 | } 61 | return false, nil 62 | } 63 | 64 | var taskListAccessDenied bool 65 | var taskID string 66 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) { 67 | var taskStatus string 68 | 69 | if !taskListAccessDenied { 70 | err = tasks.List(client, tasks.ListOpts{}).EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { 71 | tl, err := 
tasks.ExtractTasks(page)
72 | 				if err != nil {
73 | 					return false, fmt.Errorf("failed to list image tasks: %s", err)
74 | 				}
75 | 
76 | 				for _, task := range tl {
77 | 					if taskID != "" && task.Status != string(tasks.TaskStatusFailure) {
78 | 						taskStatus = fmt.Sprintf("Target image task status is: %s", task.Status)
79 | 						return updateStatus(task)
80 | 					}
81 | 
82 | 					tid := task.ID
83 | 					if taskID != "" {
84 | 						// we know the task ID
85 | 						tid = taskID
86 | 					}
87 | 
88 | 					t, err := tasks.Get(ctx, client, tid).Extract()
89 | 					if err != nil {
90 | 						// TODO: return an error?
91 | 						log.Printf("Failed to get %q task details: %s", tid, err)
92 | 						return false, nil
93 | 					}
94 | 
95 | 					if v, ok := t.Input["image_id"]; ok {
96 | 						if v, ok := v.(string); ok {
97 | 							if v == id {
98 | 								taskStatus = fmt.Sprintf("Target image task status is: %s", t.Status)
99 | 
100 | 								// save the corresponding task ID for next calls
101 | 								taskID = t.ID
102 | 								if t.Status == string(tasks.TaskStatusFailure) {
103 | 									// set failed image status
104 | 									img.Status = images.ImageStatus(t.Status)
105 | 									return false, fmt.Errorf("target image import failed: %s", t.Message)
106 | 								}
107 | 								return updateStatus(*t)
108 | 							}
109 | 						}
110 | 					}
111 | 				}
112 | 
113 | 				// continue listing
114 | 				return true, nil
115 | 			})
116 | 			if err != nil {
117 | 				if !gophercloud.ResponseCodeIs(err, http.StatusForbidden) {
118 | 					return false, err
119 | 				}
120 | 				// don't fail when the tasks list is denied
121 | 				taskListAccessDenied = true
122 | 			}
123 | 		} else {
124 | 			// just update the image status when the tasks list is denied
125 | 			img, err = images.Get(ctx, client, id).Extract()
126 | 			if err != nil {
127 | 				return false, err
128 | 			}
129 | 		}
130 | 
131 | 		// show a user-friendly status
132 | 		containerSize := getContainerSize(ctx, swiftClient, id, srcSizeBytes)
133 | 		if containerSize == "" {
134 | 			log.Printf("Target image status: %s", joinSkipEmpty(", ", string(img.Status), taskStatus))
135 | 		} else {
136 | 			log.Printf("Target image status: %s", joinSkipEmpty(", ", string(img.Status), taskStatus, containerSize))
137 | 		}
138 | 
139 | 		if img.Status == images.ImageStatusActive {
140 | 			return true, nil
141 | 		} else {
142 | 			// continue status checks
143 | 			return false, nil
144 | 		}
145 | 	})
146 | 
147 | 	return img, err
148 | }
149 | 
150 | // this function may show confusing size results due to Swift eventual consistency.
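// It compares the destination "glance_<id>" container's BytesUsed counter
// with the known source size, e.g. 536870912 bytes uploaded out of a
// 2147483648-byte source is reported as "image size: 536870912/2147483648
// (25%)"; when the source size is unknown, only the absolute container size
// is printed.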
151 | func getContainerSize(ctx context.Context, client *gophercloud.ServiceClient, id string, srcSizeBytes int64) string { 152 | if client != nil { 153 | container, err := containers.Get(ctx, client, "glance_"+id, nil).Extract() 154 | if err != nil { 155 | if !gophercloud.ResponseCodeIs(err, http.StatusNotFound) { 156 | log.Printf("Failed to get Swift container status: %s", err) 157 | } 158 | return "" 159 | } 160 | 161 | var containerSize, percent int64 162 | if container != nil { 163 | containerSize = container.BytesUsed 164 | } 165 | 166 | if srcSizeBytes > 0 { 167 | percent = 100 * containerSize / srcSizeBytes 168 | return fmt.Sprintf("image size: %d/%d (%d%%)", containerSize, srcSizeBytes, percent) 169 | } 170 | 171 | // container size in Mb 172 | return fmt.Sprintf("image size: %.2f Mb", float64(containerSize/(1024*1024))) 173 | } 174 | return "" 175 | } 176 | 177 | func waitForImage(ctx context.Context, client, swiftClient *gophercloud.ServiceClient, id string, srcSizeBytes int64, secs float64) (*images.Image, error) { 178 | var image *images.Image 179 | var err error 180 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) { 181 | image, err = images.Get(ctx, client, id).Extract() 182 | if err != nil { 183 | return false, err 184 | } 185 | 186 | // show user friendly status 187 | containerSize := getContainerSize(ctx, swiftClient, id, srcSizeBytes) 188 | if containerSize == "" { 189 | log.Printf("Transition image status: %s", image.Status) 190 | } else { 191 | log.Printf("Transition image status: %s, %s", image.Status, containerSize) 192 | } 193 | if image.Status == images.ImageStatusActive { 194 | return true, nil 195 | } 196 | 197 | if !isSliceContainsStr(imageWaitStatuses, string(image.Status)) { 198 | return false, fmt.Errorf("transition image status is %q", image.Status) 199 | } 200 | 201 | // continue status checks 202 | return false, nil 203 | }) 204 | 205 | return image, err 206 | } 207 | 208 | var skipImageAttributes = []string{ 209 | "direct_url", 210 | "boot_roles", 211 | "os_hash_algo", 212 | "os_hash_value", 213 | } 214 | 215 | func expandImageProperties(v map[string]interface{}) map[string]string { 216 | properties := map[string]string{} 217 | for key, value := range v { 218 | if isSliceContainsStr(skipImageAttributes, key) { 219 | continue 220 | } 221 | if v, ok := value.(string); ok && v != "" { 222 | properties[key] = v 223 | } 224 | } 225 | 226 | return properties 227 | } 228 | 229 | func generateTmpURLKey(n int) string { 230 | var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") 231 | 232 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 233 | 234 | b := make([]rune, n) 235 | for i := range b { 236 | b[i] = letters[r.Intn(len(letters))] 237 | } 238 | return string(b) 239 | } 240 | 241 | func migrateImage(ctx context.Context, srcImageClient, dstImageClient, srcObjectClient, dstObjectClient *gophercloud.ServiceClient, srcImg *images.Image, toImageName string) (*images.Image, error) { 242 | var url string 243 | containerName := "glance_" + srcImg.ID 244 | objectName := srcImg.ID 245 | 246 | if imageWebDownload { 247 | tempURLKey := containers.UpdateOpts{ 248 | TempURLKey: generateTmpURLKey(20), 249 | } 250 | _, err := containers.Update(ctx, srcObjectClient, containerName, tempURLKey).Extract() 251 | if err != nil { 252 | return nil, fmt.Errorf("unable to set container temporary url key: %s", err) 253 | } 254 | 255 | tmpURLOptions := objects.CreateTempURLOpts{ 256 | Method: "GET", 257 | 
TTL: swiftTempURLTTL, 258 | } 259 | 260 | url, err = objects.CreateTempURL(ctx, srcObjectClient, containerName, objectName, tmpURLOptions) 261 | if err != nil { 262 | return nil, fmt.Errorf("unable to generate a temporary url for the %q container: %s", containerName, err) 263 | } 264 | 265 | log.Printf("Generated Swift Temp URL: %s", url) 266 | } 267 | 268 | imageName := srcImg.Name 269 | if toImageName != "" { 270 | imageName = toImageName 271 | } 272 | 273 | // create an empty image 274 | visibility := images.ImageVisibilityPrivate 275 | createOpts := &images.CreateOpts{ 276 | Name: imageName, 277 | ContainerFormat: srcImg.ContainerFormat, 278 | DiskFormat: srcImg.DiskFormat, 279 | MinDisk: srcImg.MinDiskGigabytes, 280 | MinRAM: srcImg.MinRAMMegabytes, 281 | Visibility: &visibility, 282 | Properties: expandImageProperties(srcImg.Properties), 283 | Tags: srcImg.Tags, 284 | } 285 | 286 | dstImg, err := images.Create(ctx, dstImageClient, createOpts).Extract() 287 | if err != nil { 288 | return nil, fmt.Errorf("error creating destination Image: %s", err) 289 | } 290 | 291 | dstImgID := dstImg.ID 292 | defer func() { 293 | if err != nil { 294 | log.Printf("Deleting target %q image", dstImgID) 295 | if err := images.Delete(ctx, dstImageClient, dstImgID).ExtractErr(); err != nil { 296 | log.Printf("Error deleting target image: %s", err) 297 | } 298 | } 299 | }() 300 | 301 | reauthClient(ctx, srcImageClient, "migrateImage") 302 | reauthClient(ctx, dstImageClient, "migrateImage") 303 | 304 | if imageWebDownload { 305 | if !isSliceContainsStr(dstImg.OpenStackImageImportMethods, string(imageimport.WebDownloadMethod)) { 306 | return nil, fmt.Errorf("the %q import method is not supported, supported import methods: %q", imageimport.WebDownloadMethod, dstImg.OpenStackImageImportMethods) 307 | } 308 | 309 | // import 310 | importOpts := &imageimport.CreateOpts{ 311 | Name: imageimport.WebDownloadMethod, 312 | URI: url, 313 | } 314 | 315 | err = imageimport.Create(ctx, dstImageClient, dstImg.ID, importOpts).ExtractErr() 316 | if err != nil { 317 | return nil, fmt.Errorf("error while importing url %q: %s", url, err) 318 | } 319 | 320 | dstImg, err = waitForImageTask(ctx, dstImageClient, dstObjectClient, dstImg.ID, srcImg.SizeBytes, waitForImageSec) 321 | if err != nil { 322 | return nil, fmt.Errorf("error while importing url %q: %s", url, err) 323 | } 324 | } else { 325 | // get the source reader 326 | var imageReader io.ReadCloser 327 | imageReader, err = imagedata.Download(ctx, srcImageClient, srcImg.ID).Extract() 328 | if err != nil { 329 | return nil, fmt.Errorf("error getting the source image reader: %s", err) 330 | } 331 | 332 | progressReader := progress.NewReader(imageReader) 333 | go func() { 334 | for p := range progress.NewTicker(context.Background(), progressReader, srcImg.SizeBytes, 1*time.Second) { 335 | log.Printf("Image size: %d/%d (%.2f%%), remaining: %s", p.N(), p.Size(), p.Percent(), p.Remaining().Round(time.Second)) 336 | } 337 | }() 338 | 339 | // write the source to the destination 340 | err = imagedata.Upload(ctx, dstImageClient, dstImg.ID, progressReader).ExtractErr() 341 | if err != nil { 342 | return nil, fmt.Errorf("failed to upload an image: %s", err) 343 | } 344 | imageReader.Close() 345 | 346 | dstImg, err = waitForImage(ctx, dstImageClient, dstObjectClient, dstImg.ID, srcImg.SizeBytes, waitForImageSec) 347 | if err != nil { 348 | return nil, fmt.Errorf("error while waiting for an image to be uploaded: %s", err) 349 | } 350 | } 351 | 352 | createImageSpeed(dstImg) 353 | 354 | 
log.Printf("Migrated target image name is %q (id: %q)", dstImg.Name, dstImg.ID) 355 | 356 | // verify destination image size and hash 357 | if srcImg.SizeBytes != dstImg.SizeBytes { 358 | return dstImg, fmt.Errorf("image was migrated, but the source size doesn't correspond the destination size: %d != %d", srcImg.SizeBytes, dstImg.SizeBytes) 359 | } 360 | 361 | if srcImg.Checksum != dstImg.Checksum { 362 | return dstImg, fmt.Errorf("image was migrated, but the source checksum doesn't correspond the destination checksum: %s != %s", srcImg.Checksum, dstImg.Checksum) 363 | } 364 | 365 | if srcImg.Properties["os_hash_algo"] != dstImg.Properties["os_hash_algo"] { 366 | return dstImg, fmt.Errorf("image was migrated, but the source hash also doesn't correspond the destination hash algo: %s != %s", srcImg.Properties["os_hash_algo"], dstImg.Properties["os_hash_algo"]) 367 | } 368 | 369 | if srcImg.Properties["os_hash_value"] != dstImg.Properties["os_hash_value"] { 370 | return dstImg, fmt.Errorf("image was migrated, but the source hash doesn't correspond the destination hash: %s != %s", srcImg.Properties["os_hash_value"], dstImg.Properties["os_hash_value"]) 371 | } 372 | 373 | return dstImg, nil 374 | } 375 | 376 | // ImageCmd represents the image command. 377 | var ImageCmd = &cobra.Command{ 378 | Use: "image ", 379 | Args: cobra.ExactArgs(1), 380 | Short: "Clone an image", 381 | PreRunE: func(cmd *cobra.Command, args []string) error { 382 | if err := parseTimeoutArgs(); err != nil { 383 | return err 384 | } 385 | imageWebDownload = viper.GetBool("image-web-download") 386 | return viper.BindPFlags(cmd.Flags()) 387 | }, 388 | RunE: func(cmd *cobra.Command, args []string) error { 389 | // migrate image 390 | image := args[0] 391 | toName := viper.GetString("to-image-name") 392 | 393 | // source and destination parameters 394 | loc, err := getSrcAndDst("") 395 | if err != nil { 396 | return err 397 | } 398 | 399 | srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src) 400 | if err != nil { 401 | return fmt.Errorf("failed to create a source OpenStack client: %s", err) 402 | } 403 | 404 | srcImageClient, err := newGlanceV2Client(srcProvider, loc.Src.Region) 405 | if err != nil { 406 | return fmt.Errorf("failed to create source image client: %s", err) 407 | } 408 | 409 | var srcObjectClient *gophercloud.ServiceClient 410 | if imageWebDownload { 411 | srcObjectClient, err = newObjectStorageV1Client(srcProvider, loc.Src.Region) 412 | if err != nil { 413 | return fmt.Errorf("failed to create source object storage client: %s", err) 414 | } 415 | } 416 | 417 | // resolve image name to an ID 418 | if v, err := images_utils.IDFromName(cmd.Context(), srcImageClient, image); err == nil { 419 | image = v 420 | } else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok { 421 | return err 422 | } 423 | 424 | dstProvider, err := newOpenStackClient(cmd.Context(), loc.Dst) 425 | if err != nil { 426 | return fmt.Errorf("failed to create a destination OpenStack client: %s", err) 427 | } 428 | 429 | dstImageClient, err := newGlanceV2Client(dstProvider, loc.Dst.Region) 430 | if err != nil { 431 | return fmt.Errorf("failed to create destination image client: %s", err) 432 | } 433 | 434 | dstObjectClient, err := newObjectStorageV1Client(dstProvider, loc.Dst.Region) 435 | if err != nil { 436 | log.Printf("failed to create destination object storage client, detailed image clone statistics will be unavailable: %s", err) 437 | } 438 | 439 | srcImg, err := waitForImage(cmd.Context(), srcImageClient, nil, image, 
0, waitForImageSec) 440 | if err != nil { 441 | return fmt.Errorf("failed to wait for %q source image: %s", image, err) 442 | } 443 | 444 | if imageWebDownload { 445 | // check whether current user scope belongs to the image owner 446 | userProjectID, err := getAuthProjectID(srcImageClient.ProviderClient) 447 | if err != nil { 448 | return fmt.Errorf("failed to extract user project ID scope: %s", err) 449 | } 450 | if userProjectID != srcImg.Owner { 451 | return fmt.Errorf("cannot clone an image using web download import method, when an image belongs to another project (%s), try to set --image-web-download=false", srcImg.Owner) 452 | } 453 | } 454 | 455 | defer measureTime() 456 | 457 | dstImg, err := migrateImage(cmd.Context(), srcImageClient, dstImageClient, srcObjectClient, dstObjectClient, srcImg, toName) 458 | if err != nil { 459 | return err 460 | } 461 | 462 | log.Printf("Target image name is %q (id: %q)", dstImg.Name, dstImg.ID) 463 | 464 | return nil 465 | }, 466 | } 467 | 468 | func init() { 469 | initImageCmdFlags() 470 | RootCmd.AddCommand(ImageCmd) 471 | } 472 | 473 | func initImageCmdFlags() { 474 | ImageCmd.Flags().StringP("to-image-name", "", "", "destination image name") 475 | } 476 | -------------------------------------------------------------------------------- /pkg/logger.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | llog "log" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | "sync" 11 | "time" 12 | 13 | "github.com/sapcc/go-bits/logg" 14 | "github.com/spf13/viper" 15 | ) 16 | 17 | type logger struct { 18 | Prefix string 19 | } 20 | 21 | func (lg *logger) Printf(format string, args ...interface{}) { 22 | for _, v := range strings.Split(fmt.Sprintf(format, args...), "\n") { 23 | l.Printf("[%s] %s", lg.Prefix, v) 24 | } 25 | } 26 | 27 | type compactLogger struct { 28 | sync.RWMutex 29 | lastMsg string 30 | } 31 | 32 | func (cl *compactLogger) Printf(format string, args ...interface{}) { 33 | msg := fmt.Sprintf(format, args...) 34 | cl.RLock() 35 | if cl.lastMsg == msg { 36 | cl.RUnlock() 37 | return 38 | } 39 | cl.RUnlock() 40 | cl.Lock() 41 | defer cl.Unlock() 42 | cl.lastMsg = msg 43 | llog.Print(msg) 44 | } 45 | 46 | func (cl *compactLogger) Fatal(args ...interface{}) { 47 | llog.Fatal(args...) 
48 | }
49 | 
50 | var (
51 | 	l   *llog.Logger
52 | 	log compactLogger
53 | )
54 | 
55 | func initLogger() {
56 | 	if l == nil {
57 | 		dir := filepath.Join(os.TempDir(), "cyclone")
58 | 		err := os.MkdirAll(dir, os.ModePerm)
59 | 		if err != nil {
60 | 			log.Fatal(err)
61 | 		}
62 | 		fileName := time.Now().Format("20060102150405") + ".log"
63 | 		logFile, err := os.Create(filepath.Join(dir, fileName))
64 | 		if err != nil {
65 | 			log.Fatal(err)
66 | 		}
67 | 
68 | 		symLink := filepath.Join(dir, "latest.log")
69 | 		if _, err := os.Lstat(symLink); err == nil {
70 | 			os.Remove(symLink)
71 | 		}
72 | 
73 | 		err = os.Symlink(fileName, symLink)
74 | 		if err != nil {
75 | 			log.Printf("Failed to create a log symlink: %s", err)
76 | 		}
77 | 
78 | 		// no need to close the log: https://golang.org/pkg/runtime/#SetFinalizer
79 | 		l = llog.New(logFile, llog.Prefix(), llog.Flags())
80 | 
81 | 		logg.SetLogger(l)
82 | 		logg.ShowDebug = true
83 | 
84 | 		if viper.GetBool("debug") {
85 | 			// write the log into stderr and the log file
86 | 			l.SetOutput(io.MultiWriter(llog.Writer(), l.Writer()))
87 | 		}
88 | 
89 | 		// write stderr logs into the log file
90 | 		llog.SetOutput(io.MultiWriter(llog.Writer(), logFile))
91 | 	}
92 | }
93 | 
--------------------------------------------------------------------------------
/pkg/main.go:
--------------------------------------------------------------------------------
1 | package pkg
2 | 
3 | import (
4 | 	"fmt"
5 | 	"os"
6 | 	"os/signal"
7 | 	"strings"
8 | 	"sync"
9 | 	"time"
10 | 
11 | 	"github.com/spf13/cobra"
12 | 	"github.com/spf13/viper"
13 | )
14 | 
15 | type Locations struct {
16 | 	Src         Location
17 | 	Dst         Location
18 | 	SameRegion  bool
19 | 	SameAZ      bool
20 | 	SameProject bool
21 | }
22 | 
23 | type Location struct {
24 | 	AuthURL                     string
25 | 	Region                      string
26 | 	Domain                      string
27 | 	Project                     string
28 | 	Username                    string
29 | 	Password                    string
30 | 	ApplicationCredentialName   string
31 | 	ApplicationCredentialID     string
32 | 	ApplicationCredentialSecret string
33 | 	Token                       string
34 | 	Origin                      string
35 | }
36 | 
37 | var (
38 | 	// RootCmd represents the base command when called without any subcommands.
39 | 	RootCmd = &cobra.Command{
40 | 		Use:          "cyclone",
41 | 		Short:        "Clone OpenStack entities easily",
42 | 		SilenceUsage: true,
43 | 	}
44 | 	cleanupFuncs []func(*sync.WaitGroup)
45 | 	// unattended mode, assume yes to all questions.
46 | 	yes bool
47 | 	// unattended mode, assume no to all questions.
48 | 	no bool
49 | )
50 | 
51 | // Execute adds all child commands to the root command and sets flags appropriately.
52 | // This is called by main.main(). It only needs to happen once to the RootCmd.
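// A SIGINT (Ctrl+C) triggers the registered cleanupFuncs concurrently, waits
// for them to finish (e.g. deleting the temporary application credentials
// created in newOpenStackClient), and then exits with code 1.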
53 | func Execute() {
54 | 	initRootCmdFlags()
55 | 
56 | 	cleanupFunc := func() {
57 | 		var wg = &sync.WaitGroup{}
58 | 		for _, f := range cleanupFuncs {
59 | 			wg.Add(1)
60 | 			go f(wg)
61 | 		}
62 | 		wg.Wait()
63 | 	}
64 | 
65 | 	c := make(chan os.Signal, 1)
66 | 	signal.Notify(c, os.Interrupt)
67 | 	go func() {
68 | 		<-c
69 | 		log.Printf("Interrupted")
70 | 		cleanupFunc()
71 | 		os.Exit(1)
72 | 	}()
73 | 
74 | 	err := RootCmd.Execute()
75 | 	if err != nil {
76 | 		log.Printf("Error: %s", err)
77 | 	}
78 | 
79 | 	cleanupFunc()
80 | 
81 | 	if err != nil {
82 | 		os.Exit(1)
83 | 	}
84 | }
85 | 
86 | func initRootCmdFlags() {
87 | 	// debug flag
88 | 	RootCmd.PersistentFlags().BoolP("debug", "d", false, "print out request and response objects")
89 | 	RootCmd.PersistentFlags().BoolP("yes", "y", false, "assume \"yes\" to all questions")
90 | 	RootCmd.PersistentFlags().BoolP("no", "n", false, "assume \"no\" to all questions")
91 | 	RootCmd.PersistentFlags().StringP("to-auth-url", "", "", "destination auth URL (if not provided, detected automatically from the source auth URL and destination region)")
92 | 	RootCmd.PersistentFlags().StringP("to-region", "", "", "destination region")
93 | 	RootCmd.PersistentFlags().StringP("to-domain", "", "", "destination domain name")
94 | 	RootCmd.PersistentFlags().StringP("to-project", "", "", "destination project name")
95 | 	RootCmd.PersistentFlags().StringP("to-username", "", "", "destination username")
96 | 	RootCmd.PersistentFlags().StringP("to-password", "", "", "destination password")
97 | 	RootCmd.PersistentFlags().StringP("to-application-credential-name", "", "", "destination application credential name")
98 | 	RootCmd.PersistentFlags().StringP("to-application-credential-id", "", "", "destination application credential ID")
99 | 	RootCmd.PersistentFlags().StringP("to-application-credential-secret", "", "", "destination application credential secret")
100 | 	RootCmd.PersistentFlags().StringP("timeout-image", "", "24h", "timeout to wait for an image status")
101 | 	RootCmd.PersistentFlags().StringP("timeout-volume", "", "24h", "timeout to wait for a volume status")
102 | 	RootCmd.PersistentFlags().StringP("timeout-server", "", "24h", "timeout to wait for a server status")
103 | 	RootCmd.PersistentFlags().StringP("timeout-snapshot", "", "24h", "timeout to wait for a snapshot status")
104 | 	RootCmd.PersistentFlags().StringP("timeout-backup", "", "24h", "timeout to wait for a backup status")
105 | 	RootCmd.PersistentFlags().StringP("timeout-share", "", "24h", "timeout to wait for a share status")
106 | 	RootCmd.PersistentFlags().StringP("timeout-share-snapshot", "", "24h", "timeout to wait for a share snapshot status")
107 | 	RootCmd.PersistentFlags().StringP("timeout-share-replica", "", "24h", "timeout to wait for a share replica status")
108 | 	RootCmd.PersistentFlags().StringP("timeout-secret", "", "24h", "timeout to wait for a secret status")
109 | 	RootCmd.PersistentFlags().StringP("timeout-security-group", "", "24h", "timeout to wait for a security group status")
110 | 	RootCmd.PersistentFlags().BoolP("image-web-download", "", false, "use Glance web-download image import method")
111 | 	RootCmd.PersistentFlags().BoolP("insecure", "k", false, "allow insecure server connections (use if you understand the risks)")
112 | 
113 | 	err := viper.BindPFlag("debug", RootCmd.PersistentFlags().Lookup("debug"))
114 | 	if err != nil {
115 | 		panic(err)
116 | 	}
117 | 	err = viper.BindPFlag("yes", RootCmd.PersistentFlags().Lookup("yes"))
118 | 	if err != nil {
119 | 		panic(err)
120 | 	}
121 | 	err = viper.BindPFlag("no",
RootCmd.PersistentFlags().Lookup("no")) 122 | if err != nil { 123 | panic(err) 124 | } 125 | err = viper.BindPFlag("to-auth-url", RootCmd.PersistentFlags().Lookup("to-auth-url")) 126 | if err != nil { 127 | panic(err) 128 | } 129 | err = viper.BindPFlag("to-region", RootCmd.PersistentFlags().Lookup("to-region")) 130 | if err != nil { 131 | panic(err) 132 | } 133 | err = viper.BindPFlag("to-domain", RootCmd.PersistentFlags().Lookup("to-domain")) 134 | if err != nil { 135 | panic(err) 136 | } 137 | err = viper.BindPFlag("to-project", RootCmd.PersistentFlags().Lookup("to-project")) 138 | if err != nil { 139 | panic(err) 140 | } 141 | err = viper.BindPFlag("to-username", RootCmd.PersistentFlags().Lookup("to-username")) 142 | if err != nil { 143 | panic(err) 144 | } 145 | err = viper.BindPFlag("to-password", RootCmd.PersistentFlags().Lookup("to-password")) 146 | if err != nil { 147 | panic(err) 148 | } 149 | err = viper.BindPFlag("to-application-credential-name", RootCmd.PersistentFlags().Lookup("to-application-credential-name")) 150 | if err != nil { 151 | panic(err) 152 | } 153 | err = viper.BindPFlag("to-application-credential-id", RootCmd.PersistentFlags().Lookup("to-application-credential-id")) 154 | if err != nil { 155 | panic(err) 156 | } 157 | err = viper.BindPFlag("to-application-credential-secret", RootCmd.PersistentFlags().Lookup("to-application-credential-secret")) 158 | if err != nil { 159 | panic(err) 160 | } 161 | err = viper.BindPFlag("timeout-image", RootCmd.PersistentFlags().Lookup("timeout-image")) 162 | if err != nil { 163 | panic(err) 164 | } 165 | err = viper.BindPFlag("timeout-volume", RootCmd.PersistentFlags().Lookup("timeout-volume")) 166 | if err != nil { 167 | panic(err) 168 | } 169 | err = viper.BindPFlag("timeout-server", RootCmd.PersistentFlags().Lookup("timeout-server")) 170 | if err != nil { 171 | panic(err) 172 | } 173 | err = viper.BindPFlag("timeout-snapshot", RootCmd.PersistentFlags().Lookup("timeout-snapshot")) 174 | if err != nil { 175 | panic(err) 176 | } 177 | err = viper.BindPFlag("timeout-backup", RootCmd.PersistentFlags().Lookup("timeout-backup")) 178 | if err != nil { 179 | panic(err) 180 | } 181 | err = viper.BindPFlag("timeout-share", RootCmd.PersistentFlags().Lookup("timeout-share")) 182 | if err != nil { 183 | panic(err) 184 | } 185 | err = viper.BindPFlag("timeout-share-snapshot", RootCmd.PersistentFlags().Lookup("timeout-share-snapshot")) 186 | if err != nil { 187 | panic(err) 188 | } 189 | err = viper.BindPFlag("timeout-share-replica", RootCmd.PersistentFlags().Lookup("timeout-share-replica")) 190 | if err != nil { 191 | panic(err) 192 | } 193 | err = viper.BindPFlag("timeout-secret", RootCmd.PersistentFlags().Lookup("timeout-secret")) 194 | if err != nil { 195 | panic(err) 196 | } 197 | err = viper.BindPFlag("timeout-security-group", RootCmd.PersistentFlags().Lookup("timeout-security-group")) 198 | if err != nil { 199 | panic(err) 200 | } 201 | err = viper.BindPFlag("image-web-download", RootCmd.PersistentFlags().Lookup("image-web-download")) 202 | if err != nil { 203 | panic(err) 204 | } 205 | err = viper.BindPFlag("insecure", RootCmd.PersistentFlags().Lookup("insecure")) 206 | if err != nil { 207 | panic(err) 208 | } 209 | } 210 | 211 | func parseTimeoutArg(arg string, dst *float64, errors *[]error) { 212 | s := viper.GetString(arg) 213 | v, err := time.ParseDuration(s) 214 | if err != nil { 215 | *errors = append(*errors, fmt.Errorf("failed to parse --%s value: %q", arg, s)) 216 | return 217 | } 218 | t := int(v.Seconds()) 219 | if t == 0 { 
220 | 		*errors = append(*errors, fmt.Errorf("--%s value cannot be zero: %d", arg, t))
221 | 	}
222 | 	if t < 0 {
223 | 		*errors = append(*errors, fmt.Errorf("--%s value cannot be negative: %d", arg, t))
224 | 	}
225 | 	*dst = float64(t)
226 | }
227 | 
228 | func parseTimeoutArgs() error {
229 | 	var errors []error
230 | 	yes = viper.GetBool("yes")
231 | 	no = viper.GetBool("no")
232 | 	if yes && no {
233 | 		errors = append(errors, fmt.Errorf("combining \"yes\" and \"no\" flags is not allowed"))
234 | 	}
235 | 	parseTimeoutArg("timeout-image", &waitForImageSec, &errors)
236 | 	parseTimeoutArg("timeout-volume", &waitForVolumeSec, &errors)
237 | 	parseTimeoutArg("timeout-server", &waitForServerSec, &errors)
238 | 	parseTimeoutArg("timeout-snapshot", &waitForSnapshotSec, &errors)
239 | 	parseTimeoutArg("timeout-backup", &waitForBackupSec, &errors)
240 | 	parseTimeoutArg("timeout-share", &waitForShareSec, &errors)
241 | 	parseTimeoutArg("timeout-share-snapshot", &waitForShareSnapshotSec, &errors)
242 | 	parseTimeoutArg("timeout-share-replica", &waitForShareReplicaSec, &errors)
243 | 	parseTimeoutArg("timeout-secret", &waitForSecretSec, &errors)
244 | 	parseTimeoutArg("timeout-security-group", &waitForSecurityGroupSec, &errors)
245 | 	if len(errors) > 0 {
246 | 		return fmt.Errorf("%q", errors)
247 | 	}
248 | 	return nil
249 | }
250 | 
251 | func getSrcAndDst(az string) (Locations, error) {
252 | 	initLogger()
253 | 
254 | 	var loc Locations
255 | 
256 | 	// source and destination parameters
257 | 	loc.Src.Origin = "src"
258 | 	loc.Src.Region = os.Getenv("OS_REGION_NAME")
259 | 	loc.Src.AuthURL = os.Getenv("OS_AUTH_URL")
260 | 	loc.Src.Domain = os.Getenv("OS_PROJECT_DOMAIN_NAME")
261 | 	if loc.Src.Domain == "" {
262 | 		loc.Src.Domain = os.Getenv("OS_USER_DOMAIN_NAME")
263 | 	}
264 | 	loc.Src.Project = os.Getenv("OS_PROJECT_NAME")
265 | 	loc.Src.Username = os.Getenv("OS_USERNAME")
266 | 	loc.Src.Password = os.Getenv("OS_PASSWORD")
267 | 	loc.Src.ApplicationCredentialName = os.Getenv("OS_APPLICATION_CREDENTIAL_NAME")
268 | 	loc.Src.ApplicationCredentialID = os.Getenv("OS_APPLICATION_CREDENTIAL_ID")
269 | 	loc.Src.ApplicationCredentialSecret = os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET")
270 | 	loc.Src.Token = os.Getenv("OS_AUTH_TOKEN")
271 | 
272 | 	loc.Dst.Origin = "dst"
273 | 	loc.Dst.Region = viper.GetString("to-region")
274 | 	loc.Dst.AuthURL = viper.GetString("to-auth-url")
275 | 	loc.Dst.Domain = viper.GetString("to-domain")
276 | 	loc.Dst.Project = viper.GetString("to-project")
277 | 	loc.Dst.Username = viper.GetString("to-username")
278 | 	loc.Dst.Password = viper.GetString("to-password")
279 | 	loc.Dst.ApplicationCredentialName = viper.GetString("to-application-credential-name")
280 | 	loc.Dst.ApplicationCredentialID = viper.GetString("to-application-credential-id")
281 | 	loc.Dst.ApplicationCredentialSecret = viper.GetString("to-application-credential-secret")
282 | 
283 | 	if loc.Dst.Project == "" {
284 | 		loc.Dst.Project = loc.Src.Project
285 | 	}
286 | 
287 | 	if loc.Dst.Region == "" {
288 | 		loc.Dst.Region = loc.Src.Region
289 | 	}
290 | 
291 | 	if loc.Dst.Domain == "" {
292 | 		loc.Dst.Domain = loc.Src.Domain
293 | 	}
294 | 
295 | 	if loc.Dst.Username == "" {
296 | 		loc.Dst.Username = loc.Src.Username
297 | 	}
298 | 
299 | 	if loc.Dst.Password == "" {
300 | 		loc.Dst.Password = loc.Src.Password
301 | 	}
302 | 
303 | 	if loc.Dst.AuthURL == "" {
304 | 		// try to transform the source auth URL into a destination auth URL
305 | 		s := strings.Replace(loc.Src.AuthURL, loc.Src.Region, loc.Dst.Region, 1)
306 | 		if strings.Contains(s, loc.Dst.Region) {
307 | 			loc.Dst.AuthURL = s
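// For example (hypothetical URLs for illustration): with
// OS_AUTH_URL=https://identity.region-a.example.com/v3 and --to-region=region-b,
// the detected destination auth URL becomes
// https://identity.region-b.example.com/v3, because the source region name is
// substituted once within the source URL.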
308 | log.Printf("Detected %q destination auth URL", loc.Dst.AuthURL) 309 | } else { 310 | return loc, fmt.Errorf("failed to detect destination auth URL, please specify --to-auth-url explicitly") 311 | } 312 | } 313 | 314 | loc.SameRegion = false 315 | if loc.Src.Region == loc.Dst.Region { 316 | if loc.Src.Domain == loc.Dst.Domain { 317 | if loc.Src.Project == loc.Dst.Project { 318 | loc.SameProject = true 319 | // share the same token 320 | loc.Dst.Token = loc.Src.Token 321 | } 322 | } 323 | loc.SameRegion = true 324 | } 325 | 326 | return loc, nil 327 | } 328 | -------------------------------------------------------------------------------- /pkg/secrets.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "fmt" 7 | "strings" 8 | "time" 9 | 10 | "github.com/gophercloud/gophercloud/v2" 11 | "github.com/gophercloud/gophercloud/v2/openstack/keymanager/v1/acls" 12 | "github.com/gophercloud/gophercloud/v2/openstack/keymanager/v1/secrets" 13 | "github.com/spf13/cobra" 14 | "github.com/spf13/viper" 15 | "github.com/xhit/go-str2duration/v2" 16 | ) 17 | 18 | var ( 19 | waitForSecretSec float64 20 | secretWaitStatuses = []string{ 21 | "PENDING", 22 | } 23 | ) 24 | 25 | func secretsIDFromName(ctx context.Context, client *gophercloud.ServiceClient, name string) (string, error) { 26 | pages, err := secrets.List(client, secrets.ListOpts{ 27 | Name: name, 28 | }).AllPages(ctx) 29 | if err != nil { 30 | return "", err 31 | } 32 | 33 | all, err := secrets.ExtractSecrets(pages) 34 | if err != nil { 35 | return "", err 36 | } 37 | 38 | ids := make([]string, len(all)) 39 | for i := range all { 40 | ids[i] = all[i].SecretRef 41 | } 42 | 43 | switch count := len(ids); count { 44 | case 0: 45 | return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "secret"} 46 | case 1: 47 | return uuidFromSecretRef(ids[0]), nil 48 | default: 49 | return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "secret"} 50 | } 51 | } 52 | 53 | func waitForSecret(ctx context.Context, client *gophercloud.ServiceClient, id string, secs float64) (*secrets.Secret, error) { 54 | var secret *secrets.Secret 55 | var err error 56 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) { 57 | secret, err = secrets.Get(ctx, client, id).Extract() 58 | if err != nil { 59 | return false, err 60 | } 61 | 62 | // show user friendly status 63 | log.Printf("secret status: %s", secret.Status) 64 | 65 | if secret.Status == "ACTIVE" { 66 | return true, nil 67 | } 68 | 69 | if !isSliceContainsStr(secretWaitStatuses, secret.Status) { 70 | return false, fmt.Errorf("secret status is %q", secret.Status) 71 | } 72 | 73 | // continue status checks 74 | return false, nil 75 | }) 76 | 77 | return secret, err 78 | } 79 | 80 | func secretPayload(ctx context.Context, kmClient *gophercloud.ServiceClient, id, contentType string) (string, error) { 81 | opts := secrets.GetPayloadOpts{ 82 | PayloadContentType: contentType, 83 | } 84 | payload, err := secrets.GetPayload(ctx, kmClient, id, opts).Extract() 85 | if err != nil { 86 | return "", fmt.Errorf("could not retrieve payload for secret with id %s: %v", id, err) 87 | } 88 | 89 | if !strings.HasPrefix(contentType, "text/") { 90 | return base64.StdEncoding.EncodeToString(payload), nil 91 | } 92 | 93 | return string(payload), nil 94 | } 95 | 96 | func migrateSecret(ctx context.Context, srcSecretClient, dstSecretClient 
*gophercloud.ServiceClient, srcSecret *secrets.Secret, toSecretName string, toExpiration time.Duration) (*secrets.Secret, error) {
97 | 	id := uuidFromSecretRef(srcSecret.SecretRef)
98 | 	contentType := srcSecret.ContentTypes["default"]
99 | 	payload, err := secretPayload(ctx, srcSecretClient, id, contentType)
100 | 	if err != nil {
101 | 		return nil, err
102 | 	}
103 | 
104 | 	acl, err := acls.GetSecretACL(ctx, srcSecretClient, id).Extract()
105 | 	if err != nil {
106 | 		return nil, fmt.Errorf("unable to get %s secret ACLs: %v", id, err)
107 | 	}
108 | 
109 | 	metadataMap, err := secrets.GetMetadata(ctx, srcSecretClient, id).Extract()
110 | 	if err != nil {
111 | 		return nil, fmt.Errorf("unable to get %s secret metadata: %v", id, err)
112 | 	}
113 | 
114 | 	// WRITE
115 | 	name := srcSecret.Name
116 | 	if toSecretName != "" {
117 | 		name = toSecretName
118 | 	}
119 | 	now := time.Now().UTC()
120 | 	// default to one month
121 | 	expiration := now.AddDate(0, 1, 0)
122 | 	// if the source expiration has not passed yet, inherit its expiration date
123 | 	if srcSecret.Expiration.After(now) {
124 | 		log.Printf("Inheriting the expiration date from the source secret (%s)", srcSecret.Expiration)
125 | 		expiration = srcSecret.Expiration
126 | 	}
127 | 	// if a custom expiration is set, enforce it
128 | 	if toExpiration > 0 {
129 | 		expiration = now.Add(toExpiration)
130 | 	}
131 | 	log.Printf("setting destination secret expiration date to %s", expiration)
132 | 	createOpts := secrets.CreateOpts{
133 | 		Name:       name,
134 | 		Algorithm:  srcSecret.Algorithm,
135 | 		BitLength:  srcSecret.BitLength,
136 | 		Mode:       srcSecret.Mode,
137 | 		Expiration: &expiration,
138 | 		SecretType: secrets.SecretType(srcSecret.SecretType),
139 | 	}
140 | 	dstSecret, err := secrets.Create(ctx, dstSecretClient, createOpts).Extract()
141 | 	if err != nil {
142 | 		return nil, fmt.Errorf("error creating the destination secret: %v", err)
143 | 	}
144 | 
145 | 	dstID := uuidFromSecretRef(dstSecret.SecretRef)
146 | 	cleanup := func() {
147 | 		if err == nil {
148 | 			return
149 | 		}
150 | 
151 | 		// clean up the partially created secret
152 | 		log.Printf("cleaning up the partially created %s destination secret", dstID)
153 | 		err := secrets.Delete(ctx, dstSecretClient, dstID).ExtractErr()
154 | 		if err != nil {
155 | 			log.Printf("failed to delete the partially created %s destination secret", dstID)
156 | 		}
157 | 	}
158 | 	defer cleanup()
159 | 
160 | 	// populate the "dstSecret", since the Create method returns only the SecretRef
161 | 	dstSecret, err = waitForSecret(ctx, dstSecretClient, dstID, waitForSecretSec)
162 | 	if err != nil {
163 | 		return nil, fmt.Errorf("error waiting for the destination secret: %v", err)
164 | 	}
165 | 
166 | 	// set the ACL first, before uploading the payload
167 | 	if acl != nil {
168 | 		acl, ok := (*acl)["read"]
169 | 		if ok {
170 | 			var setOpts acls.SetOpts
171 | 			var users *[]string
172 | 			if len(acl.Users) > 0 {
173 | 				users = &acl.Users
174 | 			}
175 | 			setOpts = []acls.SetOpt{
176 | 				{
177 | 					Type:          "read",
178 | 					Users:         users,
179 | 					ProjectAccess: &acl.ProjectAccess,
180 | 				},
181 | 			}
182 | 			_, err = acls.SetSecretACL(ctx, dstSecretClient, dstID, setOpts).Extract()
183 | 			if err != nil {
184 | 				return nil, fmt.Errorf("error setting ACLs for the destination secret: %v", err)
185 | 			}
186 | 		}
187 | 	}
188 | 
189 | 	encoding := ""
190 | 	if !strings.HasPrefix(contentType, "text/") {
191 | 		encoding = "base64"
192 | 	}
193 | 	updateOpts := secrets.UpdateOpts{
194 | 		Payload:         payload,
195 | 		ContentType:     contentType,
196 | 		ContentEncoding: encoding,
197 | 	}
198 | 	err = secrets.Update(ctx, dstSecretClient, dstID, updateOpts).Err
199 | 	if err != nil {
200 | 		return nil, fmt.Errorf("error setting the destination secret payload: %v", err)
201 | 	}
202 | 
203 | 	_, err = waitForSecret(ctx, dstSecretClient, dstID, waitForSecretSec)
204 | 	if err != nil {
205 | 		return nil, fmt.Errorf("error waiting for the destination secret: %v", err)
206 | 	}
207 | 
208 | 	if len(metadataMap) == 0 {
209 | 		return dstSecret, nil
210 | 	}
211 | 
212 | 	_, err = secrets.CreateMetadata(ctx, dstSecretClient, dstID, secrets.MetadataOpts(metadataMap)).Extract()
213 | 	if err != nil {
214 | 		return nil, fmt.Errorf("error creating metadata for the destination secret: %v", err)
215 | 	}
216 | 
217 | 	_, err = waitForSecret(ctx, dstSecretClient, dstID, waitForSecretSec)
218 | 	if err != nil {
219 | 		return nil, fmt.Errorf("error waiting for the destination secret: %v", err)
220 | 	}
221 | 
222 | 	return dstSecret, nil
223 | }
224 | 
225 | func uuidFromSecretRef(ref string) string {
226 | 	// a secret ref has the form https://{barbican_host}/v1/secrets/{secret_uuid},
227 | 	// so we are only interested in the last part
228 | 	return ref[strings.LastIndex(ref, "/")+1:]
229 | }
230 | 
231 | // SecretCmd represents the secret command.
232 | var SecretCmd = &cobra.Command{
233 | 	Use:   "secret <name|id>",
234 | 	Args:  cobra.ExactArgs(1),
235 | 	Short: "Clone a secret",
236 | 	PreRunE: func(cmd *cobra.Command, args []string) error {
237 | 		if err := parseTimeoutArgs(); err != nil {
238 | 			return err
239 | 		}
240 | 		return viper.BindPFlags(cmd.Flags())
241 | 	},
242 | 	RunE: func(cmd *cobra.Command, args []string) error {
243 | 		// migrate secret
244 | 		secret := args[0]
245 | 		toName := viper.GetString("to-secret-name")
246 | 		toExp := viper.GetString("to-secret-expiration")
247 | 		var toExpiration time.Duration
248 | 
249 | 		if toExp != "" {
250 | 			var err error
251 | 			toExpiration, err = str2duration.ParseDuration(toExp)
252 | 			if err != nil {
253 | 				return fmt.Errorf("failed to parse --to-secret-expiration value: %v", err)
254 | 			}
255 | 		}
256 | 
257 | 		// source and destination parameters
258 | 		loc, err := getSrcAndDst("")
259 | 		if err != nil {
260 | 			return err
261 | 		}
262 | 
263 | 		srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src)
264 | 		if err != nil {
265 | 			return fmt.Errorf("failed to create a source OpenStack client: %v", err)
266 | 		}
267 | 
268 | 		srcSecretClient, err := newSecretManagerV1Client(srcProvider, loc.Src.Region)
269 | 		if err != nil {
270 | 			return fmt.Errorf("failed to create source keymanager client: %v", err)
271 | 		}
272 | 
273 | 		// resolve secret name to an ID
274 | 		if v, err := secretsIDFromName(cmd.Context(), srcSecretClient, secret); err == nil {
275 | 			secret = v
276 | 		} else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok {
277 | 			return err
278 | 		}
279 | 
280 | 		srcSecret, err := waitForSecret(cmd.Context(), srcSecretClient, secret, waitForSecretSec)
281 | 		if err != nil {
282 | 			// try to get the secret UUID from the ref URL
283 | 			srcSecret, err = waitForSecret(cmd.Context(), srcSecretClient, uuidFromSecretRef(secret), waitForSecretSec)
284 | 			if err != nil {
285 | 				return fmt.Errorf("failed to wait for %q source secret: %v", secret, err)
286 | 			}
287 | 		}
288 | 
289 | 		dstProvider, err := newOpenStackClient(cmd.Context(), loc.Dst)
290 | 		if err != nil {
291 | 			return fmt.Errorf("failed to create a destination OpenStack client: %v", err)
292 | 		}
293 | 
294 | 		dstSecretClient, err := newSecretManagerV1Client(dstProvider, loc.Dst.Region)
295 | 		if err != nil {
296 | 			return fmt.Errorf("failed to create destination keymanager client: %v", err)
297 | 		}
298 | 
299 | 		defer measureTime()
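// migrateSecret (defined above) copies the payload, the read ACL, and the
// metadata of the source secret; it derives Barbican UUIDs from secret refs
// such as https://barbican.example.com/v1/secrets/<uuid> (hypothetical host)
// via uuidFromSecretRef.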
300 | 
301 | 		dstSecret, err := migrateSecret(cmd.Context(), srcSecretClient, dstSecretClient, srcSecret, toName, toExpiration)
302 | 		if err != nil {
303 | 			return err
304 | 		}
305 | 
306 | 		log.Printf("Target secret name is %q (ref: %q)", dstSecret.Name, dstSecret.SecretRef)
307 | 
308 | 		return nil
309 | 	},
310 | }
311 | 
312 | func init() {
313 | 	initSecretCmdFlags()
314 | 	RootCmd.AddCommand(SecretCmd)
315 | }
316 | 
317 | func initSecretCmdFlags() {
318 | 	SecretCmd.Flags().StringP("to-secret-name", "", "", "destination secret name")
319 | 	SecretCmd.Flags().StringP("to-secret-expiration", "", "", "destination secret expiration duration from now (if not set, defaults to the source expiration; if the source expiration is also not set or has already passed, defaults to one month from now)")
320 | }
321 | 
--------------------------------------------------------------------------------
/pkg/security_group.go:
--------------------------------------------------------------------------------
1 | package pkg
2 | 
3 | import (
4 | 	"context"
5 | 	"fmt"
6 | 	"net/http"
7 | 
8 | 	"github.com/gophercloud/gophercloud/v2"
9 | 	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/groups"
10 | 	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules"
11 | 	"github.com/spf13/cobra"
12 | 	"github.com/spf13/viper"
13 | )
14 | 
15 | var (
16 | 	waitForSecurityGroupSec    float64
17 | 	syncedSrcDstSecurityGroups = make(map[string]string)
18 | 	disableDetection           bool
19 | )
20 | 
21 | func securityGroupsIDFromName(ctx context.Context, client *gophercloud.ServiceClient, name string) (string, error) {
22 | 	pages, err := groups.List(client, groups.ListOpts{
23 | 		Name: name,
24 | 	}).AllPages(ctx)
25 | 	if err != nil {
26 | 		return "", err
27 | 	}
28 | 
29 | 	all, err := groups.ExtractGroups(pages)
30 | 	if err != nil {
31 | 		return "", err
32 | 	}
33 | 
34 | 	ids := make([]string, len(all))
35 | 	for i := range all {
36 | 		ids[i] = all[i].ID
37 | 	}
38 | 
39 | 	switch count := len(ids); count {
40 | 	case 0:
41 | 		return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "security-group"}
42 | 	case 1:
43 | 		return ids[0], nil
44 | 	default:
45 | 		return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "security-group"}
46 | 	}
47 | }
48 | 
49 | func retryRulesCreate(ctx context.Context, client *gophercloud.ServiceClient, opts rules.CreateOpts) (*rules.SecGroupRule, error) {
50 | 	var rule *rules.SecGroupRule
51 | 	var err error
52 | 	err = NewBackoff(int(waitForSecurityGroupSec), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) {
53 | 		rule, err = rules.Create(ctx, client, opts).Extract()
54 | 		if gophercloud.ResponseCodeIs(err, http.StatusTooManyRequests) {
55 | 			// 429 is a rate limit error, we should retry
56 | 			return false, nil
57 | 		}
58 | 		return true, err
59 | 	})
60 | 	return rule, err
61 | }
62 | 
63 | // SecurityGroupCmd represents the security group command.
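//
// A hypothetical invocation (the flags are defined in initSecurityGroupCmdFlags
// below; source credentials come from the usual OS_* environment variables):
//
//	cyclone security-group <name|id> --to-project <project> --to-security-group-name <new-name>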
64 | var SecurityGroupCmd = &cobra.Command{
65 | 	Use:   "security-group <name|id>",
66 | 	Args:  cobra.ExactArgs(1),
67 | 	Short: "Clone a security group",
68 | 	PreRunE: func(cmd *cobra.Command, args []string) error {
69 | 		if err := parseTimeoutArgs(); err != nil {
70 | 			return err
71 | 		}
72 | 		return viper.BindPFlags(cmd.Flags())
73 | 	},
74 | 	RunE: func(cmd *cobra.Command, args []string) error {
75 | 		// migrate security group
76 | 		securityGroup := args[0]
77 | 		toName, _ := cmd.Flags().GetString("to-security-group-name")
78 | 		disableDetection, _ = cmd.Flags().GetBool("disable-target-security-group-detection")
79 | 
80 | 		// source and destination parameters
81 | 		loc, err := getSrcAndDst("")
82 | 		if err != nil {
83 | 			return err
84 | 		}
85 | 
86 | 		srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src)
87 | 		if err != nil {
88 | 			return fmt.Errorf("failed to create a source OpenStack client: %v", err)
89 | 		}
90 | 
91 | 		srcNetworkClient, err := newNetworkV2Client(srcProvider, loc.Src.Region)
92 | 		if err != nil {
93 | 			return fmt.Errorf("failed to create source network client: %v", err)
94 | 		}
95 | 
96 | 		// resolve security group name to an ID
97 | 		if v, err := securityGroupsIDFromName(cmd.Context(), srcNetworkClient, securityGroup); err == nil {
98 | 			securityGroup = v
99 | 		} else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok {
100 | 			return err
101 | 		}
102 | 
103 | 		dstProvider, err := newOpenStackClient(cmd.Context(), loc.Dst)
104 | 		if err != nil {
105 | 			return fmt.Errorf("failed to create a destination OpenStack client: %v", err)
106 | 		}
107 | 
108 | 		dstNetworkClient, err := newNetworkV2Client(dstProvider, loc.Dst.Region)
109 | 		if err != nil {
110 | 			return fmt.Errorf("failed to create destination network client: %v", err)
111 | 		}
112 | 
113 | 		defer measureTime()
114 | 
115 | 		dstSecurityGroup, err := migrateSecurityGroup(cmd.Context(), srcNetworkClient, dstNetworkClient, securityGroup, toName)
116 | 		if err != nil {
117 | 			return err
118 | 		}
119 | 
120 | 		log.Printf("Target security group name is %q (id: %q)", dstSecurityGroup.Name, dstSecurityGroup.ID)
121 | 
122 | 		return nil
123 | 	},
124 | }
125 | 
126 | func migrateSecurityGroup(ctx context.Context, srcNetworkClient *gophercloud.ServiceClient, dstNetworkClient *gophercloud.ServiceClient, securityGroup string, toSecurityGroupName string) (*groups.SecGroup, error) {
127 | 	sg, err := groups.Get(ctx, srcNetworkClient, securityGroup).Extract()
128 | 	if err != nil {
129 | 		return nil, err
130 | 	}
131 | 
132 | 	if toSecurityGroupName == "" {
133 | 		toSecurityGroupName = sg.Name
134 | 	}
135 | 	if !disableDetection {
136 | 		// check if a security group with the same name already exists in the destination
137 | 		if secGroupID, err := securityGroupsIDFromName(ctx, dstNetworkClient, toSecurityGroupName); err == nil {
138 | 			// the security group already exists
139 | 			return groups.Get(ctx, dstNetworkClient, secGroupID).Extract()
140 | 		}
141 | 	}
142 | 
143 | 	log.Printf("Creating security group %q", toSecurityGroupName)
144 | 	createOpts := groups.CreateOpts{
145 | 		Name:        toSecurityGroupName,
146 | 		Description: sg.Description,
147 | 	}
148 | 	newSecurityGroup, err := groups.Create(ctx, dstNetworkClient, createOpts).Extract()
149 | 	if err != nil {
150 | 		return nil, err
151 | 	}
152 | 	// store the mapping of source and destination security group IDs in case we encounter a rule with a remote group
153 | 	syncedSrcDstSecurityGroups[securityGroup] = newSecurityGroup.ID
154 | 
155 | 	// delete the default egress rules to get a clean slate
156 | 	for _, rule := range newSecurityGroup.Rules {
157 | 		if rule.Direction == "egress" {
rule.Direction == "egress" { 158 | if err = rules.Delete(ctx, dstNetworkClient, rule.ID).ExtractErr(); err != nil { 159 | return nil, err 160 | } 161 | } 162 | } 163 | 164 | for _, rule := range sg.Rules { 165 | var remoteGroupID string 166 | if rule.RemoteGroupID != "" { 167 | if rule.RemoteGroupID == rule.ID { 168 | remoteGroupID = newSecurityGroup.ID 169 | } else if targetRemoteID, ok := syncedSrcDstSecurityGroups[rule.RemoteGroupID]; ok { 170 | // remote group already exists 171 | remoteGroupID = targetRemoteID 172 | } else { 173 | // create remote security group if it doesn't exist 174 | remoteSecurityGroup, err := migrateSecurityGroup(ctx, srcNetworkClient, dstNetworkClient, rule.RemoteGroupID, "") 175 | if err != nil { 176 | return nil, err 177 | } 178 | // update rule with new remote group ID 179 | remoteGroupID = remoteSecurityGroup.ID 180 | } 181 | } 182 | 183 | ruleCreateOpts := rules.CreateOpts{ 184 | Direction: rules.RuleDirection(rule.Direction), 185 | Description: rule.Description, 186 | EtherType: rules.RuleEtherType(rule.EtherType), 187 | SecGroupID: newSecurityGroup.ID, 188 | PortRangeMax: rule.PortRangeMax, 189 | PortRangeMin: rule.PortRangeMin, 190 | Protocol: rules.RuleProtocol(rule.Protocol), 191 | RemoteGroupID: remoteGroupID, 192 | RemoteIPPrefix: rule.RemoteIPPrefix, 193 | } 194 | 195 | // retry rule creation on 429 rate limit errors 196 | if _, err = retryRulesCreate(ctx, dstNetworkClient, ruleCreateOpts); err != nil { 197 | return nil, err 198 | } 199 | } 200 | 201 | return newSecurityGroup, nil 202 | } 203 | 204 | func init() { 205 | initSecurityGroupCmdFlags() 206 | RootCmd.AddCommand(SecurityGroupCmd) 207 | } 208 | 209 | func initSecurityGroupCmdFlags() { 210 | SecurityGroupCmd.Flags().StringP("to-security-group-name", "", "", "destination security group name") 211 | SecurityGroupCmd.Flags().BoolP("disable-target-security-group-detection", "", false, "disable automatic detection of existent target security groups") 212 | } 213 | -------------------------------------------------------------------------------- /pkg/share.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "strings" 8 | "time" 9 | 10 | "github.com/gophercloud/gophercloud/v2" 11 | "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/replicas" 12 | "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/shares" 13 | "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/snapshots" 14 | shares_utils "github.com/gophercloud/utils/v2/openstack/sharedfilesystems/v2/shares" 15 | "github.com/spf13/cobra" 16 | "github.com/spf13/viper" 17 | ) 18 | 19 | var ( 20 | waitForShareSec float64 21 | waitForShareSnapshotSec float64 22 | waitForShareReplicaSec float64 23 | ) 24 | 25 | var shareNormalStatuses = []string{ 26 | "available", 27 | "in-use", 28 | } 29 | 30 | var shareSnapshotNormalStatuses = []string{ 31 | "available", 32 | } 33 | 34 | var replicaNormalStatuses = []string{ 35 | "available", 36 | } 37 | 38 | func waitForShareSnapshot(ctx context.Context, client *gophercloud.ServiceClient, id string, secs float64) (*snapshots.Snapshot, error) { 39 | var snapshot *snapshots.Snapshot 40 | var err error 41 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) { 42 | snapshot, err = snapshots.Get(ctx, client, id).Extract() 43 | if err != nil { 44 | return false, err 45 | } 46 | 47 | log.Printf("Intermediate snapshot 
status: %s", snapshot.Status) 48 | if isSliceContainsStr(shareSnapshotNormalStatuses, snapshot.Status) { 49 | return true, nil 50 | } 51 | 52 | if strings.Contains(snapshot.Status, "error") { 53 | return false, fmt.Errorf("intermediate snapshot status is %q", snapshot.Status) 54 | } 55 | 56 | // continue status checks 57 | return false, nil 58 | }) 59 | 60 | return snapshot, err 61 | } 62 | 63 | func waitForShareReplica(ctx context.Context, client *gophercloud.ServiceClient, id string, secs float64) (*replicas.Replica, error) { 64 | var replica *replicas.Replica 65 | var err error 66 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) { 67 | replica, err = replicas.Get(ctx, client, id).Extract() 68 | if err != nil { 69 | return false, err 70 | } 71 | 72 | log.Printf("Intermediate replica status and state: %s/%s", replica.Status, replica.State) 73 | if isSliceContainsStr(replicaNormalStatuses, replica.Status) { 74 | return true, nil 75 | } 76 | 77 | if strings.Contains(replica.Status, "error") { 78 | return false, fmt.Errorf("intermediate replica status and state is %s/%s", replica.Status, replica.State) 79 | } 80 | 81 | // continue status checks 82 | return false, nil 83 | }) 84 | 85 | return replica, err 86 | } 87 | 88 | func waitForShareReplicaState(ctx context.Context, client *gophercloud.ServiceClient, id, state string, secs float64) (*replicas.Replica, error) { 89 | var replica *replicas.Replica 90 | var err error 91 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) { 92 | replica, err = replicas.Get(ctx, client, id).Extract() 93 | if err != nil { 94 | return false, err 95 | } 96 | 97 | log.Printf("Intermediate replica status and state: %s/%s", replica.Status, replica.State) 98 | if isSliceContainsStr(replicaNormalStatuses, replica.Status) && 99 | replica.State == state { 100 | return true, nil 101 | } 102 | 103 | if strings.Contains(replica.Status, "error") { 104 | return false, fmt.Errorf("intermediate replica status and state is %s/%s", replica.Status, replica.State) 105 | } 106 | 107 | // continue status checks 108 | return false, nil 109 | }) 110 | 111 | return replica, err 112 | } 113 | 114 | func waitForShare(ctx context.Context, client *gophercloud.ServiceClient, id string, secs float64) (*shares.Share, error) { 115 | var share *shares.Share 116 | var err error 117 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) { 118 | share, err = shares.Get(ctx, client, id).Extract() 119 | if err != nil { 120 | return false, err 121 | } 122 | 123 | log.Printf("Share status: %s", share.Status) 124 | // TODO: specify target states in func params 125 | if isSliceContainsStr(shareNormalStatuses, share.Status) { 126 | return true, nil 127 | } 128 | 129 | if strings.Contains(share.Status, "error") { 130 | return false, fmt.Errorf("share status is %q", share.Status) 131 | } 132 | 133 | // continue status checks 134 | return false, nil 135 | }) 136 | 137 | return share, err 138 | } 139 | 140 | func createShareSpeed(share *shares.Share) { 141 | // cinder doesn't update the UpdatedAt attribute, when the share status is updated 142 | t := time.Since(share.CreatedAt) 143 | log.Printf("Time to create a share: %s", t) 144 | size := float64(share.Size * 1024) 145 | log.Printf("Size of the share: %.2f Mb", size) 146 | log.Printf("Speed of the share creation: %.2f Mb/sec", size/t.Seconds()) 147 | } 148 | 149 | // findOrCreateShareReplica returns the new or existing inactive 
150 | func findOrCreateShareReplica(ctx context.Context, srcShareClient *gophercloud.ServiceClient, srcShare *shares.Share, netID, az string) (*replicas.Replica, *replicas.Replica, error) {
151 | 	curReplica, allReplicas, err := findShareActiveReplica(ctx, srcShareClient, srcShare.ID)
152 | 	if err != nil {
153 | 		return nil, nil, err
154 | 	}
155 | 	if curReplica.AvailabilityZone == az {
156 | 		return nil, nil, fmt.Errorf("the current %q share replica is already in the desired destination zone", curReplica.ID)
157 | 	}
158 | 
159 | 	if netID == "" {
160 | 		netID = srcShare.ShareNetworkID
161 | 	}
162 | 
163 | 	// check whether there is an existing replica in the destination AZ
164 | 	for _, v := range allReplicas {
165 | 		if v.AvailabilityZone == az && v.Status == "available" {
166 | 			if v.ShareNetworkID != netID {
167 | 				return nil, nil, fmt.Errorf("the replica was found, but it was created in a different share network: %s", v.ShareNetworkID)
168 | 			}
169 | 			// found an existing replica in the destination AZ
170 | 			return &v, curReplica, nil
171 | 		}
172 | 	}
173 | 
174 | 	// create a replica in the new AZ
175 | 	replicaOpts := &replicas.CreateOpts{
176 | 		ShareID:          srcShare.ID,
177 | 		AvailabilityZone: az,
178 | 		ShareNetworkID:   netID,
179 | 	}
180 | 	replica, err := replicas.Create(ctx, srcShareClient, replicaOpts).Extract()
181 | 	if err != nil {
182 | 		return nil, curReplica, fmt.Errorf("failed to create a new replica for a %q share: %s", srcShare.ID, err)
183 | 	}
184 | 	replica, err = waitForShareReplica(ctx, srcShareClient, replica.ID, waitForShareReplicaSec)
185 | 	if err != nil {
186 | 		return nil, curReplica, fmt.Errorf("failed to wait for a %q share replica status: %s", replica.ID, err)
187 | 	}
188 | 
189 | 	return replica, curReplica, nil
190 | }
191 | 
192 | // findShareActiveReplica returns the current active replica if found and the list
193 | // of all replicas associated with a share.
194 | func findShareActiveReplica(ctx context.Context, srcShareClient *gophercloud.ServiceClient, shareID string) (*replicas.Replica,
195 | 	[]replicas.Replica, error) {
196 | 	listReplicasOpts := replicas.ListOpts{
197 | 		ShareID: shareID,
198 | 	}
199 | 	pages, err := replicas.ListDetail(srcShareClient, listReplicasOpts).AllPages(ctx)
200 | 	if err != nil {
201 | 		return nil, nil, fmt.Errorf("failed to list %s share replicas: %s", shareID, err)
202 | 	}
203 | 	allReplicas, err := replicas.ExtractReplicas(pages)
204 | 	if err != nil {
205 | 		return nil, nil, fmt.Errorf("failed to extract %s share replicas: %s", shareID, err)
206 | 	}
207 | 	if len(allReplicas) == 0 {
208 | 		return nil, nil, fmt.Errorf("failed to find a replica for a %q share", shareID)
209 | 	}
210 | 	for _, v := range allReplicas {
211 | 		if v.Status == "available" && v.State == "active" {
212 | 			return &v, allReplicas, nil
213 | 		}
214 | 	}
215 | 	return nil, allReplicas, fmt.Errorf("failed to find an active replica for a %q share", shareID)
216 | }
217 | 
218 | func cloneShare(ctx context.Context, srcShareClient *gophercloud.ServiceClient, srcShare *shares.Share, name, shareType, proto, netID, az string) (*shares.Share, error) {
219 | 	snapshotOpts := snapshots.CreateOpts{
220 | 		ShareID:     srcShare.ID,
221 | 		Description: fmt.Sprintf("Transition snapshot to clone a %q share", srcShare.ID),
222 | 	}
223 | 	srcSnapshot, err := snapshots.Create(ctx, srcShareClient, snapshotOpts).Extract()
224 | 	if err != nil {
225 | 		return nil, fmt.Errorf("failed to create a source share snapshot: %s", err)
226 | 	}
227 | 	log.Printf("Intermediate snapshot %q created", srcSnapshot.ID)
228 | 
229 | 	delSnapshot := func() {
230 | 		if err := snapshots.Delete(ctx, srcShareClient, srcSnapshot.ID).ExtractErr(); err != nil {
231 | 			// it is fine when the snapshot was already removed
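// (gophercloud v2 exposes the HTTP status of an API error via
// gophercloud.ResponseCodeIs, which replaces matching on the v1
// ErrDefault404-style error types)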
232 | 			if !gophercloud.ResponseCodeIs(err, http.StatusNotFound) {
233 | 				log.Printf("failed to delete the intermediate snapshot: %s", err)
234 | 			}
235 | 		}
236 | 	}
237 | 	defer delSnapshot()
238 | 
239 | 	srcSnapshot, err = waitForShareSnapshot(ctx, srcShareClient, srcSnapshot.ID, waitForShareSnapshotSec)
240 | 	if err != nil {
241 | 		return nil, fmt.Errorf("failed to wait for a snapshot: %s", err)
242 | 	}
243 | 
244 | 	// TODO
245 | 	//createShareSnapshotSpeed(srcSnapshot)
246 | 
247 | 	if shareType == "" {
248 | 		shareType = srcShare.ShareType
249 | 	}
250 | 	if proto == "" {
251 | 		proto = srcShare.ShareProto
252 | 	}
253 | 	if name == "" {
254 | 		name = fmt.Sprintf("%s clone (%s)", srcShare.Name, srcShare.ID)
255 | 	}
256 | 	if netID == "" {
257 | 		netID = srcShare.ShareNetworkID
258 | 	}
259 | 	shareOpts := &shares.CreateOpts{
260 | 		Name:             name,
261 | 		SnapshotID:       srcSnapshot.ID,
262 | 		ShareNetworkID:   netID,
263 | 		ShareProto:       proto,
264 | 		Size:             srcShare.Size,
265 | 		ShareType:        shareType,
266 | 		Metadata:         srcShare.Metadata,
267 | 		AvailabilityZone: srcShare.AvailabilityZone,
268 | 	}
269 | 
270 | 	reauthClient(ctx, srcShareClient, "cloneShare")
271 | 
272 | 	// create a share clone in the source AZ
273 | 	newShare, err := shares.Create(ctx, srcShareClient, shareOpts).Extract()
274 | 	if err != nil {
275 | 		return nil, fmt.Errorf("failed to create a source share from a snapshot: %s", err)
276 | 	}
277 | 	newShare, err = waitForShare(ctx, srcShareClient, newShare.ID, waitForShareSec)
278 | 	if err != nil {
279 | 		return nil, fmt.Errorf("failed to wait for a %q share status: %s", newShare.ID, err)
280 | 	}
281 | 
282 | 	// delete the intermediate snapshot right away
283 | 	go delSnapshot()
284 | 
285 | 	return moveShare(ctx, srcShareClient, newShare, netID, az, true)
286 | }
287 | 
288 | func moveShare(ctx context.Context, srcShareClient *gophercloud.ServiceClient, srcShare *shares.Share, netID, az string, deleteOldReplica bool) (*shares.Share, error) {
289 | 	srcShareClient.Microversion = "2.60"
290 | 	// detect the current share replica
291 | 	replica, oldReplica, err := findOrCreateShareReplica(ctx, srcShareClient, srcShare, netID, az)
292 | 	if err != nil {
293 | 		return nil, fmt.Errorf("failed to obtain a replica for a %q share: %s", srcShare.ID, err)
294 | 	}
295 | 
296 | 	// resync the replica in the new AZ
297 | 	err = replicas.Resync(ctx, srcShareClient, replica.ID).ExtractErr()
298 | 	if err != nil {
299 | 		return nil, fmt.Errorf("failed to resync a %q share replica: %s", replica.ID, err)
300 | 	}
301 | 	replica, err = waitForShareReplicaState(ctx, srcShareClient, replica.ID, "in_sync", waitForShareReplicaSec)
302 | 	if err != nil {
303 | 		return nil, fmt.Errorf("failed to wait for a %q share replica state: %s", replica.ID, err)
304 | 	}
305 | 
306 | 	// promote the replica in the new AZ
307 | 	err = replicas.Promote(ctx, srcShareClient, replica.ID, replicas.PromoteOpts{}).ExtractErr()
308 | 	if err != nil {
309 | 		return nil, fmt.Errorf("failed to promote a %q share replica: %s", replica.ID, err)
310 | 	}
311 | 	replica, err = waitForShareReplicaState(ctx, srcShareClient, replica.ID, "active", waitForShareReplicaSec)
312 | 	if err != nil {
313 | 		return nil, fmt.Errorf("failed to wait for a %q share replica state: %s", replica.ID, err)
314 | 	}
315 | 
316 | 	// check the expected share AZ
317 | 	newShare, err := waitForShare(ctx, srcShareClient, srcShare.ID, waitForShareSec)
318 | 	if err != nil {
319 | 		return nil, fmt.Errorf("failed to wait for a share: %s", err)
320 | 	}
321 | 	if newShare.AvailabilityZone != az {
322 | 		return nil, fmt.Errorf("the share was not moved to the expected %q availability zone", az)
set") 323 | } 324 | 325 | // remove old replica 326 | if deleteOldReplica && oldReplica != nil { 327 | err = replicas.Delete(ctx, srcShareClient, oldReplica.ID).ExtractErr() 328 | if err != nil { 329 | return nil, fmt.Errorf("failed to delete an old %q replica: %s", oldReplica.ID, err) 330 | } 331 | } 332 | 333 | createShareSpeed(newShare) 334 | 335 | return newShare, nil 336 | } 337 | 338 | // ShareCmd represents the share command. 339 | var ShareCmd = &cobra.Command{ 340 | Use: "share ", 341 | Args: cobra.ExactArgs(1), 342 | Short: "Clone a share", 343 | PreRunE: func(cmd *cobra.Command, args []string) error { 344 | if err := parseTimeoutArgs(); err != nil { 345 | return err 346 | } 347 | return viper.BindPFlags(cmd.Flags()) 348 | }, 349 | RunE: func(cmd *cobra.Command, args []string) error { 350 | // migrate share 351 | 352 | share := args[0] 353 | 354 | toAZ := viper.GetString("to-az") 355 | toShareName := viper.GetString("to-share-name") 356 | toShareType := viper.GetString("to-share-type") 357 | toShareProto := viper.GetString("to-share-proto") 358 | toShareNetworkID := viper.GetString("to-share-network-id") 359 | 360 | // source and destination parameters 361 | loc, err := getSrcAndDst(toAZ) 362 | if err != nil { 363 | return err 364 | } 365 | 366 | // check the source and destination projects/regions 367 | if !loc.SameRegion || !loc.SameProject { 368 | return fmt.Errorf("shares can be copied only within the same OpenStack region and project") 369 | } 370 | 371 | srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src) 372 | if err != nil { 373 | return fmt.Errorf("failed to create a source OpenStack client: %s", err) 374 | } 375 | 376 | srcShareClient, err := newSharedFileSystemV2Client(srcProvider, loc.Src.Region) 377 | if err != nil { 378 | return fmt.Errorf("failed to create source share client: %s", err) 379 | } 380 | 381 | // resolve share name to an ID 382 | if v, err := shares_utils.IDFromName(cmd.Context(), srcShareClient, share); err == nil { 383 | share = v 384 | } else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok { 385 | return err 386 | } 387 | 388 | srcShare, err := waitForShare(cmd.Context(), srcShareClient, share, waitForShareSec) 389 | if err != nil { 390 | return fmt.Errorf("failed to wait for a %q share: %s", share, err) 391 | } 392 | 393 | err = checkShareAvailabilityZone(cmd.Context(), srcShareClient, srcShare.AvailabilityZone, &toAZ, &loc) 394 | if err != nil { 395 | return err 396 | } 397 | 398 | defer measureTime() 399 | 400 | dstShare, err := cloneShare(cmd.Context(), srcShareClient, srcShare, toShareName, toShareType, toShareProto, toShareNetworkID, toAZ) 401 | if err != nil { 402 | return err 403 | } 404 | 405 | log.Printf("Migrated target share name is %q (id: %q) to %q availability zone", dstShare.Name, dstShare.ID, dstShare.AvailabilityZone) 406 | 407 | return nil 408 | }, 409 | } 410 | 411 | // ShareMoveCmd represents the share move command. 
412 | var ShareMoveCmd = &cobra.Command{
413 | 	Use:   "move <name|id>",
414 | 	Args:  cobra.ExactArgs(1),
415 | 	Short: "Move a share to a different availability zone",
416 | 	PreRunE: func(cmd *cobra.Command, args []string) error {
417 | 		if err := parseTimeoutArgs(); err != nil {
418 | 			return err
419 | 		}
420 | 		return viper.BindPFlags(cmd.Flags())
421 | 	},
422 | 	RunE: func(cmd *cobra.Command, args []string) error {
423 | 		// migrate share
424 | 
425 | 		share := args[0]
426 | 
427 | 		toAZ := viper.GetString("to-az")
428 | 		deleteOldReplica := viper.GetBool("delete-old-replica")
429 | 		toShareNetworkID := viper.GetString("to-share-network-id")
430 | 
431 | 		// source and destination parameters
432 | 		loc, err := getSrcAndDst(toAZ)
433 | 		if err != nil {
434 | 			return err
435 | 		}
436 | 
437 | 		// check the source and destination projects/regions
438 | 		if !loc.SameRegion || !loc.SameProject {
439 | 			return fmt.Errorf("shares can be moved only within the same OpenStack region and project")
440 | 		}
441 | 
442 | 		srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src)
443 | 		if err != nil {
444 | 			return fmt.Errorf("failed to create a source OpenStack client: %s", err)
445 | 		}
446 | 
447 | 		srcShareClient, err := newSharedFileSystemV2Client(srcProvider, loc.Src.Region)
448 | 		if err != nil {
449 | 			return fmt.Errorf("failed to create source share client: %s", err)
450 | 		}
451 | 
452 | 		// resolve share name to an ID
453 | 		if v, err := shares_utils.IDFromName(cmd.Context(), srcShareClient, share); err == nil {
454 | 			share = v
455 | 		} else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok {
456 | 			return err
457 | 		}
458 | 
459 | 		srcShare, err := waitForShare(cmd.Context(), srcShareClient, share, waitForShareSec)
460 | 		if err != nil {
461 | 			return fmt.Errorf("failed to wait for a %q share: %s", share, err)
462 | 		}
463 | 
464 | 		err = checkShareAvailabilityZone(cmd.Context(), srcShareClient, srcShare.AvailabilityZone, &toAZ, &loc)
465 | 		if err != nil {
466 | 			return err
467 | 		}
468 | 
469 | 		defer measureTime()
470 | 
471 | 		dstShare, err := moveShare(cmd.Context(), srcShareClient, srcShare, toShareNetworkID, toAZ, deleteOldReplica)
472 | 		if err != nil {
473 | 			return err
474 | 		}
475 | 
476 | 		log.Printf("Moved share %q (id: %q) to the %q availability zone", dstShare.Name, dstShare.ID, dstShare.AvailabilityZone)
477 | 
478 | 		return nil
479 | 	},
480 | }
481 | 
482 | func init() {
483 | 	initShareCmdFlags()
484 | 	initShareMoveCmdFlags()
485 | 	RootCmd.AddCommand(ShareCmd)
486 | 	ShareCmd.AddCommand(ShareMoveCmd)
487 | }
488 | 
489 | func initShareCmdFlags() {
490 | 	ShareCmd.Flags().StringP("to-az", "", "", "destination share availability zone")
491 | 	ShareCmd.Flags().StringP("to-share-name", "", "", "destination share name")
492 | 	ShareCmd.Flags().StringP("to-share-type", "", "", "destination share type")
493 | 	ShareCmd.Flags().StringP("to-share-proto", "", "", "destination share proto")
494 | 	ShareCmd.Flags().StringP("to-share-network-id", "", "", "destination share network ID")
495 | }
496 | 
497 | func initShareMoveCmdFlags() {
498 | 	ShareMoveCmd.Flags().StringP("to-az", "", "", "destination share availability zone")
499 | 	ShareMoveCmd.Flags().StringP("to-share-network-id", "", "", "destination share network ID")
500 | 	ShareMoveCmd.Flags().BoolP("delete-old-replica", "", false, "delete the old replica after moving a share (if there was one)")
501 | }
502 | 
--------------------------------------------------------------------------------
/pkg/utils.go:
--------------------------------------------------------------------------------
1 | package pkg
2 | 
3 | import (
4 | 	"context"
5 | 	"crypto/tls"
6 | 	"fmt"
7 | 	"net/http"
8 | 	"os"
9 | 	"strings"
10 | 	"sync"
11 | 	"time"
12 | 
13 | 	"github.com/gophercloud/gophercloud/v2"
14 | 	"github.com/gophercloud/gophercloud/v2/openstack"
15 | 	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/availabilityzones"
16 | 	"github.com/gophercloud/gophercloud/v2/openstack/identity/v3/applicationcredentials"
17 | 	"github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens"
18 | 	shareAZ "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/availabilityzones"
19 | 	"github.com/gophercloud/utils/v2/client"
20 | 	"github.com/gophercloud/utils/v2/openstack/clientconfig"
21 | 	"github.com/spf13/viper"
22 | 	"golang.org/x/term"
23 | )
24 | 
25 | var (
26 | 	startTime          time.Time = time.Now()
27 | 	backoffFactor                = 2
28 | 	backoffMaxInterval           = 10 * time.Second
29 | )
30 | 
31 | func measureTime(caption ...string) {
32 | 	if len(caption) == 0 {
33 | 		log.Printf("Total execution time: %s", time.Since(startTime))
34 | 	} else {
35 | 		log.Printf(caption[0], time.Since(startTime))
36 | 	}
37 | }
38 | 
39 | func newOpenStackClient(ctx context.Context, loc Location) (*gophercloud.ProviderClient, error) {
40 | 	envPrefix := "OS_"
41 | 	if loc.Origin == "dst" {
42 | 		envPrefix = "TO_OS_"
43 | 	}
44 | 	ao, err := clientconfig.AuthOptions(&clientconfig.ClientOpts{
45 | 		EnvPrefix: envPrefix,
46 | 		AuthInfo: &clientconfig.AuthInfo{
47 | 			AuthURL:                     loc.AuthURL,
48 | 			Username:                    loc.Username,
49 | 			Password:                    loc.Password,
50 | 			DomainName:                  loc.Domain,
51 | 			ProjectName:                 loc.Project,
52 | 			ApplicationCredentialID:     loc.ApplicationCredentialID,
53 | 			ApplicationCredentialName:   loc.ApplicationCredentialName,
54 | 			ApplicationCredentialSecret: loc.ApplicationCredentialSecret,
55 | 			Token:                       loc.Token,
56 | 		},
57 | 		RegionName: loc.Region,
58 | 	})
59 | 	if err != nil {
60 | 		return nil, err
61 | 	}
62 | 
63 | 	// the migration can be long-running, so we need to be able to renew the token
64 | 	ao.AllowReauth = true
65 | 
66 | 	/* TODO: Introduce auth by CLI parameters
67 | 	ao := gophercloud.AuthOptions{
68 | 		IdentityEndpoint:            authURL,
69 | 		UserID:                      userID,
70 | 		Username:                    username,
71 | 		Password:                    password,
72 | 		TenantID:                    tenantID,
73 | 		TenantName:                  tenantName,
74 | 		DomainID:                    domainID,
75 | 		DomainName:                  domainName,
76 | 		ApplicationCredentialID:     applicationCredentialID,
77 | 		ApplicationCredentialName:   applicationCredentialName,
78 | 		ApplicationCredentialSecret: applicationCredentialSecret,
79 | 	}
80 | 	*/
81 | 
82 | 	provider, err := openstack.NewClient(ao.IdentityEndpoint)
83 | 	if err != nil {
84 | 		return nil, err
85 | 	}
86 | 	provider.UserAgent.Prepend("cyclone/" + Version)
87 | 
88 | 	insecure := viper.GetBool("insecure")
89 | 	tlsConfig := &tls.Config{
90 | 		InsecureSkipVerify: insecure,
91 | 	}
92 | 
93 | 	// the debug logger is enabled by default and writes logs into a cyclone temp dir
94 | 	provider.HTTPClient = http.Client{
95 | 		Transport: &client.RoundTripper{
96 | 			MaxRetries: 5,
97 | 			Rt: &http.Transport{
98 | 				TLSClientConfig: tlsConfig,
99 | 			},
100 | 			Logger: &logger{Prefix: loc.Origin},
101 | 		},
102 | 	}
103 | 
104 | 	if ao.ApplicationCredentialSecret == "" && ao.TokenID == "" &&
105 | 		ao.Username != "" && ao.Password == "" {
106 | 		fmt.Printf("Enter the %s password: ", loc.Origin)
107 | 		v, err := term.ReadPassword(int(os.Stdin.Fd()))
108 | 		if err != nil {
109 | 			return nil, err
110 | 		}
111 | 		ao.Password = string(v)
112 | 	}
113 | 
114 | 	err = openstack.Authenticate(ctx, provider, *ao)
115 | 	if err != nil {
116 | 		return nil, err
117 | 	}
118 | 
119 | 	if ao.TokenID != "" {
120 | 		// force application credential creation to allow further reauth
121 | 		log.Printf("Force %s application credential creation due to OpenStack Keystone token auth", loc.Origin)
122 | 		userID, err := getAuthUserID(provider)
123 | 		if err != nil {
124 | 			return nil, err
125 | 		}
126 | 
127 | 		identityClient, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{
128 | 			Region: loc.Region,
129 | 		})
130 | 		if err != nil {
131 | 			return nil, fmt.Errorf("failed to create OpenStack Identity V3 client: %s", err)
132 | 		}
133 | 
134 | 		acName := fmt.Sprintf("cyclone_%s_%s", time.Now().Format("20060102150405"), loc.Origin)
135 | 		createOpts := applicationcredentials.CreateOpts{
136 | 			Name:        acName,
137 | 			Description: "temp credentials for cyclone",
138 | 			// we need to be able to delete the AC token within its own scope
139 | 			Unrestricted: true,
140 | 		}
141 | 
142 | 		acWait := &sync.WaitGroup{}
143 | 		acWait.Add(1)
144 | 		var ac *applicationcredentials.ApplicationCredential
145 | 		cleanupFuncs = append(cleanupFuncs, func(wg *sync.WaitGroup) {
146 | 			defer wg.Done()
147 | 			log.Printf("Cleaning up %q application credential", acName)
148 | 
149 | 			// wait for the ac Create response
150 | 			acWait.Wait()
151 | 			if ac == nil {
152 | 				// nothing to delete
153 | 				return
154 | 			}
155 | 
156 | 			if err := applicationcredentials.Delete(ctx, identityClient, userID, ac.ID).ExtractErr(); err != nil {
157 | 				if !gophercloud.ResponseCodeIs(err, http.StatusNotFound) {
158 | 					log.Printf("Failed to delete a %q temp application credential: %s", acName, err)
159 | 				}
160 | 			}
161 | 		})
162 | 		ac, err = applicationcredentials.Create(ctx, identityClient, userID, createOpts).Extract()
163 | 		acWait.Done()
164 | 		if err != nil {
165 | 			if gophercloud.ResponseCodeIs(err, http.StatusNotFound) {
166 | 				err := err.(gophercloud.ErrUnexpectedResponseCode)
167 | 				return nil, fmt.Errorf("failed to create a temp application credential: %s", err.Body)
168 | 			}
169 | 			return nil, fmt.Errorf("failed to create a temp application credential: %s", err)
170 | 		}
171 | 
172 | 		// set new auth options
173 | 		ao = &gophercloud.AuthOptions{
174 | 			IdentityEndpoint:            loc.AuthURL,
175 | 			ApplicationCredentialID:     ac.ID,
176 | 			ApplicationCredentialSecret: ac.Secret,
177 | 			AllowReauth:                 true,
178 | 		}
179 | 
180 | 		err = openstack.Authenticate(ctx, provider, *ao)
181 | 		if err != nil {
182 | 			return nil, fmt.Errorf("failed to auth using just created application credentials: %s", err)
183 | 		}
184 | 	}
185 | 
186 | 	return provider, nil
187 | }
188 | 
189 | func reauthClient(ctx context.Context, client *gophercloud.ServiceClient, funcName string) {
190 | 	// reauth the client before the long-running action to avoid openstack internal auth issues
191 | 	if client.ReauthFunc != nil {
192 | 		if err := client.Reauthenticate(ctx, client.TokenID); err != nil {
193 | 			log.Printf("Failed to re-authenticate the provider client in the %s func: %v", funcName, err)
194 | 		}
195 | 	}
196 | }
197 | 
198 | func newGlanceV2Client(provider *gophercloud.ProviderClient, region string) (*gophercloud.ServiceClient, error) {
199 | 	return openstack.NewImageV2(provider, gophercloud.EndpointOpts{
200 | 		Region: region,
201 | 	})
202 | }
203 | 
204 | func newBlockStorageV3Client(provider *gophercloud.ProviderClient, region string) (*gophercloud.ServiceClient, error) {
205 | 	return openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{
206 | 		Region: region,
207 | 	})
208 | }
209 | 
210 | func newObjectStorageV1Client(provider
*gophercloud.ProviderClient, region string) (*gophercloud.ServiceClient, error) { 211 | return openstack.NewObjectStorageV1(provider, gophercloud.EndpointOpts{ 212 | Region: region, 213 | }) 214 | } 215 | 216 | func newComputeV2Client(provider *gophercloud.ProviderClient, region string) (*gophercloud.ServiceClient, error) { 217 | return openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ 218 | Region: region, 219 | }) 220 | } 221 | 222 | func newNetworkV2Client(provider *gophercloud.ProviderClient, region string) (*gophercloud.ServiceClient, error) { 223 | return openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{ 224 | Region: region, 225 | }) 226 | } 227 | 228 | func newSharedFileSystemV2Client(provider *gophercloud.ProviderClient, region string) (*gophercloud.ServiceClient, error) { 229 | return openstack.NewSharedFileSystemV2(provider, gophercloud.EndpointOpts{ 230 | Region: region, 231 | }) 232 | } 233 | 234 | func newSecretManagerV1Client(provider *gophercloud.ProviderClient, region string) (*gophercloud.ServiceClient, error) { 235 | return openstack.NewKeyManagerV1(provider, gophercloud.EndpointOpts{ 236 | Region: region, 237 | }) 238 | } 239 | 240 | func checkAvailabilityZone(ctx context.Context, client *gophercloud.ServiceClient, srcAZ string, dstAZ *string, loc *Locations) error { 241 | if *dstAZ == "" { 242 | if strings.HasPrefix(srcAZ, loc.Dst.Region) { 243 | *dstAZ = srcAZ 244 | loc.SameAZ = true 245 | return nil 246 | } 247 | // use as a default 248 | return nil 249 | } 250 | 251 | if client == nil { 252 | return fmt.Errorf("no service client provided") 253 | } 254 | 255 | // check availability zone name 256 | allPages, err := availabilityzones.List(client).AllPages(ctx) 257 | if err != nil { 258 | return fmt.Errorf("error retrieving availability zones: %s", err) 259 | } 260 | zones, err := availabilityzones.ExtractAvailabilityZones(allPages) 261 | if err != nil { 262 | return fmt.Errorf("error extracting availability zones from response: %s", err) 263 | } 264 | 265 | var zonesNames []string 266 | var found bool 267 | for _, z := range zones { 268 | if z.ZoneState.Available { 269 | zonesNames = append(zonesNames, z.ZoneName) 270 | } 271 | if z.ZoneName == *dstAZ { 272 | found = true 273 | break 274 | } 275 | } 276 | if !found { 277 | return fmt.Errorf("failed to find %q availability zone, supported availability zones: %q", *dstAZ, zonesNames) 278 | } 279 | 280 | if srcAZ == *dstAZ { 281 | loc.SameAZ = true 282 | } 283 | 284 | return nil 285 | } 286 | 287 | func checkShareAvailabilityZone(ctx context.Context, client *gophercloud.ServiceClient, srcAZ string, dstAZ *string, loc *Locations) error { 288 | if *dstAZ == "" { 289 | if strings.HasPrefix(srcAZ, loc.Dst.Region) { 290 | *dstAZ = srcAZ 291 | loc.SameAZ = true 292 | return nil 293 | } 294 | // use as a default 295 | return nil 296 | } 297 | 298 | if client == nil { 299 | return fmt.Errorf("no service client provided") 300 | } 301 | 302 | // check availability zone name 303 | allPages, err := shareAZ.List(client).AllPages(ctx) 304 | if err != nil { 305 | return fmt.Errorf("error retrieving availability zones: %s", err) 306 | } 307 | zones, err := shareAZ.ExtractAvailabilityZones(allPages) 308 | if err != nil { 309 | return fmt.Errorf("error extracting availability zones from response: %s", err) 310 | } 311 | 312 | zonesNames := make([]string, 0, len(zones)) 313 | var found bool 314 | for _, z := range zones { 315 | zonesNames = append(zonesNames, z.Name) 316 | if z.Name == *dstAZ { 317 | found = true 318 | 
break
319 | }
320 | }
321 | if !found {
322 | return fmt.Errorf("failed to find %q availability zone, supported availability zones: %q", *dstAZ, zonesNames)
323 | }
324 |
325 | if srcAZ == *dstAZ {
326 | loc.SameAZ = true
327 | }
328 |
329 | return nil
330 | }
331 |
332 | func getAuthUserID(client *gophercloud.ProviderClient) (string, error) {
333 | if client == nil {
334 | return "", fmt.Errorf("provider client is nil")
335 | }
336 | r := client.GetAuthResult()
337 | if r == nil {
338 | return "", fmt.Errorf("provider client auth result is nil")
339 | }
340 | switch r := r.(type) {
341 | case tokens.CreateResult:
342 | v, err := r.ExtractUser()
343 | if err != nil {
344 | return "", err
345 | }
346 | return v.ID, nil
347 | case tokens.GetResult:
348 | v, err := r.ExtractUser()
349 | if err != nil {
350 | return "", err
351 | }
352 | return v.ID, nil
353 | default:
354 | return "", fmt.Errorf("got unexpected AuthResult type %T", r)
355 | }
356 | }
357 |
358 | func getAuthProjectID(client *gophercloud.ProviderClient) (string, error) {
359 | if client == nil {
360 | return "", fmt.Errorf("provider client is nil")
361 | }
362 | r := client.GetAuthResult()
363 | if r == nil {
364 | return "", fmt.Errorf("provider client auth result is nil")
365 | }
366 | switch r := r.(type) {
367 | case tokens.CreateResult:
368 | v, err := r.ExtractProject()
369 | if err != nil {
370 | return "", err
371 | }
372 | if v == nil {
373 | return "", nil
374 | }
375 | return v.ID, nil
376 | case tokens.GetResult:
377 | v, err := r.ExtractProject()
378 | if err != nil {
379 | return "", err
380 | }
381 | if v == nil {
382 | return "", nil
383 | }
384 | return v.ID, nil
385 | default:
386 | return "", fmt.Errorf("got unexpected AuthResult type %T", r)
387 | }
388 | }
389 |
390 | // isSliceContainsStr returns true if the string exists in the given slice.
391 | func isSliceContainsStr(sl []string, str string) bool {
392 | for _, s := range sl {
393 | if s == str {
394 | return true
395 | }
396 | }
397 | return false
398 | }
399 |
400 | // Backoff options.
401 | type Backoff struct {
402 | Timeout int
403 | Factor int
404 | MaxInterval time.Duration
405 | }
406 |
407 | func NewBackoff(timeout int, factor int, maxInterval time.Duration) *Backoff {
408 | return &Backoff{
409 | Timeout: timeout,
410 | Factor: factor,
411 | MaxInterval: maxInterval,
412 | }
413 | }
414 |
415 | // WaitFor method polls a predicate function, once per interval with an
416 | // arithmetic backoff, up to a timeout limit. This is an enhanced
417 | // gophercloud.WaitFor function with logic from
418 | // https://github.com/sapcc/go-bits/blob/master/retry/pkg.go
419 | func (eb *Backoff) WaitFor(predicate func() (bool, error)) error {
420 | type WaitForResult struct {
421 | Success bool
422 | Error error
423 | }
424 |
425 | start := time.Now().Unix()
426 | duration := time.Second
427 |
428 | for {
429 | // If a timeout is set, and that's been exceeded, shut it down.
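// Illustration: with the package defaults (Factor = 2, MaxInterval = 10s),
// the sleep between predicate checks below grows arithmetically (3s, 5s,
// 7s, 9s) and is then capped at 10s per iteration until the predicate
// succeeds, fails, or the timeout elapses.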
430 | if eb.Timeout >= 0 && time.Now().Unix()-start >= int64(eb.Timeout) { 431 | return fmt.Errorf("a timeout occurred") 432 | } 433 | 434 | duration += time.Second * time.Duration(eb.Factor) 435 | if duration > eb.MaxInterval { 436 | duration = eb.MaxInterval 437 | } 438 | time.Sleep(duration) 439 | 440 | var result WaitForResult 441 | ch := make(chan bool, 1) 442 | go func() { 443 | defer close(ch) 444 | satisfied, err := predicate() 445 | result.Success = satisfied 446 | result.Error = err 447 | }() 448 | 449 | select { 450 | case <-ch: 451 | if result.Error != nil { 452 | return result.Error 453 | } 454 | if result.Success { 455 | return nil 456 | } 457 | // If the predicate has not finished by the timeout, cancel it. 458 | case <-time.After(time.Duration(eb.Timeout) * time.Second): 459 | return fmt.Errorf("a timeout occurred") 460 | } 461 | } 462 | } 463 | 464 | // joinSkipEmpty helper joins only non empty strings. 465 | func joinSkipEmpty(sep string, args ...string) string { 466 | var a []string 467 | for _, s := range args { 468 | if s := strings.TrimSpace(s); s != "" { 469 | a = append(a, s) 470 | } 471 | } 472 | return strings.Join(a, sep) 473 | } 474 | -------------------------------------------------------------------------------- /pkg/version.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var Version = "dev" 11 | 12 | var VersionCmd = &cobra.Command{ 13 | Use: "version", 14 | Short: "Print version information", 15 | DisableAutoGenTag: true, 16 | Run: func(cmd *cobra.Command, args []string) { 17 | fmt.Printf("cyclone %s compiled with %v on %v/%v\n", 18 | Version, runtime.Version(), runtime.GOOS, runtime.GOARCH) 19 | }, 20 | } 21 | 22 | func init() { 23 | RootCmd.AddCommand(VersionCmd) 24 | } 25 | -------------------------------------------------------------------------------- /pkg/volume.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "strconv" 8 | "strings" 9 | "time" 10 | 11 | "github.com/gophercloud/gophercloud/v2" 12 | "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/backups" 13 | "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/snapshots" 14 | "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/transfers" 15 | "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes" 16 | "github.com/gophercloud/gophercloud/v2/openstack/image/v2/images" 17 | volumes_utils "github.com/gophercloud/utils/v2/openstack/blockstorage/v3/volumes" 18 | "github.com/spf13/cobra" 19 | "github.com/spf13/viper" 20 | ) 21 | 22 | var skipVolumeAttributes = []string{ 23 | "direct_url", 24 | "boot_roles", 25 | "os_hash_algo", 26 | "os_hash_value", 27 | "checksum", 28 | "size", 29 | "container_format", 30 | "disk_format", 31 | "image_id", 32 | // these integer values have to be set separately 33 | "min_disk", 34 | "min_ram", 35 | } 36 | 37 | var ( 38 | waitForVolumeSec float64 39 | waitForSnapshotSec float64 40 | ) 41 | 42 | var volumeNormalStatuses = []string{ 43 | "available", 44 | "in-use", 45 | } 46 | 47 | var snapshotNormalStatuses = []string{ 48 | "available", 49 | } 50 | 51 | func expandVolumeProperties(srcVolume *volumes.Volume) images.UpdateOpts { 52 | // set min_disk and min_ram from a source volume 53 | imgAttrUpdateOpts := images.UpdateOpts{ 54 | images.ReplaceImageMinDisk{NewMinDisk: 
srcVolume.Size},
55 | }
56 | if s, ok := srcVolume.VolumeImageMetadata["min_ram"]; ok {
57 | if minRAM, err := strconv.Atoi(s); err == nil {
58 | imgAttrUpdateOpts = append(imgAttrUpdateOpts, images.ReplaceImageMinRam{NewMinRam: minRAM})
59 | } else {
60 | log.Printf("Cannot convert %q to integer: %s", s, err)
61 | }
62 | }
63 | for key, value := range srcVolume.VolumeImageMetadata {
64 | if isSliceContainsStr(skipVolumeAttributes, key) || value == "" {
65 | continue
66 | }
67 | imgAttrUpdateOpts = append(imgAttrUpdateOpts, images.UpdateImageProperty{
68 | Op: images.AddOp,
69 | Name: key,
70 | Value: value,
71 | })
72 | }
73 | return imgAttrUpdateOpts
74 | }
75 |
76 | func createSnapshotSpeed(snapshot *snapshots.Snapshot) {
77 | t := snapshot.UpdatedAt.Sub(snapshot.CreatedAt)
78 | log.Printf("Time to create a snapshot: %s", t)
79 | size := float64(snapshot.Size * 1024)
80 | log.Printf("Size of the snapshot: %.2f MB", size)
81 | log.Printf("Speed of the snapshot creation: %.2f MB/sec", size/t.Seconds())
82 | }
83 |
84 | func waitForSnapshot(ctx context.Context, client *gophercloud.ServiceClient, id string, secs float64) (*snapshots.Snapshot, error) {
85 | var snapshot *snapshots.Snapshot
86 | var err error
87 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) {
88 | snapshot, err = snapshots.Get(ctx, client, id).Extract()
89 | if err != nil {
90 | return false, err
91 | }
92 |
93 | log.Printf("Intermediate snapshot status: %s", snapshot.Status)
94 | if isSliceContainsStr(snapshotNormalStatuses, snapshot.Status) {
95 | return true, nil
96 | }
97 |
98 | if strings.Contains(snapshot.Status, "error") {
99 | return false, fmt.Errorf("intermediate snapshot status is %q", snapshot.Status)
100 | }
101 |
102 | // continue status checks
103 | return false, nil
104 | })
105 |
106 | return snapshot, err
107 | }
108 |
109 | func createVolumeSpeed(volume *volumes.Volume) {
110 | // Cinder doesn't update the UpdatedAt attribute when the volume status is updated
111 | t := time.Since(volume.CreatedAt)
112 | log.Printf("Time to create a volume: %s", t)
113 | size := float64(volume.Size * 1024)
114 | log.Printf("Size of the volume: %.2f MB", size)
115 | log.Printf("Speed of the volume creation: %.2f MB/sec", size/t.Seconds())
116 | }
117 |
118 | func waitForVolume(ctx context.Context, client *gophercloud.ServiceClient, id string, secs float64) (*volumes.Volume, error) {
119 | var volume *volumes.Volume
120 | var err error
121 | err = NewBackoff(int(secs), backoffFactor, backoffMaxInterval).WaitFor(func() (bool, error) {
122 | volume, err = volumes.Get(ctx, client, id).Extract()
123 | if err != nil {
124 | return false, err
125 | }
126 |
127 | log.Printf("Volume status: %s", volume.Status)
128 | // TODO: specify target states in func params
129 | if isSliceContainsStr(volumeNormalStatuses, volume.Status) {
130 | return true, nil
131 | }
132 |
133 | if strings.Contains(volume.Status, "error") {
134 | return false, fmt.Errorf("volume status is %q", volume.Status)
135 | }
136 |
137 | // continue status checks
138 | return false, nil
139 | })
140 |
141 | return volume, err
142 | }
143 |
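// cloneVolume clones a volume within the source project. As implemented
// below: an intermediate snapshot is used when explicitly requested via the
// clone-via-snapshot flag; a direct volume clone is used when the target
// availability zone doesn't matter (different region or same AZ); otherwise
// the volume goes through an intermediate backup, which allows restoring it
// into a different availability zone.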
144 | func cloneVolume(ctx context.Context, srcVolumeClient, srcObjectClient *gophercloud.ServiceClient, srcVolume *volumes.Volume, name, az string, cloneViaSnapshot bool, loc Locations) (*volumes.Volume, error) {
145 | volOpts := volumes.CreateOpts{
146 | Name: name,
147 | Size: srcVolume.Size,
148 | Description: fmt.Sprintf("clone of the %q volume", srcVolume.ID),
149 | VolumeType: srcVolume.VolumeType,
150 | }
151 |
152 | reauthClient(ctx, srcVolumeClient, "cloneVolume")
153 |
154 | if cloneViaSnapshot {
155 | // clone via snapshot using cinder storage, because it was explicitly set
156 | log.Printf("Cloning a %q volume using volume snapshot", srcVolume.ID)
157 |
158 | snapshotOpts := snapshots.CreateOpts{
159 | VolumeID: srcVolume.ID,
160 | Description: fmt.Sprintf("Transition snapshot to clone a %q volume", srcVolume.ID),
161 | Metadata: srcVolume.VolumeImageMetadata,
162 | Force: true,
163 | }
164 | srcSnapshot, err := snapshots.Create(ctx, srcVolumeClient, snapshotOpts).Extract()
165 | if err != nil {
166 | return nil, fmt.Errorf("failed to create a source volume snapshot: %s", err)
167 | }
168 | log.Printf("Intermediate snapshot %q created", srcSnapshot.ID)
169 |
170 | defer func() {
171 | if err := snapshots.Delete(ctx, srcVolumeClient, srcSnapshot.ID).ExtractErr(); err != nil {
172 | log.Printf("Failed to delete a transition snapshot: %s", err)
173 | }
174 | }()
175 |
176 | srcSnapshot, err = waitForSnapshot(ctx, srcVolumeClient, srcSnapshot.ID, waitForSnapshotSec)
177 | if err != nil {
178 | return nil, fmt.Errorf("failed to wait for a snapshot: %s", err)
179 | }
180 |
181 | createSnapshotSpeed(srcSnapshot)
182 |
183 | volOpts.SnapshotID = srcSnapshot.ID
184 | } else {
185 | if !loc.SameRegion || loc.SameAZ {
186 | // clone the volume directly: either the AZ already matches, or the clone will be migrated via an image anyway
187 | volOpts.SourceVolID = srcVolume.ID
188 | } else {
189 | // clone via backup using swift storage
190 |
191 | // save initial microversion
192 | mv := srcVolumeClient.Microversion
193 | srcVolumeClient.Microversion = "3.47"
194 |
195 | defer func() {
196 | // restore initial microversion
197 | srcVolumeClient.Microversion = mv
198 | }()
199 |
200 | backupOpts := backups.CreateOpts{
201 | VolumeID: srcVolume.ID,
202 | Description: fmt.Sprintf("Transition backup to clone a %q volume", srcVolume.ID),
203 | Container: fmt.Sprintf("%s_%d", srcVolume.ID, time.Now().Unix()),
204 | Force: true,
205 | }
206 | srcBackup, err := backups.Create(ctx, srcVolumeClient, backupOpts).Extract()
207 | if err != nil {
208 | return nil, fmt.Errorf("failed to create a source volume backup: %s", err)
209 | }
210 | log.Printf("Intermediate backup %q created", srcBackup.ID)
211 |
212 | defer func() {
213 | if err := backups.Delete(ctx, srcVolumeClient, srcBackup.ID).ExtractErr(); err != nil {
214 | log.Printf("Failed to delete a transition backup: %s", err)
215 | }
216 | }()
217 |
218 | srcBackup, err = waitForBackup(ctx, srcVolumeClient, srcBackup.ID, waitForBackupSec)
219 | if err != nil {
220 | return nil, fmt.Errorf("failed to wait for a backup: %s", err)
221 | }
222 |
223 | createBackupSpeed(ctx, srcObjectClient, srcBackup)
224 |
225 | // restoring a volume backup supports a non-original availability zone
226 | volOpts.AvailabilityZone = az
227 | volOpts.BackupID = srcBackup.ID
228 | }
229 | }
230 |
231 | reauthClient(ctx, srcVolumeClient, "cloneVolume")
232 |
233 | var newVolume *volumes.Volume
234 | var err error
235 | newVolume, err = volumes.Create(ctx, srcVolumeClient, volOpts, nil).Extract()
236 | if err != nil {
237 | if volOpts.SnapshotID != "" {
238 | return nil, fmt.Errorf("failed to create a source volume from a snapshot: %s", err)
239 | }
240 | if volOpts.SourceVolID != "" {
241 | return nil, fmt.Errorf("failed to create a volume clone: %s", err)
242 | }
243 | return nil, fmt.Errorf("failed to create a source volume from a backup: %s", err)
244 | }
245 |
246 | newVolumeID := newVolume.ID
247 | defer func() {
248 | if err != nil {
249 | if err := volumes.Delete(ctx, srcVolumeClient, newVolumeID, nil).ExtractErr(); err != nil {
250 | log.Printf("Failed to delete a cloned volume: %s", err)
251 | }
252 | }
253 | }()
254 |
255 | newVolume, err = waitForVolume(ctx, srcVolumeClient, newVolume.ID, waitForVolumeSec)
256 | if err != nil {
257 | return nil, fmt.Errorf("failed to wait for a volume: %s", err)
258 | }
259 |
260 | createVolumeSpeed(newVolume)
261 |
262 | return newVolume, nil
263 | }
264 |
265 | func volumeToImage(ctx context.Context, srcImageClient, srcVolumeClient, srcObjectClient *gophercloud.ServiceClient, imageName string, srcVolume *volumes.Volume) (*images.Image, error) {
266 | createSrcImage := volumes.UploadImageOpts{
267 | ContainerFormat: viper.GetString("container-format"),
268 | DiskFormat: viper.GetString("disk-format"),
269 | Visibility: string(images.ImageVisibilityPrivate),
270 | // for some reason this doesn't work when the volume status is in-use
271 | Force: true,
272 | }
273 |
274 | if imageName != "" {
275 | createSrcImage.ImageName = imageName
276 | } else if v, ok := srcVolume.VolumeImageMetadata["image_name"]; ok && v != "" {
277 | // preserve source image name
278 | createSrcImage.ImageName = v
279 | } else {
280 | createSrcImage.ImageName = srcVolume.ID
281 | }
282 |
283 | // preserve source container format
284 | if v, ok := srcVolume.VolumeImageMetadata["container_format"]; ok && v != "" {
285 | createSrcImage.ContainerFormat = v
286 | }
287 |
288 | // preserve source disk format
289 | if v, ok := srcVolume.VolumeImageMetadata["disk_format"]; ok && v != "" {
290 | createSrcImage.DiskFormat = v
291 | }
292 |
293 | reauthClient(ctx, srcVolumeClient, "volumeToImage")
294 |
295 | srcVolumeClient.Microversion = "3.1" // required to set the image visibility
296 | var srcVolumeImage volumes.VolumeImage
297 | srcVolumeImage, err := volumes.UploadImage(ctx, srcVolumeClient, srcVolume.ID, createSrcImage).Extract()
298 | if err != nil {
299 | return nil, fmt.Errorf("failed to convert a source volume to an image: %s", err)
300 | }
301 |
302 | defer func() {
303 | if err != nil {
304 | log.Printf("Removing transition image %q", srcVolumeImage.ImageID)
305 | if err := images.Delete(ctx, srcImageClient, srcVolumeImage.ImageID).ExtractErr(); err != nil {
306 | log.Printf("Failed to delete transition image: %s", err)
307 | }
308 | }
309 | }()
310 |
311 | var srcImage *images.Image
312 | srcImage, err = waitForImage(ctx, srcImageClient, srcObjectClient, srcVolumeImage.ImageID, 0, waitForImageSec)
313 | if err != nil {
314 | return nil, fmt.Errorf("failed to convert a volume to an image: %s", err)
315 | }
316 |
317 | log.Printf("Created %q image", srcImage.ID)
318 |
319 | createImageSpeed(srcImage)
320 |
321 | // sometimes the volume can still be in the uploading state
322 | if _, err := waitForVolume(ctx, srcVolumeClient, srcVolume.ID, waitForVolumeSec); err != nil {
323 | // in this case the end user can continue the image migration afterwards
324 | return nil, fmt.Errorf("failed to wait for a cloned volume available status: %s", err)
325 | }
326 |
327 | log.Printf("Updating image options")
328 | updateProperties := expandVolumeProperties(srcVolume)
329 | srcImage, err = images.Update(ctx, srcImageClient, srcVolumeImage.ImageID, updateProperties).Extract()
330 | if err != nil {
331 | return nil, fmt.Errorf("failed to update the transition image properties: %s", err)
332 | }
333 |
334 | log.Printf("Updated %q image", srcImage.ID)
335 |
336 | return srcImage, nil
337 | }
338 |
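// migrateVolume clones the source volume and brings the clone to the
// destination project, availability zone, and region. As implemented below:
// when the clone already sits in the target AZ, it is returned as is, or its
// ownership is transferred when the projects differ; otherwise the clone is
// converted to an image, the image is migrated across regions when needed
// and converted back into a volume, which is finally transferred when the
// projects differ.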
339 | func migrateVolume(ctx context.Context, srcImageClient, srcVolumeClient, srcObjectClient, dstImageClient, dstVolumeClient, dstObjectClient *gophercloud.ServiceClient, srcVolume *volumes.Volume, toVolumeName string, toVolumeType, az string, cloneViaSnapshot bool, loc Locations) (*volumes.Volume, error) {
340 | newVolume, err := cloneVolume(ctx, srcVolumeClient, srcObjectClient, srcVolume, toVolumeName, az, cloneViaSnapshot, loc)
341 | if err != nil {
342 | return nil, err
343 | }
344 |
345 | // the volume was cloned, now it requires a migration
346 | srcVolume = newVolume
347 |
348 | if loc.SameAZ ||
349 | srcVolume.AvailabilityZone == az { // a volume was cloned via backup
350 | if loc.SameProject {
351 | // we're done
352 | return srcVolume, nil
353 | }
354 |
355 | // just change volume ownership
356 | // don't remove the source volume in case of an error, because the customer may
357 | // transfer the cloned volume afterwards
358 | return transferVolume(ctx, srcVolumeClient, dstVolumeClient, srcVolume)
359 | }
360 |
361 | defer func() {
362 | // it is safe to remove the cloned volume on exit
363 | if err := volumes.Delete(ctx, srcVolumeClient, srcVolume.ID, nil).ExtractErr(); err != nil {
364 | // it is fine when the volume was already removed
365 | if !gophercloud.ResponseCodeIs(err, http.StatusNotFound) {
366 | log.Printf("Failed to delete a cloned volume: %s", err)
367 | }
368 | }
369 | }()
370 |
371 | // converting a volume to an image
372 | srcImage, err := volumeToImage(ctx, srcImageClient, srcVolumeClient, srcObjectClient, "", srcVolume)
373 | if err != nil {
374 | return nil, err
375 | }
376 |
377 | // determine the destination volume name and type
378 | volumeName := srcVolume.Name
379 | if toVolumeName != "" {
380 | volumeName = toVolumeName
381 | }
382 | volumeType := srcVolume.VolumeType
383 | if toVolumeType != "" {
384 | volumeType = toVolumeType
385 | }
386 |
387 | defer func() {
388 | // remove source region transition image
389 | if err := images.Delete(ctx, srcImageClient, srcImage.ID).ExtractErr(); err != nil {
390 | log.Printf("Failed to delete source transition image: %s", err)
391 | }
392 | }()
393 |
394 | if !loc.SameRegion {
395 | // migrate the image/volume within different regions
396 | dstImage, err := migrateImage(ctx, srcImageClient, dstImageClient, srcObjectClient, dstObjectClient, srcImage, srcImage.Name)
397 | if err != nil {
398 | return nil, fmt.Errorf("failed to migrate the image: %s", err)
399 | }
400 | defer func() {
401 | // remove destination region transition image
402 | if err := images.Delete(ctx, dstImageClient, dstImage.ID).ExtractErr(); err != nil {
403 | log.Printf("Failed to delete destination transition image: %s", err)
404 | }
405 | }()
406 | return imageToVolume(ctx, dstVolumeClient, dstImageClient, dstImage.ID, volumeName, srcVolume.Description, volumeType, az, srcVolume.Size, srcVolume)
407 | }
408 |
409 | // migrate the image/volume within the same region
410 | dstVolume, err := imageToVolume(ctx, srcVolumeClient, srcImageClient, srcImage.ID, volumeName, srcVolume.Description, volumeType, az, srcVolume.Size, srcVolume)
411 | if err != nil {
412 | return nil, err
413 | }
414 |
415 | if loc.SameProject {
416 | // we're done
417 | return dstVolume, nil
418 | }
419 |
420 | return transferVolume(ctx, srcVolumeClient, dstVolumeClient, dstVolume)
421 | }
422 |
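// imageToVolume creates a volume from the given image in the requested
// availability zone, restores the bootable flag from the source volume when
// the created volume's flag diverges, and waits for both the new volume and
// the source image to settle before returning.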
423 | func imageToVolume(ctx context.Context, imgToVolClient, imgDstClient *gophercloud.ServiceClient, imageID, volumeName, volumeDescription, volumeType, az string, volumeSize int, srcVolume *volumes.Volume) (*volumes.Volume, error) {
424 | reauthClient(ctx, imgToVolClient, "imageToVolume")
425 |
426 | dstVolumeCreateOpts := volumes.CreateOpts{
427 | Size: volumeSize,
428 | Name: volumeName,
429 | Description: volumeDescription,
430 | AvailabilityZone: az,
431 | ImageID: imageID,
432 | VolumeType: volumeType,
433 | }
434 | dstVolume, err := volumes.Create(ctx, imgToVolClient, dstVolumeCreateOpts, nil).Extract()
435 | if err != nil {
436 | return nil, fmt.Errorf("failed to create a destination volume: %s", err)
437 | }
438 |
439 | dstVolume, err = waitForVolume(ctx, imgToVolClient, dstVolume.ID, waitForVolumeSec)
440 | if err != nil {
441 | // TODO: delete volume?
442 | return nil, err
443 | }
444 |
445 | if srcVolume != nil && srcVolume.Bootable != "" && dstVolume.Bootable != srcVolume.Bootable {
446 | // when a non-bootable volume is created from a Glance image, it has a bootable flag set
447 | v, err := strconv.ParseBool(srcVolume.Bootable)
448 | if err != nil {
449 | log.Printf("Failed to parse %s to bool: %s", srcVolume.Bootable, err)
450 | } else {
451 | bootableOpts := volumes.BootableOpts{
452 | Bootable: v,
453 | }
454 | err = volumes.SetBootable(ctx, imgToVolClient, dstVolume.ID, bootableOpts).ExtractErr()
455 | if err != nil {
456 | log.Printf("Failed to update volume bootable options: %s", err)
457 | }
458 | }
459 | }
460 |
461 | createVolumeSpeed(dstVolume)
462 |
463 | // the image can still be in a transitional state; wait for it to settle before a deferred cleanup deletes it
464 | _, err = waitForImage(ctx, imgDstClient, nil, imageID, 0, waitForImageSec)
465 | if err != nil {
466 | // TODO: delete volume?
467 | return nil, err
468 | }
469 |
470 | return dstVolume, nil
471 | }
472 |
473 | func transferVolume(ctx context.Context, srcVolumeClient, dstVolumeClient *gophercloud.ServiceClient, srcVolume *volumes.Volume) (*volumes.Volume, error) {
474 | // change volume ownership
475 | transferOpts := transfers.CreateOpts{
476 | VolumeID: srcVolume.ID,
477 | }
478 | transfer, err := transfers.Create(ctx, srcVolumeClient, transferOpts).Extract()
479 | if err != nil {
480 | return nil, fmt.Errorf("failed to create a %q volume transfer request: %s", srcVolume.ID, err)
481 | }
482 |
483 | _, err = transfers.Accept(ctx, dstVolumeClient, transfer.ID, transfers.AcceptOpts{AuthKey: transfer.AuthKey}).Extract()
484 | if err != nil {
485 | if err := transfers.Delete(ctx, srcVolumeClient, transfer.ID).ExtractErr(); err != nil {
486 | log.Printf("Failed to delete a %q volume transfer request: %s", srcVolume.ID, err)
487 | }
488 | return nil, fmt.Errorf("failed to accept a %q volume transfer request: %s", srcVolume.ID, err)
489 | }
490 |
491 | return srcVolume, nil
492 | }
493 |
494 | // VolumeCmd represents the volume command.
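//
// A hypothetical invocation (the flag names are registered in
// initVolumeCmdFlags below; the placeholders are examples, not real values):
//
//	cyclone volume <name|id> --to-az <az> --to-volume-name <new-name> --to-volume-type <type>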
495 | var VolumeCmd = &cobra.Command{
496 | Use: "volume <name|id>",
497 | Args: cobra.ExactArgs(1),
498 | Short: "Clone a volume",
499 | PreRunE: func(cmd *cobra.Command, args []string) error {
500 | if err := parseTimeoutArgs(); err != nil {
501 | return err
502 | }
503 | imageWebDownload = viper.GetBool("image-web-download")
504 | return viper.BindPFlags(cmd.Flags())
505 | },
506 | RunE: func(cmd *cobra.Command, args []string) error {
507 | // migrate volume
508 |
509 | volume := args[0]
510 |
511 | toAZ := viper.GetString("to-az")
512 | toVolumeName := viper.GetString("to-volume-name")
513 | toVolumeType := viper.GetString("to-volume-type")
514 | cloneViaSnapshot := viper.GetBool("clone-via-snapshot")
515 |
516 | // source and destination parameters
517 | loc, err := getSrcAndDst(toAZ)
518 | if err != nil {
519 | return err
520 | }
521 |
522 | srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src)
523 | if err != nil {
524 | return fmt.Errorf("failed to create a source OpenStack client: %s", err)
525 | }
526 |
527 | srcImageClient, err := newGlanceV2Client(srcProvider, loc.Src.Region)
528 | if err != nil {
529 | return fmt.Errorf("failed to create source image client: %s", err)
530 | }
531 |
532 | srcVolumeClient, err := newBlockStorageV3Client(srcProvider, loc.Src.Region)
533 | if err != nil {
534 | return fmt.Errorf("failed to create source volume client: %s", err)
535 | }
536 |
537 | var srcObjectClient *gophercloud.ServiceClient
538 | if imageWebDownload {
539 | srcObjectClient, err = newObjectStorageV1Client(srcProvider, loc.Src.Region)
540 | if err != nil {
541 | return fmt.Errorf("failed to create source object storage client: %s", err)
542 | }
543 | }
544 |
545 | // resolve volume name to an ID
546 | if v, err := volumes_utils.IDFromName(cmd.Context(), srcVolumeClient, volume); err == nil {
547 | volume = v
548 | } else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok {
549 | return err
550 | }
551 |
552 | dstProvider, err := newOpenStackClient(cmd.Context(), loc.Dst)
553 | if err != nil {
554 | return fmt.Errorf("failed to create a destination OpenStack client: %s", err)
555 | }
556 |
557 | dstImageClient, err := newGlanceV2Client(dstProvider, loc.Dst.Region)
558 | if err != nil {
559 | return fmt.Errorf("failed to create destination image client: %s", err)
560 | }
561 |
562 | dstVolumeClient, err := newBlockStorageV3Client(dstProvider, loc.Dst.Region)
563 | if err != nil {
564 | return fmt.Errorf("failed to create destination volume client: %s", err)
565 | }
566 |
567 | dstObjectClient, err := newObjectStorageV1Client(dstProvider, loc.Dst.Region)
568 | if err != nil {
569 | log.Printf("Failed to create destination object storage client, detailed image clone statistics will be unavailable: %s", err)
570 | }
571 |
572 | srcVolume, err := waitForVolume(cmd.Context(), srcVolumeClient, volume, waitForVolumeSec)
573 | if err != nil {
574 | return fmt.Errorf("failed to wait for a %q volume: %s", volume, err)
575 | }
576 |
577 | err = checkAvailabilityZone(cmd.Context(), dstVolumeClient, srcVolume.AvailabilityZone, &toAZ, &loc)
578 | if err != nil {
579 | return err
580 | }
581 |
582 | defer measureTime()
583 |
584 | dstVolume, err := migrateVolume(cmd.Context(), srcImageClient, srcVolumeClient, srcObjectClient, dstImageClient, dstVolumeClient, dstObjectClient, srcVolume, toVolumeName, toVolumeType, toAZ, cloneViaSnapshot, loc)
585 | if err != nil {
586 | return err
587 | }
588 |
589 | log.Printf("Migrated target volume %q (id: %q) is now in the %q availability zone", dstVolume.Name, dstVolume.ID, dstVolume.AvailabilityZone)
590 |
591 | return nil
592 | },
593 | }
594 |
595 | // VolumeToImageCmd represents the volume to-image command.
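//
// A hypothetical invocation (to-image is registered as a subcommand of the
// volume command in init below; the placeholder is an example):
//
//	cyclone volume to-image <name|id> --to-image-name <new-image-name>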
596 | var VolumeToImageCmd = &cobra.Command{
597 | Use: "to-image <name|id>",
598 | Args: cobra.ExactArgs(1),
599 | Short: "Upload a volume to an image",
600 | PreRunE: func(cmd *cobra.Command, args []string) error {
601 | if err := parseTimeoutArgs(); err != nil {
602 | return err
603 | }
604 | return viper.BindPFlags(cmd.Flags())
605 | },
606 | RunE: func(cmd *cobra.Command, args []string) error {
607 | // convert a volume to an image
608 |
609 | volume := args[0]
610 |
611 | toImageName := viper.GetString("to-image-name")
612 | cloneViaSnapshot := viper.GetBool("clone-via-snapshot")
613 |
614 | // source and destination parameters
615 | loc, err := getSrcAndDst("")
616 | if err != nil {
617 | return err
618 | }
619 |
620 | srcProvider, err := newOpenStackClient(cmd.Context(), loc.Src)
621 | if err != nil {
622 | return fmt.Errorf("failed to create a source OpenStack client: %s", err)
623 | }
624 |
625 | srcImageClient, err := newGlanceV2Client(srcProvider, loc.Src.Region)
626 | if err != nil {
627 | return fmt.Errorf("failed to create source image client: %s", err)
628 | }
629 |
630 | srcVolumeClient, err := newBlockStorageV3Client(srcProvider, loc.Src.Region)
631 | if err != nil {
632 | return fmt.Errorf("failed to create source volume client: %s", err)
633 | }
634 |
635 | srcObjectClient, err := newObjectStorageV1Client(srcProvider, loc.Src.Region)
636 | if err != nil {
637 | return fmt.Errorf("failed to create source object storage client: %s", err)
638 | }
639 |
640 | // resolve volume name to an ID
641 | if v, err := volumes_utils.IDFromName(cmd.Context(), srcVolumeClient, volume); err == nil {
642 | volume = v
643 | } else if err, ok := err.(gophercloud.ErrMultipleResourcesFound); ok {
644 | return err
645 | }
646 |
647 | srcVolume, err := waitForVolume(cmd.Context(), srcVolumeClient, volume, waitForVolumeSec)
648 | if err != nil {
649 | return fmt.Errorf("failed to wait for a %q volume: %s", volume, err)
650 | }
651 |
652 | var toAZ string
653 | err = checkAvailabilityZone(cmd.Context(), nil, srcVolume.AvailabilityZone, &toAZ, &loc)
654 | if err != nil {
655 | return err
656 | }
657 |
658 | defer measureTime()
659 |
660 | if srcVolume.Status == "in-use" {
661 | // clone the "in-use" volume
662 | newVolume, err := cloneVolume(cmd.Context(), srcVolumeClient, srcObjectClient, srcVolume, "", toAZ, cloneViaSnapshot, loc)
663 | if err != nil {
664 | return err
665 | }
666 |
667 | defer func() {
668 | if err := volumes.Delete(cmd.Context(), srcVolumeClient, newVolume.ID, nil).ExtractErr(); err != nil {
669 | log.Printf("Failed to delete a cloned volume: %s", err)
670 | }
671 | }()
672 |
673 | // the volume was cloned, now we can safely convert it to an image
674 | srcVolume = newVolume
675 | }
676 |
677 | dstImage, err := volumeToImage(cmd.Context(), srcImageClient, srcVolumeClient, srcObjectClient, toImageName, srcVolume)
678 | if err != nil {
679 | return err
680 | }
681 |
682 | log.Printf("Target image name is %q (id: %q)", dstImage.Name, dstImage.ID)
683 |
684 | return nil
685 | },
686 | }
687 |
688 | func init() {
689 | initVolumeCmdFlags()
690 | VolumeCmd.AddCommand(VolumeToImageCmd)
691 | RootCmd.AddCommand(VolumeCmd)
692 | }
693 |
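// initVolumeCmdFlags registers the flags for the volume and to-image
// commands; the container-format and disk-format defaults apply only when
// the source volume metadata doesn't already carry this info (see
// volumeToImage above).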
name") 697 | VolumeCmd.Flags().StringP("to-volume-type", "", "", "destination volume type") 698 | VolumeCmd.Flags().StringP("container-format", "", "bare", "image container format, when source volume doesn't have this info") 699 | VolumeCmd.Flags().StringP("disk-format", "", "vmdk", "image disk format, when source volume doesn't have this info") 700 | VolumeCmd.Flags().BoolP("clone-via-snapshot", "", false, "clone a volume via snapshot") 701 | 702 | VolumeToImageCmd.Flags().StringP("container-format", "", "bare", "image container format, when source volume doesn't have this info") 703 | VolumeToImageCmd.Flags().StringP("disk-format", "", "vmdk", "image disk format, when source volume doesn't have this info") 704 | VolumeToImageCmd.Flags().BoolP("clone-via-snapshot", "", false, "clone a volume via snapshot") 705 | VolumeToImageCmd.Flags().StringP("to-image-name", "", "", "destination image name") 706 | } 707 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ] 6 | } 7 | --------------------------------------------------------------------------------