├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── RFE.md
│   │   └── bug-report.md
│   └── workflows
│       ├── go.yml
│       ├── needs-triage.yaml
│       ├── reconcile_gh_issue.yaml
│       └── release.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── OWNERS
├── README.md
├── cmd
│   ├── apply
│   │   └── apply.go
│   ├── export
│   │   ├── cluster.go
│   │   ├── discover.go
│   │   └── export.go
│   ├── plugin-manager
│   │   ├── add
│   │   │   └── add.go
│   │   ├── list
│   │   │   └── list.go
│   │   ├── plugin-manager.go
│   │   └── remove
│   │       └── remove.go
│   ├── runfn
│   │   ├── runfn.go
│   │   └── util.go
│   ├── skopeo-sync-gen
│   │   └── skopeo-sync-gen.go
│   ├── transfer-pvc
│   │   ├── README.md
│   │   ├── progress.go
│   │   ├── progress_test.go
│   │   ├── transfer-pvc.go
│   │   └── transfer-pvc_test.go
│   ├── transform
│   │   ├── listplugins
│   │   │   └── listplugins.go
│   │   ├── optionals
│   │   │   └── optionals.go
│   │   └── transform.go
│   ├── tunnel-api
│   │   └── tunnel-api.go
│   └── version
│       └── version.go
├── go.mod
├── go.sum
├── hack
│   ├── minikube-clusters-delete.sh
│   └── minikube-clusters-start.sh
├── internal
│   ├── buildinfo
│   │   └── buildinfo.go
│   ├── file
│   │   ├── file_helper.go
│   │   └── file_helper_test.go
│   ├── flags
│   │   └── global_flags.go
│   └── plugin
│       ├── plugin_helper.go
│       ├── plugin_manager_helper.go
│       ├── plugin_manager_helper_test.go
│       └── plugin_types.go
└── main.go

/.github/ISSUE_TEMPLATE/RFE.md:
--------------------------------------------------------------------------------
---
name: Request for Enhancement (RFE)
about: Request features and enhancements to crane using this template
title: "[RFE] ..."
labels: kind/feature

---

**What is your persona?**

Replace: Consider your role as an end-user so we can understand more about
your concerns. Some examples:

* I'm an application developer who is also responsible for the deployment of
my application, but I do not have cluster administrative privilege within my
multi-tenant cluster.
* I'm an application admin who plays an SRE role, responsible for the health
and deployment of the application, but I am not necessarily a developer of the
application itself.
* I'm a cluster administrator with a high level of privilege. I'm
not aware of individual application details or their concerns, but I'm responsible
for the cluster as a whole.

**What is your story / use case?**

As a [persona], I [want to do a thing], [so that I'm able to do...]

What is your specific intent? Not necessarily the feature you want, but what
are you trying to achieve?

How does your immediate need fit into your bigger picture? What's the overall
benefit?

**Do you have a suggestion for implementation?**

It's important to decouple implementation from the story and use case itself,
but do you have a proposed solution that would serve your needs?
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
---
name: Bug Report
about: Generic bug report template
title: "[BUG] ..."
labels: kind/bug

---

**What version of crane are you running, and what are your clusters + platform?**

**What did you expect to happen?**

**What actually happened?**

**Please include any relevant logs or errors**
--------------------------------------------------------------------------------
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
name: Build and test Go
on: [push, pull_request]
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.21
        uses: actions/setup-go@v2
        with:
          go-version: 1.21

      - name: Check out source code
        uses: actions/checkout@v2
        with:
          path: main
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Build
        env:
          GOPROXY: "https://proxy.golang.org"
        run: cd main && go build ./...

      - name: Test
        env:
          GOPROXY: "https://proxy.golang.org"
        run: cd main && go test -v ./...
--------------------------------------------------------------------------------
/.github/workflows/needs-triage.yaml:
--------------------------------------------------------------------------------
name: Needs Triage

on:
  # At 0900 every Tues/Thursday
  #schedule:
  #  - cron: "0 9 * * 2,4"
  workflow_dispatch:

jobs:
  needs-triage:
    runs-on: ubuntu-latest
    steps:
      - name: Needs Triage
        id: create
        uses: konveyor/github-actions/gh-issue-slack-digest@main
        with:
          slackWebhookUrl: ${{ secrets.SLACK_WEBHOOK_URL }}
          messageHeaderPrefix: "Issues needing triage"
          hasLabels: "needs-triage"
          mentionUsers: "@mtrho-triage-team"
--------------------------------------------------------------------------------
/.github/workflows/reconcile_gh_issue.yaml:
--------------------------------------------------------------------------------
name: Reconcile GitHub Issue

on:
  issues:
    types:
      - opened
      - edited
      - closed
      - reopened
      - labeled
      - unlabeled

# This prevents potential race conditions by only allowing this action to handle
# one update at a time for a given issue.
concurrency:
  group: reconcile-issue-${{ github.event.issue.number }}
  cancel-in-progress: true

jobs:
  reconcile-issue:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: needs-triage
        uses: konveyor/github-actions/require-matching-label@main
        with:
          missingComment: |
            This issue is currently awaiting triage.
            If contributors determine this is a relevant issue, they will accept it by applying the `triage/accepted` label and provide further guidance.
            The `triage/accepted` label can be added by org members by writing `/triage accepted` in a comment.
32 | missingLabel: "needs-triage" 33 | regexp: "^triage/accepted$" 34 | 35 | - name: needs-kind 36 | uses: konveyor/github-actions/require-matching-label@main 37 | with: 38 | missingLabel: "needs-kind" 39 | regexp: "^kind/" 40 | 41 | - name: needs-priority 42 | uses: konveyor/github-actions/require-matching-label@main 43 | with: 44 | missingLabel: "needs-priority" 45 | regexp: "^priority/" 46 | 47 | - name: Reconcile Issue 48 | id: create 49 | uses: konveyor/github-actions/reconcile-issue@main 50 | with: 51 | jiraBaseUrl: https://issues.redhat.com 52 | jiraToken: ${{ secrets.JIRA_API_TOKEN }} 53 | jiraProject: MTRHO 54 | requireMissingLabels: needs-triage, needs-kind, needs-priority 55 | additionalLabels: community 56 | addWatchers: ernelson@redhat.com 57 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Releases 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | version: 6 | description: Bump Version 7 | default: v1.0.0 8 | required: true 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Set up Go 1.18 14 | uses: actions/setup-go@v2 15 | with: 16 | go-version: 1.18 17 | 18 | - name: Check out source code 19 | uses: actions/checkout@v2 20 | with: 21 | path: main 22 | ref: ${{ github.event.pull_request.head.sha }} 23 | - name: Build Linux AMD64 24 | env: 25 | GOPROXY: "https://proxy.golang.org" 26 | run: cd main && GOOS=linux GOARCH=amd64 go build -o bin/amd64-linux-crane-${{ github.event.inputs.version }} main.go 27 | - name: Build Darwin AMD64 28 | env: 29 | GOPROXY: "https://proxy.golang.org" 30 | run: cd main && GOOS=darwin GOARCH=amd64 go build -o bin/amd64-darwin-crane-${{ github.event.inputs.version }} main.go 31 | - name: Build Darwin ARM 32 | env: 33 | GOPROXY: "https://proxy.golang.org" 34 | run: cd main && GOOS=darwin GOARCH=arm64 go build -o bin/arm64-darwin-crane-${{ github.event.inputs.version }} main.go 35 | - name: release 36 | uses: ncipollo/release-action@v1 37 | with: 38 | artifacts: "main/bin/*" 39 | token: ${{ secrets.GITHUB_TOKEN }} 40 | tag: ${{ github.event.inputs.version }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | .vscode 14 | 15 | # Dependency directories 16 | vendor/ 17 | 18 | crane 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM quay.io/konveyor/builder:latest as builder 3 | ENV GOFLAGS "-mod=mod" 4 | WORKDIR /go/src/github.com/konveyor/crane 5 | 6 | # Copy the Go Modules manifests 7 | COPY go.mod go.mod 8 | COPY go.sum go.sum 9 | # cache deps before building and copying source so that we don't need to re-download as much 10 | # and so that source changes don't invalidate our downloaded layer 11 | RUN go mod download 12 | COPY cmd/ cmd/ 13 | COPY internal/ internal/ 14 | COPY main.go main.go 15 | 16 | RUN go build -a -o /build/crane main.go 17 | 18 | FROM registry.access.redhat.com/ubi8-minimal 19 | COPY --from=builder /build/crane 
/usr/local/bin/crane 20 | 21 | ENTRYPOINT ["/usr/local/bin/crane"] 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2016 Red Hat, Inc. 

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--------------------------------------------------------------------------------
/OWNERS:
--------------------------------------------------------------------------------
reviewers:
  - jmontleon
  - shawn-hurley
  - pranavgaikwad
  - eriknelson
  - dymurray
  - sseago
  - rayfordj
  - jaydipgabani
  - djzager
  - hhpatel14

approvers:
  - jmontleon
  - djzager
  - shawn-hurley
  - pranavgaikwad
  - eriknelson
  - jaydipgabani
  - sseago
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Crane

## Intro
[Crane](https://konveyor.github.io/crane/overview/) is a migration tool under the [Konveyor](https://www.konveyor.io/) community that helps application owners migrate Kubernetes workloads and their state between clusters.

## YouTube Demo
[![Alt text](https://img.youtube.com/vi/PoSivlgVLf8/0.jpg)](https://www.youtube.com/watch?v=PoSivlgVLf8)

## Overview

Migrating an application between Kubernetes clusters may be more nuanced than one would imagine. In an ideal situation, this would be as simple as applying the YAML manifests to the new cluster and adjusting DNS records to redirect external traffic, yet often there is more that is needed. Below are a few of the common concerns that need to be addressed:
* _YAML manifests_ - do we have the original YAML manifests stored in version control or accessible so we can reapply to the new cluster?
* _Configuration Drift_ - if we do have the YAML manifests, do we have confidence they are still accurate and represent the application as it’s running in the cluster? Perhaps the application has been running for a period of time, been modified, and we no longer have confidence we can reproduce it exactly as it’s currently running.
* _State_ - we may need to address persisted state that has been generated inside of the cluster, either small elements of state such as generated certificates stored in a Secret, data stored in Custom Resources, or gigabytes of data in persistent volumes.
* _Customizations needed for new environment_ - we may be migrating across cloud vendors or environments that require transformations to the applications so they run in the new environment.

Crane helps users do more than just handle a point-in-time migration of a workload; it is intended to help users adopt current best practices, such as onboarding to GitOps, by reconstructing redeployable YAML manifests from inspecting a running application. The project is the result of several years of experience performing large-scale production Kubernetes migrations and addressing the lessons learned.

Crane follows the Unix philosophy of building small, sharply focused tools that can be assembled in powerful ways. It is designed with transparency and ease of diagnostics in mind. It drives migration through a pipeline of non-destructive tasks that output results to disk so the operation can be easily audited and versioned without impacting live workloads. The tasks can be run repeatedly and will produce consistent results given the same inputs, without side effects on the system at large.

Crane is composed of several repositories:
* [konveyor/crane](https://github.com/konveyor/crane): (this repo) The command line tool that migrates applications from the terminal.
* [konveyor/crane-lib](https://github.com/konveyor/crane-lib): The brains behind Crane functionality, responsible for transforming resources.
* [konveyor/crane-plugins](https://github.com/konveyor/crane-plugins): Collection of plugins from the Konveyor community based on experience from performing Kubernetes migrations.
* [konveyor/crane-plugin-openshift](https://github.com/konveyor/crane-plugin-openshift): An optional plugin specifically tailored to manage OpenShift migration workloads and an example of a repeatable best practice.
* [backube/pvc-transfer](https://github.com/backube/pvc-transfer): The library that powers the Persistent Volume migration ability, shared with the [VolSync](https://volsync.readthedocs.io/en/stable/index.html) project. State migration of Persistent Volumes is handled by rsync, allowing storage migrations between different storage classes.
* [konveyor/crane-runner](https://github.com/konveyor/crane-runner): A collection of resources showing how to leverage Tekton to build migration workflows with Crane.
* [konveyor/crane-ui-plugin](https://github.com/konveyor/crane-ui-plugin): A dynamic UI plugin for the [openshift/console](https://github.com/openshift/console).
* [konveyor/mtrho-operator](https://github.com/konveyor/mtrho-operator): An Operator which deploys Crane in an opinionated manner, leveraging Tekton for migrating applications.

How does it work? Crane works by (a condensed command sketch follows the list):
1) Inspecting a running application and exporting all associated resources
2) Leveraging a library of plugins to aid in transforming the exported manifests to yield redeployable manifests
3) Applying the transformed manifests into the destination cluster
4) Optionally orchestrating persistent state migrations
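
Condensed into commands, that flow looks like the sketch below, using each subcommand's default directories; the namespace `my-app` and the destination kubeconfig context are illustrative placeholders:

    $ crane export -n my-app                  # writes export/resources/my-app/
    $ crane transform                         # writes transform/resources/my-app/
    $ crane apply                             # writes output/resources/my-app/
    $ kubectl --context destination apply -f output/resources/my-app/

Each step only reads the previous step's directory and writes its own, so intermediate results can be inspected, versioned, or re-run at any point.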

## Install

* Obtain the `crane` binary by either:
  * Downloading a prebuilt release from https://github.com/konveyor/crane/releases
  * Cloning this repo and building the crane binary via: `go build -o crane main.go`
* Install the `crane` binary in your `$PATH`

## Usage Example
1. `$ kubectl create namespace guestbook`
1. `$ kubectl --namespace guestbook apply -k github.com/konveyor/crane-runner/examples/resources/guestbook`
1. `$ crane export -n guestbook`
   * Discovers and exports all resources in the 'guestbook' namespace
   * A directory 'export/resources/guestbook' is populated with the raw YAML content of each exported resource
   * Example:
     * `$ cat export/resources/guestbook/Secret_guestbook_builder-dockercfg-5ztj6.yaml`

           kind: Secret
           apiVersion: v1
           metadata:
             name: builder-dockercfg-5ztj6
             namespace: guestbook
             resourceVersion: "3213488"
             uid: 8fb75dcd-68b2-4939-bfb9-1c8241a7b146
             ...
           data:
             .dockercfg: < ...SNIP.... >

4. `$ crane transform`
   * A directory 'transform/resources/guestbook' is populated with 'transforms', which are JSONPatch content to be applied to each of the YAML files produced by the prior 'export'
   * Example:
     * `$ cat transform/resources/guestbook/transform-Secret_guestbook_builder-dockercfg-5ztj6.yaml`

           [{"op":"remove","path":"/metadata/uid"},{"op":"remove","path":"/metadata/resourceVersion"},{"op":"remove","path":"/metadata/creationTimestamp"}]

   * We can see that this transform is part of the standard Kubernetes plugin included with Crane and is configured to remove several fields from the 'metadata' section (a hand-written variant is sketched after this list).

5. `$ crane apply`
   * A directory `output/resources/guestbook/` is populated with redeployable YAML content; this is the 'export' content modified via the transforms produced by 'transform'.
   * Example:
     * `$ cat output/resources/guestbook/Secret_guestbook_builder-dockercfg-5ztj6.yaml`

           kind: Secret
           apiVersion: v1
           metadata:
             name: builder-dockercfg-5ztj6
             namespace: guestbook
             ...
           data:
             .dockercfg: < ...SNIP.... >

   * Note that the fields 'metadata.uid', 'metadata.resourceVersion', and 'metadata.creationTimestamp' are removed from this YAML.
6. The content in the `output/resources/guestbook` directory is now ready to be used as needed; it could be redeployed to a new cluster or checked into Git to be leveraged with a GitOps solution.
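
Because the transform files are plain JSON Patch documents on disk, they can also be authored or extended by hand before running `crane apply`. A hypothetical sketch: appending an operation that strips a nodeSelector pinned to the old cluster (the extra operation assumes the field exists in the exported manifest; the file is the one generated above):

    [
      {"op":"remove","path":"/metadata/uid"},
      {"op":"remove","path":"/metadata/resourceVersion"},
      {"op":"remove","path":"/metadata/creationTimestamp"},
      {"op":"remove","path":"/spec/template/spec/nodeSelector"}
    ]

`crane apply` will pick the edited file up like any other transform, since it simply applies whatever patches it finds in the transform directory.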

## Further Examples

Please see [konveyor/crane-runner/main/examples](https://github.com/konveyor/crane-runner/tree/main/examples#readme) for further scenarios to explore what can be done with Crane + Tekton for migrating applications.

## Known issues

- v0.0.2 (alpha1)
  - The new-namespace optional arg (and associated functionality) in the
    built-in kubernetes plugin is incomplete. `metadata.namespace` will be
    modified, but other required changes will not be made. It will be
    removed from this plugin in the next release and expanded
    functionality will most likely be added via a separate (optional)
    plugin.
--------------------------------------------------------------------------------
/cmd/apply/apply.go:
--------------------------------------------------------------------------------
package apply

import (
	"context"
	"errors"
	"os"
	"path/filepath"

	"github.com/konveyor/crane-lib/apply"
	"github.com/konveyor/crane/internal/file"
	"github.com/konveyor/crane/internal/flags"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"sigs.k8s.io/yaml"
)

type Options struct {
	// Two GlobalFlags struct fields are needed
	// 1. cobraGlobalFlags for explicit CLI args parsed by cobra
	// 2. globalFlags for the args merged with values from the viper config file
	cobraGlobalFlags *flags.GlobalFlags
	globalFlags      *flags.GlobalFlags
	// Two Flags struct fields are needed
	// 1. cobraFlags for explicit CLI args parsed by cobra
	// 2. Flags for the args merged with values from the viper config file
	cobraFlags Flags
	Flags
}

type Flags struct {
	ExportDir    string `mapstructure:"export-dir"`
	TransformDir string `mapstructure:"transform-dir"`
	OutputDir    string `mapstructure:"output-dir"`
}

func (o *Options) Complete(c *cobra.Command, args []string) error {
	// TODO: @shawn-hurley
	return nil
}

func (o *Options) Validate() error {
	// TODO: @shawn-hurley
	return nil
}

func (o *Options) Run() error {
	return o.run()
}

func NewApplyCommand(f *flags.GlobalFlags) *cobra.Command {
	o := &Options{
		cobraGlobalFlags: f,
	}
	cmd := &cobra.Command{
		Use:   "apply",
		Short: "Apply the transformations to the exported resources and save results in an output directory",
		RunE: func(c *cobra.Command, args []string) error {
			if err := o.Complete(c, args); err != nil {
				return err
			}
			if err := o.Validate(); err != nil {
				return err
			}
			if err := o.Run(); err != nil {
				return err
			}

			return nil
		},
		PreRun: func(cmd *cobra.Command, args []string) {
			viper.BindPFlags(cmd.Flags())
			viper.Unmarshal(&o.Flags)
			viper.Unmarshal(&o.globalFlags)
		},
	}

	addFlagsForOptions(&o.cobraFlags, cmd)

	return cmd
}

func addFlagsForOptions(o *Flags, cmd *cobra.Command) {
	cmd.Flags().StringVarP(&o.ExportDir, "export-dir", "e", "export", "The path where the kubernetes resources are saved")
	cmd.Flags().StringVarP(&o.TransformDir, "transform-dir", "t", "transform", "The path where files that contain the transformations are saved")
	cmd.Flags().StringVarP(&o.OutputDir, "output-dir", "o", "output", "The path where files are to be saved after the transformations are applied")
}

func (o *Options) run() error {
	log := o.globalFlags.GetLogger()
	a := apply.Applier{}

	// Load all the resources from the export dir
	exportDir, err := filepath.Abs(o.ExportDir)
	if err != nil {
		// Handle errors better for users.
		return err
	}

	transformDir, err := filepath.Abs(o.TransformDir)
	if err != nil {
		return err
	}

	outputDir, err := filepath.Abs(o.OutputDir)
	if err != nil {
		return err
	}

	files, err := file.ReadFiles(context.TODO(), exportDir)
	if err != nil {
		return err
	}

	opts := file.PathOpts{
		TransformDir: transformDir,
		ExportDir:    exportDir,
		OutputDir:    outputDir,
	}

	//TODO: @shawn-hurley handle case where transform or whiteout file is not present.
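	// Each exported resource file is processed independently:
	//   1. if a matching whiteout file exists, the resource is skipped entirely;
	//   2. otherwise, if a transform file exists, its JSON patch is applied;
	//   3. the (possibly patched) resource is written as YAML under the output
	//      directory, mirroring the export directory layout.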
	for _, f := range files {
		whPath := opts.GetWhiteOutFilePath(f.Path)
		_, statErr := os.Stat(whPath)
		if !errors.Is(statErr, os.ErrNotExist) {
			log.Infof("resource file: %v is skipped due to whiteout file: %v", f.Info.Name(), whPath)
			continue
		}

		// Set doc to the object; only update it if the transform file exists
		doc, err := f.Unstructured.MarshalJSON()
		if err != nil {
			return err
		}

		tfPath := opts.GetTransformPath(f.Path)
		// Check if the transform file exists. If it does not, the resource is
		// written through unmodified.
		_, tfStatErr := os.Stat(tfPath)
		if tfStatErr != nil && !errors.Is(tfStatErr, os.ErrNotExist) {
			// Some other stat error here; err out
			return tfStatErr
		}

		if !errors.Is(tfStatErr, os.ErrNotExist) {
			transformfile, err := os.ReadFile(tfPath)
			if err != nil {
				return err
			}

			doc, err = a.Apply(f.Unstructured, transformfile)
			if err != nil {
				return err
			}
		}

		y, err := yaml.JSONToYAML(doc)
		if err != nil {
			return err
		}
		outputFilePath := opts.GetOutputFilePath(f.Path)
		// We must create all the directories here.
		err = os.MkdirAll(filepath.Dir(outputFilePath), 0777)
		if err != nil {
			return err
		}
		outputFile, err := os.Create(outputFilePath)
		if err != nil {
			return err
		}
		defer outputFile.Close()
		i, err := outputFile.Write(y)
		if err != nil {
			return err
		}
		log.Debugf("wrote %v bytes for file: %v", i, outputFilePath)
	}

	return nil

}
--------------------------------------------------------------------------------
/cmd/export/cluster.go:
--------------------------------------------------------------------------------
package export

import (
	"fmt"
	"strings"

	authv1 "github.com/openshift/api/authorization/v1"
	securityv1 "github.com/openshift/api/security/v1"
	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

type ClusterScopeHandler struct {
}

type admittedResource struct {
	APIgroup string
	Kind     string
}

var admittedClusterScopeResources = []admittedResource{
	{Kind: "ClusterRoleBinding", APIgroup: "rbac.authorization.k8s.io"},
	{Kind: "ClusterRole", APIgroup: "rbac.authorization.k8s.io"},
	{Kind: "SecurityContextConstraints", APIgroup: "security.openshift.io"},
}
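
// These are the only cluster-scoped kinds Crane will ever export, and only
// when cluster-scoped RBAC export is requested; any other cluster-scoped
// resource is filtered out during discovery (see isAdmittedResource in
// discover.go).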

func NewClusterScopeHandler() *ClusterScopeHandler {
	clusterScopeHandler := &ClusterScopeHandler{}

	return clusterScopeHandler
}

func isClusterScopedResource(apiGroup string, kind string) bool {
	for _, admitted := range admittedClusterScopeResources {
		if admitted.Kind == kind && admitted.APIgroup == apiGroup {
			return true
		}
	}
	return false
}

func (c *ClusterScopeHandler) filterRbacResources(resources []*groupResource, log logrus.FieldLogger) []*groupResource {
	log.Debug("Looking for ServiceAccount resources")

	handler := NewClusterScopedRbacHandler(log)
	var filteredResources []*groupResource
	for _, r := range resources {
		kind := r.APIResource.Kind
		if kind == "ServiceAccount" {
			for _, obj := range r.objects.Items {
				log.Debugf("Adding ServiceAccount %s in namespace %s", obj.GetName(), obj.GetNamespace())
				handler.serviceAccounts = append(handler.serviceAccounts, obj)
			}
		}
		if isClusterScopedResource(r.APIGroup, kind) {
			log.Debugf("Adding %d cluster resources of type %s", len(r.objects.Items), kind)
			handler.clusterResources[kind] = r
		} else {
			filteredResources = append(filteredResources, r)
		}
	}

	for _, k := range admittedClusterScopeResources {
		filtered, ok := handler.filteredResourcesOfKind(k)
		if ok && len(filtered.objects.Items) > 0 {
			filteredResources = append(filteredResources, filtered)
		}
70 | } 71 | 72 | return filteredResources 73 | } 74 | 75 | type ClusterScopedRbacHandler struct { 76 | log logrus.FieldLogger 77 | readyToFilter bool 78 | serviceAccounts []unstructured.Unstructured 79 | clusterResources map[string]*groupResource 80 | 81 | filteredClusterRoleBindings *groupResource 82 | } 83 | 84 | func NewClusterScopedRbacHandler(log logrus.FieldLogger) *ClusterScopedRbacHandler { 85 | handler := &ClusterScopedRbacHandler{log: log, readyToFilter: false} 86 | handler.clusterResources = make(map[string]*groupResource) 87 | return handler 88 | } 89 | 90 | func (c *ClusterScopedRbacHandler) prepareForFiltering() { 91 | c.readyToFilter = true 92 | c.log.Infof("Preparing matching ClusterRoleBindings") 93 | 94 | clusterRoleBindings, ok := c.clusterResources["ClusterRoleBinding"] 95 | if ok { 96 | c.filteredClusterRoleBindings = &groupResource{ 97 | APIGroup: clusterRoleBindings.APIGroup, 98 | APIVersion: clusterRoleBindings.APIVersion, 99 | APIGroupVersion: clusterRoleBindings.APIGroupVersion, 100 | APIResource: clusterRoleBindings.APIResource, 101 | } 102 | filteredClusterRoleBindings := unstructured.UnstructuredList{Items: []unstructured.Unstructured{}} 103 | for _, crb := range clusterRoleBindings.objects.Items { 104 | if c.acceptClusterRoleBinding(crb) { 105 | filteredClusterRoleBindings.Items = append(filteredClusterRoleBindings.Items, crb) 106 | c.log.Infof("Found matching %s %s", crb.GetKind(), crb.GetName()) 107 | } 108 | } 109 | c.filteredClusterRoleBindings.objects = &filteredClusterRoleBindings 110 | return 111 | } 112 | 113 | c.filteredClusterRoleBindings = &groupResource{ 114 | APIGroup: "NA", 115 | APIVersion: "NA", 116 | APIGroupVersion: "NA", 117 | APIResource: metav1.APIResource{}, 118 | } 119 | c.filteredClusterRoleBindings.objects = &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}} 120 | c.log.Error("The export of cluster level RBAC resources is enabled but no ClusterRoleBinding resources have been collected:" + 121 | " the actual error message can be found under the failures folder") 122 | } 123 | 124 | func (c *ClusterScopedRbacHandler) filteredResourcesOfKind(resource admittedResource) (*groupResource, bool) { 125 | if !c.readyToFilter { 126 | c.prepareForFiltering() 127 | } 128 | 129 | kind := resource.Kind 130 | clusterGroupResource, ok := c.clusterResources[kind] 131 | if ok { 132 | if kind == "ClusterRoleBinding" { 133 | return c.filteredClusterRoleBindings, true 134 | } 135 | 136 | filtered := make([]unstructured.Unstructured, 0) 137 | initialLen := len(clusterGroupResource.objects.Items) 138 | c.log.Infof("Filtering for kind %s (%d found)", kind, initialLen) 139 | for _, r := range clusterGroupResource.objects.Items { 140 | if c.accept(kind, r) { 141 | filtered = append(filtered, r) 142 | } 143 | } 144 | clusterGroupResource.objects.Items = filtered 145 | c.log.Infof("After filtering %d remained out of %d", len(clusterGroupResource.objects.Items), initialLen) 146 | } 147 | return clusterGroupResource, ok 148 | } 149 | 150 | func (c *ClusterScopedRbacHandler) accept(kind string, clusterResource unstructured.Unstructured) bool { 151 | c.log.Debugf("Checking acceptance for %s of kind %s", clusterResource.GetName(), kind) 152 | if clusterResource.GroupVersionKind().Kind == "ClusterRole" { 153 | return c.acceptClusterRole(clusterResource) 154 | } else if clusterResource.GroupVersionKind().Kind == "SecurityContextConstraints" { 155 | return c.acceptSecurityContextConstraints(clusterResource) 156 | } 157 | return false 158 | } 159 | 160 | 
func (c *ClusterScopedRbacHandler) acceptClusterRoleBinding(clusterResource unstructured.Unstructured) bool { 161 | var crb authv1.ClusterRoleBinding 162 | err := runtime.DefaultUnstructuredConverter. 163 | FromUnstructured(clusterResource.Object, &crb) 164 | if err != nil { 165 | c.log.Warnf("Cannot convert to authv1.ClusterRoleBinding: %s", err) 166 | } else { 167 | for _, s := range crb.Subjects { 168 | if s.Kind == "ServiceAccount" && c.anyServiceAccountInNamespace(s.Namespace, s.Name) { 169 | c.log.Infof("Accepted %s of kind %s", clusterResource.GetName(), clusterResource.GetKind()) 170 | return true 171 | } 172 | } 173 | } 174 | return false 175 | } 176 | 177 | func (c *ClusterScopedRbacHandler) acceptClusterRole(clusterResource unstructured.Unstructured) bool { 178 | var cr authv1.ClusterRole 179 | err := runtime.DefaultUnstructuredConverter. 180 | FromUnstructured(clusterResource.Object, &cr) 181 | if err != nil { 182 | c.log.Warnf("Cannot convert to authv1.ClusterRole: %s", err) 183 | } else { 184 | for _, f := range c.filteredClusterRoleBindings.objects.Items { 185 | var crb authv1.ClusterRoleBinding 186 | err := runtime.DefaultUnstructuredConverter. 187 | FromUnstructured(f.Object, &crb) 188 | if err != nil { 189 | c.log.Warnf("Cannot convert to authv1.ClusterRoleBinding: %s", err) 190 | } else { 191 | if crb.RoleRef.Kind == "ClusterRole" && crb.RoleRef.Name == cr.Name { 192 | c.log.Infof("Accepted %s of kind %s", clusterResource.GetName(), clusterResource.GetKind()) 193 | return true 194 | } 195 | } 196 | } 197 | } 198 | return false 199 | } 200 | 201 | func (c *ClusterScopedRbacHandler) acceptSecurityContextConstraints(clusterResource unstructured.Unstructured) bool { 202 | var scc securityv1.SecurityContextConstraints 203 | err := runtime.DefaultUnstructuredConverter. 204 | FromUnstructured(clusterResource.Object, &scc) 205 | if err != nil { 206 | c.log.Warnf("Cannot convert to securityv1.SecurityContextConstraints: %s", err) 207 | return false 208 | } 209 | 210 | for _, f := range c.filteredClusterRoleBindings.objects.Items { 211 | var crb authv1.ClusterRoleBinding 212 | err := runtime.DefaultUnstructuredConverter. 
			FromUnstructured(f.Object, &crb)
		if err != nil {
			c.log.Warnf("Cannot convert to authv1.ClusterRoleBinding: %s", err)
			continue
		}

		if crb.RoleRef.Kind == "SecurityContextConstraints" && crb.RoleRef.Name == scc.Name {
			c.log.Infof("Accepted %s of kind %s", clusterResource.GetName(), clusterResource.GetKind())
			return true
		} else {
			sccSystemName := fmt.Sprintf("system:openshift:scc:%s", clusterResource.GetName())
			if crb.RoleRef.Kind == "ClusterRole" && crb.RoleRef.Name == sccSystemName {
				c.log.Infof("Accepted %s of kind %s (match via ClusterRoleBinding %s)",
					clusterResource.GetName(), clusterResource.GetKind(), crb.Name)
				return true
			}
		}
	}

	// Last option: look at the users field to see if it contains one of the exported service accounts
	for _, u := range scc.Users {
		if strings.Contains(u, "system:serviceaccount:") && len(strings.Split(u, ":")) == 4 {
			namespaceName := strings.Split(u, ":")[2]
			serviceAccountName := strings.Split(u, ":")[3]
			if c.anyServiceAccountInNamespace(namespaceName, serviceAccountName) {
				c.log.Infof("Accepted %s of kind %s (match via user %s)",
					clusterResource.GetName(), clusterResource.GetKind(), u)
				return true
			}
		}
	}

	return false
}

func (c *ClusterScopedRbacHandler) anyServiceAccountInNamespace(namespaceName string, serviceAccountName string) bool {
	c.log.Debugf("Looking for SA %s in %s", serviceAccountName, namespaceName)
	for _, sa := range c.serviceAccounts {
		if sa.GetName() == serviceAccountName && sa.GetNamespace() == namespaceName {
			return true
		}
	}
	return false
}
--------------------------------------------------------------------------------
/cmd/export/discover.go:
--------------------------------------------------------------------------------
package export

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/sirupsen/logrus"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/pager"
	"sigs.k8s.io/yaml"
)

// groupResource contains the APIGroup and APIResource
type groupResource struct {
	APIGroup        string
	APIVersion      string
	APIGroupVersion string
	APIResource     metav1.APIResource
	objects         *unstructured.UnstructuredList
}

type groupResourceError struct {
	APIResource metav1.APIResource `json:",inline"`
	Error       error              `json:"error"`
}

func writeResources(resources []*groupResource, clusterResourceDir string, resourceDir string, log logrus.FieldLogger) []error {
	errs := []error{}
	for _, r := range resources {
		log.Infof("Writing objects of resource: %s to the output directory\n", r.APIResource.Name)

		kind := r.APIResource.Kind

		if kind == "" {
			continue
		}

		for _, obj := range r.objects.Items {
			targetDir := resourceDir
			if obj.GetNamespace() == "" {
				targetDir = clusterResourceDir
			}
			path := filepath.Join(targetDir, getFilePath(obj))
			f, err := os.Create(path)
			if err != nil {
				errs = append(errs, err)
				continue
			}

			objBytes, err := yaml.Marshal(obj.Object)
			if err != nil {
				errs = append(errs, err)
				continue
			}

			_, err = f.Write(objBytes)
			if err != nil {
				errs = append(errs, err)
				continue
			}

			err = f.Close()
			if err != nil {
				errs = append(errs, err)
				continue
			}

		}
	}

	return errs
}

func writeErrors(errors []*groupResourceError, failuresDir string, log logrus.FieldLogger) []error {
	errs := []error{}
	for _, r := range errors {
		log.Debugf("Writing error for resource %s, error: %#v\n", r.APIResource.Name, r.Error)

		kind := r.APIResource.Kind

		if kind == "" {
			continue
		}

		path := filepath.Join(failuresDir, r.APIResource.Name+".yaml")
		f, err := os.Create(path)
		if err != nil {
			errs = append(errs, err)
			continue
		}

		errBytes, err := yaml.Marshal(&r)
		if err != nil {
			errs = append(errs, err)
			continue
		}

		_, err = f.Write(errBytes)
		if err != nil {
			errs = append(errs, err)
			continue
		}

		err = f.Close()
		if err != nil {
			errs = append(errs, err)
			continue
		}
	}

	return errs
}

func getFilePath(obj unstructured.Unstructured) string {
	namespace := obj.GetNamespace()
	if namespace == "" {
		namespace = "clusterscoped"
	}
	return strings.Join([]string{obj.GetKind(), obj.GetObjectKind().GroupVersionKind().GroupKind().Group, obj.GetObjectKind().GroupVersionKind().Version, namespace, obj.GetName()}, "_") + ".yaml"
}

func resourceToExtract(namespace string, labelSelector string, clusterScopedRbac bool, dynamicClient dynamic.Interface, lists []*metav1.APIResourceList, apiGroups []metav1.APIGroup, log logrus.FieldLogger) ([]*groupResource, []*groupResourceError) {
	resources := []*groupResource{}
	errors := []*groupResourceError{}

	for _, list := range lists {
		if len(list.APIResources) == 0 {
			continue
		}
		gv, err := schema.ParseGroupVersion(list.GroupVersion)
		if err != nil {
			continue
		}
		for _, resource := range list.APIResources {
			if len(resource.Verbs) == 0 {
				continue
			}

			// TODO: alpatel: put this behind a flag
			if resource.Kind == "Event" {
				log.Debugf("skipping extracting events\n")
				continue
			}

			if !isAdmittedResource(clusterScopedRbac, gv, resource) {
				log.Debugf("resource: %s.%s is cluster-scoped or not an admitted kind, skipping\n", gv.String(), resource.Kind)
				continue
			}

			log.Debugf("processing resource: %s.%s\n", gv.String(), resource.Kind)

			g := &groupResource{
				APIGroup:        gv.Group,
				APIVersion:      gv.Version,
				APIGroupVersion: gv.String(),
				APIResource:     resource,
			}

			objs, err := getObjects(g, namespace, labelSelector, dynamicClient, log)
			if err != nil {
				switch {
				case apierrors.IsForbidden(err):
					log.Errorf("cannot list obj in namespace for groupVersion %s, kind: %s\n", g.APIGroupVersion, g.APIResource.Kind)
				case apierrors.IsMethodNotSupported(err):
					log.Errorf("list method not supported on the groupVersion %s, kind: %s\n", g.APIGroupVersion, g.APIResource.Kind)
				case apierrors.IsNotFound(err):
					log.Errorf("could not find the resource, most likely this is a virtual resource, groupVersion %s, kind: %s\n", g.APIGroupVersion, g.APIResource.Kind)
				default:
					log.Errorf("error listing objects: %#v, groupVersion %s, kind: %s\n", err, g.APIGroupVersion, g.APIResource.Kind)
				}
				errors = append(errors, &groupResourceError{resource, err})
				continue
			}

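			// Keep only resources from each API group's preferred version, so
			// a resource served under multiple versions is exported under one
			// of them rather than once per version.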
			preferred := false
			for _, a := range apiGroups {
				if a.Name == gv.Group && a.PreferredVersion.Version == gv.Version {
					preferred = true
				}
			}
			if !preferred {
				continue
			}

			if len(objs.Items) > 0 {
				g.objects = objs
				log.Infof("adding resource: %s to the list of GVRs to be extracted", resource.Name)
				resources = append(resources, g)
				continue
			}

			log.Debugf("0 objects found, for resource %s, skipping\n", resource.Name)
		}
	}

	return resources, errors
}

func isAdmittedResource(clusterScopedRbac bool, gv schema.GroupVersion, resource metav1.APIResource) bool {
	if !resource.Namespaced {
		return clusterScopedRbac && isClusterScopedResource(gv.Group, resource.Kind)
	}
	return true
}

func getObjects(g *groupResource, namespace string, labelSelector string, d dynamic.Interface, logger logrus.FieldLogger) (*unstructured.UnstructuredList, error) {
	c := d.Resource(schema.GroupVersionResource{
		Group:    g.APIGroup,
		Version:  g.APIVersion,
		Resource: g.APIResource.Name,
	})
	p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		if g.APIResource.Namespaced {
			return c.Namespace(namespace).List(context.Background(), opts)
		} else {
			return c.List(context.Background(), opts)
		}
	})
	listOptions := metav1.ListOptions{}
	if labelSelector != "" {
		listOptions.LabelSelector = labelSelector
	}

	list, _, err := p.List(context.TODO(), listOptions)
	if err != nil {
		return nil, err
	}
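	// imagestreamtags and imagetags are not taken straight from the list
	// response; each item is re-read individually with a GET (see
	// iterateItemsByGet) before being added to the result.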
	if g.APIResource.Name == "imagestreamtags" || g.APIResource.Name == "imagetags" {
		unstructuredList, err := iterateItemsByGet(c, g, list, namespace, logger)
		if err != nil {
			return nil, err
		}
		return unstructuredList, nil
	}
	return iterateItemsInList(list, g, logger)
}

func iterateItemsByGet(c dynamic.NamespaceableResourceInterface, g *groupResource, list runtime.Object, namespace string, logger logrus.FieldLogger) (*unstructured.UnstructuredList, error) {
	unstructuredList := &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
	err := meta.EachListItem(list, func(object runtime.Object) error {
		u, ok := object.(*unstructured.Unstructured)
		if !ok {
			// TODO: explore aggregating all the errors here instead of terminating the loop
			logger.Errorf("expected *unstructured.Unstructured but got %T for groupResource %s and object: %#v\n", object, g.APIResource.Name, object)
			return fmt.Errorf("expected *unstructured.Unstructured but got %T", object)
		}
		obj, err := c.Namespace(namespace).Get(context.TODO(), u.GetName(), metav1.GetOptions{})
		if err != nil {
			return err
		}
		unstructuredList.Items = append(unstructuredList.Items, *obj)
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("unable to process the list for group: %s, kind: %s", g.APIGroup, g.APIResource.Kind)
	}
	return unstructuredList, nil
}

func iterateItemsInList(list runtime.Object, g *groupResource, logger logrus.FieldLogger) (*unstructured.UnstructuredList, error) {
	unstructuredList := &unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
	err := meta.EachListItem(list, func(object runtime.Object) error {
		u, ok := object.(*unstructured.Unstructured)
		if !ok {
			// TODO: explore aggregating all the errors here instead of terminating the loop
			logger.Errorf("expected *unstructured.Unstructured but got %T for groupResource %s and object: %#v\n", object, g.APIResource.Name, object)
			return fmt.Errorf("expected *unstructured.Unstructured but got %T", object)
		}
		unstructuredList.Items = append(unstructuredList.Items, *u)
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("unable to process the list for group: %s, kind: %s", g.APIGroup, g.APIResource.Kind)
	}
	return unstructuredList, nil
}
--------------------------------------------------------------------------------
/cmd/export/export.go:
--------------------------------------------------------------------------------
package export

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/konveyor/crane/internal/flags"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/discovery"
	"github.com/vmware-tanzu/velero/pkg/features"
	errorsutil "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd/api"
)

type ExportOptions struct {
	configFlags *genericclioptions.ConfigFlags

	// Two GlobalFlags struct fields are needed
	// 1. cobraGlobalFlags for explicit CLI args parsed by cobra
	// 2. globalFlags for the args merged with values from the viper config file
	cobraGlobalFlags *flags.GlobalFlags
	globalFlags      *flags.GlobalFlags

	rawConfig              api.Config
	exportDir              string
	labelSelector          string
	userSpecifiedNamespace string
	clusterScopedRbac      bool
	asExtras               string
	extras                 map[string][]string
	QPS                    float32
	Burst                  int

	genericclioptions.IOStreams
}

func (o *ExportOptions) Complete(c *cobra.Command, args []string) error {
	var err error

	o.rawConfig, err = o.configFlags.ToRawKubeConfigLoader().RawConfig()
	if err != nil {
		return err
	}

	o.userSpecifiedNamespace, _, err = o.configFlags.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		return err
	}

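	// Parse the --as-extras impersonation string, e.g.
	// "key=string1,string2;key2=string3" becomes
	// map[string][]string{"key": {"string1", "string2"}, "key2": {"string3"}}.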
	if o.asExtras != "" {
		keysAndStrings := strings.Split(o.asExtras, ";")
		o.extras = map[string][]string{}
		for _, keysAndString := range keysAndStrings {
			keyString := strings.Split(keysAndString, "=")
			if len(keyString) != 2 {
				return fmt.Errorf("extra options (%v) formatted incorrectly", o.asExtras)
			}
			o.extras[keyString[0]] = strings.Split(keyString[1], ",")
		}
	}

	return nil
}

func (o *ExportOptions) Validate() error {
	if o.asExtras != "" && *o.configFlags.Impersonate == "" && len(*o.configFlags.ImpersonateGroup) == 0 {
		return fmt.Errorf("extras requires specifying a user or group to impersonate")
	}
	return nil
}

func (o *ExportOptions) Run() error {
	var err error

	log := o.globalFlags.GetLogger()

	// create the export directory if it doesn't exist
	resourceDir := filepath.Join(o.exportDir, "resources", o.userSpecifiedNamespace)
	err = os.MkdirAll(resourceDir, 0700)
	switch {
	case os.IsExist(err):
	case err != nil:
		log.Errorf("error creating the resources directory: %#v", err)
		return err
	}
	// create the _cluster directory if it doesn't exist
	clusterResourceDir := filepath.Join(o.exportDir, "resources", o.userSpecifiedNamespace, "_cluster")
	if o.clusterScopedRbac {
		err = os.MkdirAll(clusterResourceDir, 0700)
		switch {
		case os.IsExist(err):
		case err != nil:
			log.Errorf("error creating the cluster resources directory: %#v", err)
			return err
		}
	}
	// create the failures directory if it doesn't exist
	err = os.MkdirAll(filepath.Join(o.exportDir, "failures", o.userSpecifiedNamespace), 0700)
	switch {
	case os.IsExist(err):
	case err != nil:
		log.Errorf("error creating the failures directory: %#v", err)
		return err
	}

	discoveryClient, err := o.configFlags.ToDiscoveryClient()
	if err != nil {
		log.Errorf("cannot create discovery client: %#v", err)
		return err
	}

	// Always request fresh data from the server
	discoveryClient.Invalidate()

	restConfig, err := o.configFlags.ToRESTConfig()
	if err != nil {
		log.Errorf("cannot create rest config: %#v", err)
		return err
	}

	// user/group impersonation is handled from genericclioptions.ConfigFlags
	restConfig.Impersonate.Extra = o.extras
	restConfig.Burst = o.Burst
	restConfig.QPS = o.QPS

	dynamicClient := dynamic.NewForConfigOrDie(restConfig)

	features.NewFeatureFlagSet()
	features.Enable(velerov1api.APIGroupVersionsFeatureFlag)

	discoveryHelper, err := discovery.NewHelper(discoveryClient, log)
| if err != nil { 139 | log.Errorf("cannot create discovery helper: %#v", err) 140 | return err 141 | } 142 | 143 | var errs []error 144 | 145 | resources, resourceErrs := resourceToExtract(o.userSpecifiedNamespace, o.labelSelector, o.clusterScopedRbac, dynamicClient, discoveryHelper.Resources(), discoveryHelper.APIGroups(), log) 146 | clusterScopeHandler := NewClusterScopeHandler() 147 | if o.clusterScopedRbac { 148 | resources = clusterScopeHandler.filterRbacResources(resources, log) 149 | } 150 | 151 | log.Debugf("attempting to write resources to files\n") 152 | writeResourcesErrors := writeResources(resources, clusterResourceDir, resourceDir, log) 153 | for _, e := range writeResourcesErrors { 154 | log.Warnf("error writing manifests to file: %#v, ignoring\n", e) 155 | } 156 | 157 | writeErrorsErrors := writeErrors(resourceErrs, filepath.Join(o.exportDir, "failures", o.userSpecifiedNamespace), log) 158 | for _, e := range writeErrorsErrors { 159 | log.Warnf("error writing errors to file: %#v, ignoring\n", e) 160 | } 161 | 162 | errs = append(errs, writeResourcesErrors...) 163 | errs = append(errs, writeErrorsErrors...) 164 | 165 | return errorsutil.NewAggregate(errs) 166 | } 167 | 168 | func NewExportCommand(streams genericclioptions.IOStreams, f *flags.GlobalFlags) *cobra.Command { 169 | o := &ExportOptions{ 170 | configFlags: genericclioptions.NewConfigFlags(true), 171 | 172 | IOStreams: streams, 173 | cobraGlobalFlags: f, 174 | } 175 | cmd := &cobra.Command{ 176 | Use: "export", 177 | Short: "Export the namespace resources to an output directory", 178 | RunE: func(c *cobra.Command, args []string) error { 179 | if err := o.Complete(c, args); err != nil { 180 | return err 181 | } 182 | if err := o.Validate(); err != nil { 183 | return err 184 | } 185 | if err := o.Run(); err != nil { 186 | return err 187 | } 188 | 189 | return nil 190 | }, 191 | PreRun: func(cmd *cobra.Command, args []string) { 192 | viper.BindPFlags(cmd.Flags()) 193 | viper.Unmarshal(&o.globalFlags) 194 | viper.Unmarshal(&o.configFlags) 195 | viper.UnmarshalKey("export-dir", &o.exportDir) 196 | }, 197 | } 198 | 199 | cmd.Flags().StringVarP(&o.exportDir, "export-dir", "e", "export", "The path where files are to be exported") 200 | cmd.Flags().StringVarP(&o.labelSelector, "label-selector", "l", "", "Restrict export to resources matching a label selector") 201 | cmd.Flags().BoolVarP(&o.clusterScopedRbac, "cluster-scoped-rbac", "c", false, "Include cluster-scoped RBAC resources") 202 | cmd.Flags().StringVar(&o.asExtras, "as-extras", "", "Extra info for impersonation; can only be used with User or Group, and is not required. 
An example is --as-extras key=string1,string2;key2=string3") 203 | cmd.Flags().Float32VarP(&o.QPS, "qps", "q", 100, "Queries-per-second rate.") 204 | cmd.Flags().IntVarP(&o.Burst, "burst", "b", 1000, "API burst rate.") 205 | o.configFlags.AddFlags(cmd.Flags()) 206 | 207 | return cmd 208 | } 209 | -------------------------------------------------------------------------------- /cmd/plugin-manager/add/add.go: -------------------------------------------------------------------------------- 1 | package add 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "net/http" 9 | "os" 10 | "path/filepath" 11 | "syscall" 12 | 13 | "github.com/konveyor/crane/internal/flags" 14 | "github.com/konveyor/crane/internal/plugin" 15 | "github.com/sirupsen/logrus" 16 | "github.com/spf13/cobra" 17 | "github.com/spf13/viper" 18 | "golang.org/x/mod/semver" 19 | ) 20 | 21 | type Options struct { 22 | // Two GlobalFlags struct fields are needed 23 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 24 | // 2. globalFlags for the args merged with values from the viper config file 25 | 26 | cobraGlobalFlags *flags.GlobalFlags 27 | globalFlags *flags.GlobalFlags 28 | // Two Flags struct fields are needed 29 | // 1. cobraFlags for explicit CLI args parsed by cobra 30 | // 2. Flags for the args merged with values from the viper config file 31 | cobraFlags Flags 32 | Flags 33 | } 34 | 35 | type Flags struct { 36 | Repo string `mapstructure:"repo"` 37 | PluginDir string `mapstructure:"plugin-dir"` 38 | Version string `mapstructure:"version"` 39 | Global bool `mapstructure:"global"` 40 | } 41 | 42 | func (o *Options) Complete(c *cobra.Command, args []string) error { 43 | // TODO: @jgabani 44 | return nil 45 | } 46 | 47 | func (o *Options) Validate(args []string) error { 48 | // TODO: @jgabani 49 | 50 | if len(args) != 1 { 51 | return errors.New("please input only one plugin name") 52 | } 53 | 54 | if o.Global { 55 | if o.PluginDir == os.Getenv("HOME")+plugin.DefaultLocalPluginDir { 56 | o.PluginDir = plugin.GlobalPluginDir 57 | } else { 58 | return errors.New("--plugin-dir and --global should not be used together") 59 | } 60 | } 61 | 62 | pluginDir, err := filepath.Abs(o.PluginDir) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | files, err := ioutil.ReadDir(pluginDir) 68 | if err != nil { 69 | if os.IsNotExist(err) { 70 | return nil 71 | } 72 | return err 73 | } 74 | 75 | paths, err := plugin.LocateBinaryInPluginDir(o.PluginDir, args[0], files) 76 | if err != nil { 77 | return err 78 | } 79 | 80 | if len(paths) > 0 { 81 | // TODO: if a version is specified and the plugin is installed, have the discussion on what to do here 82 | for _, path := range paths { 83 | fmt.Printf("%s \n", path) 84 | } 85 | return errors.New("the binary is already installed at the above path; either delete the binary or specify a repo from which the binary should be installed") 86 | } 87 | return nil 88 | } 89 | 90 | func (o *Options) Run(args []string) error { 91 | return o.run(args) 92 | } 93 | 94 | func NewAddCommand(f *flags.GlobalFlags) *cobra.Command { 95 | o := &Options{ 96 | globalFlags: f, 97 | } 98 | log := o.globalFlags.GetLogger() 99 | cmd := &cobra.Command{ 100 | Use: "add <plugin>", 101 | Short: "Installs the desired plugin", 102 | RunE: func(c *cobra.Command, args []string) error { 103 | if err := o.Complete(c, args); err != nil { 104 | return err 105 | } 106 | if err := o.Validate(args); err != nil { 107 | log.Errorf("%s", err.Error()) 108 | return nil 109 | } 110 | if err := 
o.Run(args); err != nil { 111 | return err 112 | } 113 | return nil 114 | }, 115 | PreRun: func(cmd *cobra.Command, args []string) { 116 | viper.BindPFlags(cmd.Flags()) 117 | viper.Unmarshal(&o.Flags) 118 | viper.Unmarshal(&o.globalFlags) 119 | }, 120 | } 121 | 122 | addFlagsForOptions(&o.cobraFlags, cmd) 123 | return cmd 124 | } 125 | 126 | func addFlagsForOptions(o *Flags, cmd *cobra.Command) { 127 | cmd.Flags().StringVarP(&o.Version, "version", "", "", "Install specific plugin version (if not passed, installs latest plugin version or the only available one)") 128 | cmd.Flags().BoolVar(&o.Global, "global", false, "Perform a global plugin install to /usr/local/share/crane/plugins") 129 | } 130 | 131 | func (o *Options) run(args []string) error { 132 | log := o.globalFlags.GetLogger() 133 | 134 | manifestMap, err := plugin.BuildManifestMap(log, args[0], o.Repo) 135 | if err != nil { 136 | return err 137 | } 138 | 139 | installVersion := "" 140 | if o.Version != "" { 141 | installVersion = o.Version 142 | } 143 | 144 | switch { 145 | case len(manifestMap) > 1: 146 | // if the plugin is found across multiple repositories, fail and ask for a specific repo 147 | // TODO: if the version is mentioned look for a plugin with the same version, if found in only one repo add the same else fail and ask for the repo 148 | log.Errorf("The plugin %s is found across multiple repos, please specify one repo with the --repo flag", args[0]) 149 | case len(manifestMap) == 1: 150 | // the plugin is found in only one repo 151 | for _, pluginsMap := range manifestMap { 152 | switch { 153 | // install the only available version of the plugin 154 | case len(pluginsMap[args[0]]) == 1: 155 | for _, value := range pluginsMap[args[0]] { 156 | // check if the version is mentioned and matches the version in the pluginsMap file 157 | if value.Name != "" && (o.Version == "" || string(value.Version) == o.Version) { 158 | return downloadBinary(o.PluginDir, value.Name, value.Binaries[0].URI, log) 159 | } else { 160 | log.Errorf("The version %s of plugin %s is not available", installVersion, value.Name) 161 | fmt.Printf("Run \"crane plugin-manager list --name %s --params\" to see available versions along with additional information \n", args[0]) 162 | } 163 | } 164 | case len(pluginsMap[args[0]]) > 1: 165 | // if multiple versions of the plugin are available, look for the latest or the mentioned version; if not found, fail and ask the user to input a version using the --version flag 166 | if installVersion == "" { 167 | availableVersions := []string{} 168 | for _, value := range pluginsMap[args[0]] { 169 | availableVersions = append(availableVersions, string(value.Version)) 170 | } 171 | semver.Sort(availableVersions) 172 | installVersion = availableVersions[len(availableVersions)-1] 173 | } 174 | for _, value := range pluginsMap[args[0]] { 175 | if string(value.Version) == installVersion { 176 | return downloadBinary(o.PluginDir, value.Name, value.Binaries[0].URI, log) 177 | } 178 | } 179 | log.Errorf("The %s version of the plugin %s was not found", installVersion, args[0]) 180 | fmt.Printf("Run \"crane plugin-manager list --name %s --params\" to see available versions along with additional information \n", args[0]) 181 | default: 182 | // report that the plugin does not exist 183 | log.Errorf("The plugin %s was not found", args[0]) 184 | fmt.Println("Run \"crane plugin-manager list\" to list all the available plugins") 185 | } 186 | } 187 | default: 
188 | // report that the plugin does not exist 189 | fmt.Println("Run \"crane plugin-manager list\" to list all the available plugins") 190 | return fmt.Errorf("the plugin %s was not found", args[0]) 191 | } 192 | return nil 193 | } 194 | 195 | func downloadBinary(filepath string, filename string, url string, log *logrus.Logger) error { 196 | var binaryContents io.Reader 197 | isUrl, url := plugin.IsUrl(url) 198 | if !isUrl { 199 | srcPlugin, err := os.Open(url) 200 | if err != nil { 201 | return err 202 | } 203 | defer srcPlugin.Close() 204 | binaryContents = srcPlugin 205 | } else { 206 | // Get the data 207 | resp, err := http.Get(url) 208 | if err != nil { 209 | return err 210 | } 211 | defer resp.Body.Close() 212 | binaryContents = resp.Body 213 | } 214 | // Create the dir if it does not exist 215 | if _, err := os.Stat(filepath); os.IsNotExist(err) { 216 | err = os.MkdirAll(filepath, os.ModePerm) 217 | if err != nil { 218 | return err 219 | } 220 | } 221 | 222 | // Create the file 223 | pluginBinary, err := os.OpenFile(filepath+"/"+filename, syscall.O_RDWR|syscall.O_CREAT|syscall.O_TRUNC, 0777) 224 | if err != nil { 225 | return err 226 | } 227 | defer pluginBinary.Close() 228 | 229 | // Write the body to the plugin binary file 230 | _, err = io.Copy(pluginBinary, binaryContents) 231 | if err != nil { 232 | return err 233 | } 234 | err = pluginBinary.Sync() 235 | if err != nil { 236 | return err 237 | } 238 | log.Infof("plugin binary %s added to the path - %s", filename, filepath) 239 | return err 240 | } 241 | -------------------------------------------------------------------------------- /cmd/plugin-manager/list/list.go: -------------------------------------------------------------------------------- 1 | package list 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "reflect" 7 | "strings" 8 | 9 | transform2 "github.com/konveyor/crane-lib/transform" 10 | "github.com/konveyor/crane/internal/flags" 11 | "github.com/konveyor/crane/internal/plugin" 12 | "github.com/olekukonko/tablewriter" 13 | "github.com/spf13/cobra" 14 | "github.com/spf13/viper" 15 | ) 16 | 17 | type Options struct { 18 | // Two GlobalFlags struct fields are needed 19 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 20 | // 2. globalFlags for the args merged with values from the viper config file 21 | cobraGlobalFlags *flags.GlobalFlags 22 | globalFlags *flags.GlobalFlags 23 | // Two Flags struct fields are needed 24 | // 1. cobraFlags for explicit CLI args parsed by cobra 25 | // 2. 
Flags for the args merged with values from the viper config file 26 | cobraFlags Flags 27 | Flags 28 | } 29 | 30 | type Flags struct { 31 | Repo string `mapstructure:"repo"` 32 | Installed bool `mapstructure:"installed"` 33 | PluginDir string `mapstructure:"plugin-dir"` 34 | Params bool `mapstructure:"params"` 35 | Name string `mapstructure:"name"` 36 | Versions bool `mapstructure:"versions"` 37 | } 38 | 39 | type AvailablePlugins struct { 40 | Name string 41 | ShortDescription string 42 | Description string 43 | Versions []string 44 | } 45 | 46 | func (o *Options) Complete(c *cobra.Command, args []string) error { 47 | // TODO: @jgabani 48 | return nil 49 | } 50 | 51 | func (o *Options) Validate() error { 52 | // TODO: @jgabani 53 | return nil 54 | } 55 | 56 | func (o *Options) Run() error { 57 | return o.run() 58 | } 59 | 60 | func NewListCommand(f *flags.GlobalFlags) *cobra.Command { 61 | o := &Options{ 62 | globalFlags: f, 63 | } 64 | cmd := &cobra.Command{ 65 | Use: "list", 66 | Short: "Lists all the available plugins", 67 | RunE: func(c *cobra.Command, args []string) error { 68 | if err := o.Complete(c, args); err != nil { 69 | return err 70 | } 71 | if err := o.Validate(); err != nil { 72 | return err 73 | } 74 | if err := o.Run(); err != nil { 75 | return err 76 | } 77 | 78 | return nil 79 | }, 80 | PreRun: func(cmd *cobra.Command, args []string) { 81 | viper.BindPFlags(cmd.Flags()) 82 | viper.Unmarshal(&o.Flags) 83 | viper.Unmarshal(&o.globalFlags) 84 | }, 85 | } 86 | addFlagsForOptions(&o.cobraFlags, cmd) 87 | return cmd 88 | } 89 | 90 | func addFlagsForOptions(o *Flags, cmd *cobra.Command) { 91 | // TODO: display installed plugin information 92 | cmd.Flags().BoolVarP(&o.Installed, "installed", "", false, "Flag to list installed plugins.") 93 | cmd.Flags().BoolVarP(&o.Params, "params", "", false, "If passed, returns metadata information for all the versions of a specific plugin. This flag is to be used with the \"--name\" flag. Takes precedence over \"--versions\" if both are passed.") 94 | cmd.Flags().StringVarP(&o.Name, "name", "", "", "To be used with the \"--params\" or \"--versions\" flags to specify the plugin for which additional information is needed. In case of conflict, the command fails and asks for specific repository information.") 95 | cmd.Flags().BoolVarP(&o.Versions, "versions", "", false, "If passed, returns all the versions available for a plugin. 
This flag is to be used with the \"--name\" flag.") 96 | } 97 | 98 | func (o *Options) run() error { 99 | log := o.globalFlags.GetLogger() 100 | if o.Installed { 101 | // retrieve list of all the plugins that are installed within plugin dir 102 | // TODO: differentiate between multiple repos 103 | plugins, err := plugin.GetFilteredPlugins(o.PluginDir, []string{}, log) 104 | if err != nil { 105 | return err 106 | } 107 | fmt.Printf("Listing plugins from path - %s, along with the default plugin\n", o.PluginDir) 108 | printInstalledInformation(plugins) 109 | return nil 110 | } 111 | 112 | // if the name flag is used then either get all the information of the plugin or get all the version of the plugin 113 | if o.Name != "" && (o.Params || o.Versions) { 114 | manifestMap, err := plugin.BuildManifestMap(log, o.Name, o.Repo) 115 | if err != nil { 116 | return err 117 | } 118 | if len(manifestMap) == 0 { 119 | log.Errorf("The plugin %s was not found", o.Name) 120 | return nil 121 | } 122 | if o.Params { 123 | // retrieve all the information for all the versions available for a specific plugin 124 | for repo, pluginsMap := range manifestMap { 125 | fmt.Printf("Listing from the repo %s\n", repo) 126 | printParamsInformation(pluginsMap) 127 | } 128 | } else if o.Versions { 129 | // retrieve all the available versions of the plugin 130 | for repo, pluginsMap := range manifestMap { 131 | for name, pluginVersions := range pluginsMap { 132 | fmt.Printf("Listing versions of plugin %s from the repo %s\n", name, repo) 133 | for _, pluginVersion := range pluginVersions { 134 | fmt.Printf("Version: %s\n", pluginVersion.Version) 135 | } 136 | } 137 | } 138 | } else { 139 | log.Errorf("the --name flag must be used with either --params or --versions") 140 | } 141 | } else { 142 | manifestMap, err := plugin.BuildManifestMap(log, "", o.Repo) 143 | if err != nil { 144 | return err 145 | } 146 | 147 | if o.Name != "" { 148 | log.Infof("\"--name\" flag should be used with either \"--versions\" or \"--params\" flag to get more information about the plugin, example: \"crane plugin-manager list --name %s --versions\" or \"crane plugin-manager list --name %s --params\"\n", o.Name, o.Name) 149 | } else if o.Params { 150 | // retrieve all the information for all versions of all available plugins 151 | for repo, pluginsMap := range manifestMap { 152 | fmt.Printf("Listing from the repo %s\n", repo) 153 | printParamsInformation(pluginsMap) 154 | } 155 | } else { 156 | for repo, pluginsMap := range manifestMap { 157 | // output information 158 | fmt.Printf("Listing from the repo %s\n", repo) 159 | groupInformationForPlugins(pluginsMap) 160 | } 161 | } 162 | } 163 | return nil 164 | } 165 | 166 | //TODO: this can be merged with printParamsInformation 167 | func printInstalledInformation(plugins []transform2.Plugin) { 168 | for _, thisPlugin := range plugins { 169 | printTable([][]string{ 170 | {"Name", thisPlugin.Metadata().Name}, 171 | {"Version", thisPlugin.Metadata().Version}, 172 | {"OptionalFields", getOptionalFields(thisPlugin.Metadata().OptionalFields)}, 173 | }) 174 | } 175 | } 176 | 177 | func groupInformationForPlugins(pluginsMap map[string][]plugin.PluginVersion) { 178 | availablePlugin := map[string]AvailablePlugins{} 179 | for _, pluginVersions := range pluginsMap { 180 | for _, pluginVersion := range pluginVersions { 181 | if _, ok := availablePlugin[pluginVersion.Name]; ok { 182 | availablePlugin[pluginVersion.Name] = AvailablePlugins{Name: pluginVersion.Name, ShortDescription: 
pluginVersion.ShortDescription, Versions: append(availablePlugin[pluginVersion.Name].Versions, string(pluginVersion.Version))} 183 | } else { 184 | availablePlugin[pluginVersion.Name] = AvailablePlugins{ 185 | Name: pluginVersion.Name, 186 | ShortDescription: pluginVersion.ShortDescription, 187 | Versions: []string{string(pluginVersion.Version)}, 188 | } 189 | } 190 | } 191 | } 192 | 193 | printInformation(availablePlugin) 194 | } 195 | 196 | func printInformation(availablePlugins map[string]AvailablePlugins) { 197 | for _, availablePlugin := range availablePlugins { 198 | if availablePlugin.Name != "" { 199 | printTable([][]string{ 200 | {"Name", availablePlugin.Name}, 201 | {"ShortDescription", availablePlugin.ShortDescription}, 202 | {"AvailableVersions", strings.Join(availablePlugin.Versions, ", ")}, 203 | }) 204 | } 205 | } 206 | } 207 | 208 | func printParamsInformation(pluginsMap map[string][]plugin.PluginVersion) { 209 | for _, pluginVersions := range pluginsMap { 210 | for _, pluginVersion := range pluginVersions { 211 | printTable([][]string{ 212 | {"Name", pluginVersion.Name}, 213 | {"ShortDescription", pluginVersion.ShortDescription}, 214 | {"Description", pluginVersion.Description}, 215 | {"AvailableVersions", string(pluginVersion.Version)}, 216 | {"OptionalFields", getOptionalFields(pluginVersion.OptionalFields)}, 217 | }) 218 | } 219 | } 220 | } 221 | 222 | func getOptionalFields(fields []transform2.OptionalFields) string { 223 | var retstr string 224 | if len(fields) > 0 { 225 | var strs []string 226 | for _, field := range fields { 227 | optionalField := reflect.ValueOf(&field).Elem() 228 | typeOfT := optionalField.Type() 229 | 230 | for i := 0; i < optionalField.NumField(); i++ { 231 | f := optionalField.Field(i) 232 | var prefix string 233 | if i == 0 { 234 | prefix = "- " 235 | } else { 236 | prefix = " " 237 | } 238 | strs = append(strs, fmt.Sprintf("%s%s: %v", prefix, 239 | typeOfT.Field(i).Name, f.Interface())) 240 | } 241 | } 242 | retstr = strings.Join(strs, "\n") 243 | } 244 | return retstr 245 | } 246 | 247 | func printTable(data [][]string) { 248 | table := tablewriter.NewWriter(os.Stdout) 249 | table.SetAutoWrapText(false) 250 | table.AppendBulk(data) 251 | table.Render() 252 | } 253 | -------------------------------------------------------------------------------- /cmd/plugin-manager/plugin-manager.go: -------------------------------------------------------------------------------- 1 | package plugin_manager 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/konveyor/crane/cmd/plugin-manager/add" 7 | "github.com/konveyor/crane/cmd/plugin-manager/list" 8 | "github.com/konveyor/crane/cmd/plugin-manager/remove" 9 | "github.com/konveyor/crane/internal/flags" 10 | "github.com/konveyor/crane/internal/plugin" 11 | "github.com/spf13/cobra" 12 | "github.com/spf13/viper" 13 | ) 14 | 15 | type Options struct { 16 | // Two GlobalFlags struct fields are needed 17 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 18 | // 2. globalFlags for the args merged with values from the viper config file 19 | cobraGlobalFlags *flags.GlobalFlags 20 | globalFlags *flags.GlobalFlags 21 | // Two Flags struct fields are needed 22 | // 1. cobraFlags for explicit CLI args parsed by cobra 23 | // 2. 
Flags for the args merged with values from the viper config file 24 | cobraFlags Flags 25 | Flags 26 | } 27 | 28 | type Flags struct { 29 | PluginDir string `mapstructure:"plugin-dir"` 30 | Repo string `mapstructure:"repo"` 31 | } 32 | 33 | func (o *Options) Complete(c *cobra.Command, args []string) error { 34 | // TODO: @jgabani 35 | return nil 36 | } 37 | 38 | func (o *Options) Validate() error { 39 | // TODO: @jgabani 40 | return nil 41 | } 42 | 43 | func (o *Options) Run() error { 44 | return o.run() 45 | } 46 | 47 | func NewPluginManagerCommand(f *flags.GlobalFlags) *cobra.Command { 48 | o := &Options{ 49 | cobraGlobalFlags: f, 50 | } 51 | cmd := &cobra.Command{ 52 | Use: "plugin-manager", 53 | Short: "Plugin-manager is a command that helps manage plugins", 54 | RunE: func(c *cobra.Command, args []string) error { 55 | if err := o.Complete(c, args); err != nil { 56 | return err 57 | } 58 | if err := o.Validate(); err != nil { 59 | return err 60 | } 61 | if err := o.Run(); err != nil { 62 | return err 63 | } 64 | 65 | return nil 66 | }, 67 | PreRun: func(cmd *cobra.Command, args []string) { 68 | viper.BindPFlags(cmd.Flags()) 69 | viper.Unmarshal(&o.globalFlags) 70 | }, 71 | } 72 | addFlagsForOptions(&o.cobraFlags, cmd) 73 | cmd.AddCommand(list.NewListCommand(f)) 74 | cmd.AddCommand(add.NewAddCommand(f)) 75 | cmd.AddCommand(remove.NewRemoveCommand(f)) 76 | return cmd 77 | } 78 | func addFlagsForOptions(o *Flags, cmd *cobra.Command) { 79 | 80 | home := os.Getenv("HOME") 81 | defaultPluginDir := home + plugin.DefaultLocalPluginDir 82 | cmd.PersistentFlags().StringVarP(&o.PluginDir, "plugin-dir", "p", defaultPluginDir, "The path where binary plugins are located") 83 | cmd.PersistentFlags().StringVarP(&o.Repo, "repo", "", "", "The name of the repository from which to list the plugins") 84 | } 85 | 86 | func (o *Options) run() error { 87 | return nil 88 | } 89 | -------------------------------------------------------------------------------- /cmd/plugin-manager/remove/remove.go: -------------------------------------------------------------------------------- 1 | package remove 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/konveyor/crane/internal/flags" 10 | "github.com/konveyor/crane/internal/plugin" 11 | "github.com/spf13/cobra" 12 | "github.com/spf13/viper" 13 | ) 14 | 15 | type Options struct { 16 | // Two GlobalFlags struct fields are needed 17 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 18 | // 2. globalFlags for the args merged with values from the viper config file 19 | cobraGlobalFlags *flags.GlobalFlags 20 | globalFlags *flags.GlobalFlags 21 | // Two Flags struct fields are needed 22 | // 1. cobraFlags for explicit CLI args parsed by cobra 23 | // 2. 
Flags for the args merged with values from the viper config file 24 | cobraFlags Flags 25 | Flags 26 | } 27 | 28 | type Flags struct { 29 | Repo string `mapstructure:"repo"` 30 | PluginDir string `mapstructure:"plugin-dir"` 31 | } 32 | 33 | func (o *Options) Complete(c *cobra.Command, args []string) error { 34 | // TODO: @jgabani 35 | return nil 36 | } 37 | 38 | func (o *Options) Validate() error { 39 | // TODO: @jgabani 40 | return nil 41 | } 42 | 43 | func (o *Options) Run(args []string) error { 44 | return o.run(args) 45 | } 46 | 47 | func NewRemoveCommand(f *flags.GlobalFlags) *cobra.Command { 48 | o := &Options{ 49 | globalFlags: f, 50 | } 51 | cmd := &cobra.Command{ 52 | Use: "remove <plugin>", 53 | Short: "Removes the desired plugin", 54 | RunE: func(c *cobra.Command, args []string) error { 55 | if err := o.Complete(c, args); err != nil { 56 | return err 57 | } 58 | if err := o.Validate(); err != nil { 59 | return err 60 | } 61 | if err := o.Run(args); err != nil { 62 | return err 63 | } 64 | 65 | return nil 66 | }, 67 | PreRun: func(cmd *cobra.Command, args []string) { 68 | viper.BindPFlags(cmd.Flags()) 69 | viper.Unmarshal(&o.Flags) 70 | viper.Unmarshal(&o.globalFlags) 71 | }, 72 | } 73 | 74 | return cmd 75 | } 76 | 77 | func (o *Options) run(args []string) error { 78 | log := o.globalFlags.GetLogger() 79 | pluginDir, err := filepath.Abs(fmt.Sprintf("%v/%v", o.PluginDir, o.Repo)) 80 | if err != nil { 81 | return err 82 | } 83 | 84 | files, err := ioutil.ReadDir(pluginDir) 85 | if err != nil { 86 | return err 87 | } 88 | 89 | paths, err := plugin.LocateBinaryInPluginDir(pluginDir, args[0], files) 90 | if err != nil { 91 | return err 92 | } 93 | 94 | if len(paths) > 1 { 95 | // fail and ask for a specific repo 96 | log.Errorf("The binary is installed from multiple sources, please specify the repository from which you want to remove the plugin using --repo\n") 97 | fmt.Printf("The binary is present in the following paths:\n") 98 | for _, path := range paths { 99 | fmt.Printf("%s \n", path) 100 | } 101 | } else if len(paths) == 0 { 102 | log.Errorf("Plugin %s not found in the plugin dir %s", args[0], pluginDir) 103 | fmt.Printf("Run \"crane plugin-manager list --installed -p %s\" to see the list of installed plugins \n", pluginDir) 104 | } else { 105 | err = os.Remove(paths[0]) 106 | if err != nil { 107 | return err 108 | } 109 | log.Infof("The plugin %s was removed from path - %s", args[0], paths[0]) 110 | } 111 | 112 | return nil 113 | } 114 | -------------------------------------------------------------------------------- /cmd/runfn/runfn.go: -------------------------------------------------------------------------------- 1 | package runfn 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "github.com/konveyor/crane/internal/flags" 7 | "io" 8 | "sigs.k8s.io/kustomize/kyaml/yaml" 9 | "strings" 10 | 11 | "github.com/spf13/cobra" 12 | "sigs.k8s.io/kustomize/cmd/config/runner" 13 | "sigs.k8s.io/kustomize/kyaml/errors" 14 | "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" 15 | "sigs.k8s.io/kustomize/kyaml/runfn" 16 | ) 17 | 18 | type Options struct { 19 | // cobraGlobalFlags for explicit CLI args parsed by cobra 20 | cobraGlobalFlags *flags.GlobalFlags 21 | Flags 22 | } 23 | 24 | type Flags struct { 25 | ExportDir string 26 | TransformDir string 27 | Image string 28 | Env []string 29 | RunFns runfn.RunFns 30 | TransformedContent bytes.Buffer 31 | } 32 | 33 | func NewFnRunCommand(f *flags.GlobalFlags) *cobra.Command { 34 | o := &Options{ 35 | cobraGlobalFlags: f, 36 | } 37 | cmd := &cobra.Command{ 
38 | Use: "runfn [IMAGE] [flags] [--args]", 39 | Long: "Transform resources by executing a KRM function.\n\nExperimental: This command is under active development and may change without notice.", 40 | Short: "Transform resources by executing a KRM function", 41 | RunE: o.runE, 42 | PreRunE: o.preRunE, 43 | SilenceUsage: true, 44 | } 45 | addFlagsForOptions(&o.Flags, cmd) 46 | return cmd 47 | } 48 | 49 | func addFlagsForOptions(o *Flags, cmd *cobra.Command) { 50 | cmd.Flags().StringVarP(&o.ExportDir, "export-dir", "e", "export", 51 | "Path to the local directory containing exported resources") 52 | cmd.Flags().StringVarP(&o.TransformDir, "transform-dir", "t", "transform", 53 | "The path where transformed resources are written") 54 | cmd.Flags().StringArrayVarP( 55 | &o.Env, "env", "", []string{}, 56 | "a list of environment variables to be used by functions") 57 | } 58 | 59 | func (o *Options) runE(c *cobra.Command, _ []string) error { 60 | if err := runner.HandleError(c, o.RunFns.Execute()); err != nil { 61 | return err 62 | } 63 | if err := WriteOutput(o.TransformDir, o.TransformedContent.String()); err != nil { 64 | return err 65 | } 66 | fmt.Println("Transformed resources are written to:", o.TransformDir) 67 | return nil 68 | } 69 | 70 | func (o *Options) preRunE(c *cobra.Command, args []string) error { 71 | // check that the export dir exists 72 | if !checkIfDirExists(o.ExportDir) { 73 | return fmt.Errorf("export-dir %s does not exist", o.ExportDir) 74 | } 75 | 76 | // check that the transform dir does not already exist 77 | if checkIfDirExists(o.TransformDir) { 78 | return fmt.Errorf("transform-dir %s already exists", o.TransformDir) 79 | } 80 | 81 | var fnArgs []string 82 | if c.ArgsLenAtDash() >= 0 { 83 | fnArgs = append(fnArgs, args[c.ArgsLenAtDash():]...) 84 | args = args[:c.ArgsLenAtDash()] 85 | } 86 | 87 | var err error 88 | if o.Image, err = getFunctionImage(args); err != nil { 89 | return err 90 | } 91 | 92 | fns, err := o.getContainerFunctions(fnArgs) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | // set the output 98 | var output io.Writer 99 | o.TransformedContent = bytes.Buffer{} 100 | output = &o.TransformedContent 101 | 102 | o.RunFns = runfn.RunFns{ 103 | Path: o.ExportDir, 104 | Output: output, 105 | Functions: fns, 106 | Env: o.Env, 107 | } 108 | return nil 109 | } 110 | 111 | // getFunctionImage parses the commandline arguments and returns the image string 112 | func getFunctionImage(args []string) (string, error) { 113 | if len(args) == 0 { 114 | return "", errors.Errorf("must specify image to run a function") 115 | } else if len(args) == 1 { 116 | return args[0], nil 117 | } else { 118 | return "", errors.Errorf("only 1 argument is supported; function arguments go after '--'") 119 | } 120 | } 121 | 122 | // getContainerFunctions parses the commandline flags and arguments into explicit 123 | // Functions to run. 
124 | func (o *Options) getContainerFunctions(dataItems []string) ([]*yaml.RNode, error) { 125 | res, err := getFunctionConfig(dataItems) 126 | if err != nil { 127 | return nil, err 128 | } 129 | 130 | // create the function spec to set as an annotation 131 | fnAnnotation, err := o.getFunctionAnnotation() 132 | 133 | if err != nil { 134 | return nil, err 135 | } 136 | 137 | // set the function annotation on the function config, so that it is parsed by RunFns 138 | value, err := fnAnnotation.String() 139 | if err != nil { 140 | return nil, errors.Wrap(err) 141 | } 142 | if err = res.PipeE( 143 | yaml.LookupCreate(yaml.MappingNode, "metadata", "annotations"), 144 | yaml.SetField(runtimeutil.FunctionAnnotationKey, yaml.NewScalarRNode(value))); err != nil { 145 | return nil, errors.Wrap(err) 146 | } 147 | 148 | return []*yaml.RNode{res}, nil 149 | } 150 | 151 | func (o *Options) getFunctionAnnotation() (*yaml.RNode, error) { 152 | if err := ValidateFunctionImageURL(o.Image); err != nil { 153 | return nil, err 154 | } 155 | 156 | fn, err := yaml.Parse(`container: {}`) 157 | if err != nil { 158 | return nil, errors.Wrap(err) 159 | } 160 | 161 | if err = fn.PipeE( 162 | yaml.Lookup("container"), 163 | yaml.SetField("image", yaml.NewScalarRNode(o.Image))); err != nil { 164 | return nil, errors.Wrap(err) 165 | } 166 | return fn, nil 167 | } 168 | 169 | // getFunctionConfig parses the commandline flags and arguments into an explicit function config 170 | func getFunctionConfig(fnArgs []string) (*yaml.RNode, error) { 171 | // create the function config 172 | rc, err := yaml.Parse(` 173 | metadata: 174 | name: function-input 175 | data: {} 176 | `) 177 | if err != nil { 178 | return nil, err 179 | } 180 | 181 | // default the function config kind to ConfigMap; this may be overridden 182 | var kind = "ConfigMap" 183 | var version = "v1" 184 | 185 | // populate the function config with data. This is a convention for functions 186 | // to be more commandline-friendly 187 | if len(fnArgs) > 0 { 188 | dataField, err := rc.Pipe(yaml.Lookup("data")) 189 | if err != nil { 190 | return nil, err 191 | } 192 | for i, s := range fnArgs { 193 | kv := strings.SplitN(s, "=", 2) 194 | if i == 0 && len(kv) == 1 { 195 | // first argument may be the kind 196 | kind = s 197 | continue 198 | } 199 | if len(kv) != 2 { 200 | return nil, fmt.Errorf("args must have keys and values separated by =") 201 | } 202 | // When we are using a ConfigMap as the functionConfig, we should create 203 | // the node with type string instead of creating a scalar node, because 204 | // a scalar node might be parsed as int, float or bool later. 205 | err := dataField.PipeE(yaml.SetField(kv[0], yaml.NewStringRNode(kv[1]))) 206 | if err != nil { 207 | return nil, err 208 | } 209 | } 210 | } 211 | if err = rc.PipeE(yaml.SetField("kind", yaml.NewScalarRNode(kind))); err != nil { 212 | return nil, err 213 | } 214 | if err = rc.PipeE(yaml.SetField("apiVersion", yaml.NewScalarRNode(version))); err != nil { 215 | return nil, err 216 | } 217 | return rc, nil 218 | } 219 | -------------------------------------------------------------------------------- /cmd/runfn/util.go: -------------------------------------------------------------------------------- 1 | package runfn 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "regexp" 8 | "sigs.k8s.io/kustomize/kyaml/kio" 9 | "strings" 10 | ) 11 | 12 | // WriteOutput writes the resources to the output directory. 13 | // If the output directory is not specified, a temporary directory is created under the current working directory. 
14 | func WriteOutput(outDir string, content string) error { 15 | r := strings.NewReader(content) 16 | var outputs []kio.Writer 17 | outDir, err := GetDestinationDir(outDir) 18 | if err != nil { 19 | return err 20 | } 21 | if err := os.MkdirAll(outDir, 0755); err != nil { 22 | return err 23 | } 24 | outputs = []kio.Writer{kio.LocalPackageWriter{PackagePath: outDir}} 25 | return kio.Pipeline{ 26 | Inputs: []kio.Reader{&kio.ByteReader{Reader: r}}, 27 | Outputs: outputs}.Execute() 28 | } 29 | 30 | // GetDestinationDir returns the destination directory; if outDir is not defined, it creates a temp dir under the current working directory. 31 | func GetDestinationDir(outDir string) (string, error) { 32 | if outDir == "" { 33 | cwd, err := os.Getwd() 34 | if err != nil { 35 | return "", fmt.Errorf("failed to get current directory: %v", err) 36 | } 37 | dir, err := ioutil.TempDir(cwd, "crane_output") 38 | if err != nil { 39 | return "", err 40 | } 41 | outDir = dir 42 | } 43 | return outDir, nil 44 | } 45 | 46 | // ValidateFunctionImageURL validates the function name. 47 | // According to Docker implementation 48 | // https://github.com/docker/distribution/blob/master/reference/reference.go. A valid 49 | // name definition is: 50 | // name := [domain '/'] path-component ['/' path-component]* 51 | // domain := domain-component ['.' domain-component]* [':' port-number] 52 | // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ 53 | // port-number := /[0-9]+/ 54 | // path-component := alpha-numeric [separator alpha-numeric]* 55 | // alpha-numeric := /[a-z0-9]+/ 56 | // separator := /[_.]|__|[-]*/ 57 | // https://github.com/GoogleContainerTools/kpt/blob/b197de30601072d7b8668dd41150f398a7f415f5/pkg/api/kptfile/v1/validation.go#L120-L150 58 | func ValidateFunctionImageURL(name string) error { 59 | pathComponentRegexp := `(?:[a-z0-9](?:(?:[_.]|__|[-]*)[a-z0-9]+)*)` 60 | domainComponentRegexp := `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` 61 | domainRegexp := fmt.Sprintf(`%s(?:\.%s)*(?:\:[0-9]+)?`, domainComponentRegexp, domainComponentRegexp) 62 | nameRegexp := fmt.Sprintf(`(?:%s\/)?%s(?:\/%s)*`, domainRegexp, 63 | pathComponentRegexp, pathComponentRegexp) 64 | tagRegexp := `(?:[\w][\w.-]{0,127})` 65 | shaRegexp := `(sha256:[a-zA-Z0-9]{64})` 66 | versionRegexp := fmt.Sprintf(`(%s|%s)`, tagRegexp, shaRegexp) 67 | t := fmt.Sprintf(`^(?:%s(?:(\:|@)%s)?)$`, nameRegexp, versionRegexp) 68 | 69 | matched, err := regexp.MatchString(t, name) 70 | if err != nil { 71 | return err 72 | } 73 | if !matched { 74 | return fmt.Errorf("function name %q is invalid", name) 75 | } 76 | return nil 77 | } 78 | 79 | // checkIfDirExists reports whether the directory exists 80 | func checkIfDirExists(dir string) bool { 81 | _, err := os.Stat(dir) 82 | if err == nil || os.IsExist(err) { 83 | return true 84 | } 85 | if os.IsNotExist(err) { 86 | return false 87 | } 88 | return false 89 | } 90 | -------------------------------------------------------------------------------- /cmd/skopeo-sync-gen/skopeo-sync-gen.go: -------------------------------------------------------------------------------- 1 | package skopeo_sync_gen 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | // "fmt" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/konveyor/crane/internal/file" 12 | "github.com/konveyor/crane/internal/flags" 13 | imagev1 "github.com/openshift/api/image/v1" 14 | "github.com/spf13/cobra" 15 | "github.com/spf13/viper" 16 | "k8s.io/apimachinery/pkg/runtime/schema" 17 | "sigs.k8s.io/yaml" 18 | ) 19 | 20 | const ( 21 | InternalRegistryDefault = 
"image-registry.openshift-image-registry.svc:5000" 22 | ) 23 | 24 | var ( 25 | ImageStreamGroupKind = schema.GroupKind{Group: "image.openshift.io", Kind: "ImageStream"} 26 | ) 27 | 28 | type Options struct { 29 | 30 | // Two GlobalFlags struct fields are needed 31 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 32 | // 2. globalFlags for the args merged with values from the viper config file 33 | cobraGlobalFlags *flags.GlobalFlags 34 | globalFlags *flags.GlobalFlags 35 | 36 | Flags 37 | } 38 | 39 | type Flags struct { 40 | ExportDir string 41 | RegistryURL string 42 | InternalRegistryURL string 43 | } 44 | 45 | // Redefining types from skopeo here as they are unexported. 46 | // https://github.com/containers/skopeo/blob/7ddc5ce06cf86bd9d774fe4789db2c495b035619/cmd/skopeo/sync.go#L64-L75 47 | // registrySyncConfig contains information about a single registry, read from 48 | // the source YAML file 49 | type registrySyncConfig struct { 50 | Images map[string][]string `json:"images"` // Images map images name to slices with the images' references (tags, digests) 51 | // TODO(djzager): Do we even need to expose these? 52 | // ImagesByTagRegex map[string]string `yaml:"images-by-tag-regex"` // Images map images name to regular expression with the images' tags 53 | // Credentials types.DockerAuthConfig // Username and password used to authenticate with the registry 54 | // TLSVerify tlsVerifyConfig `yaml:"tls-verify"` // TLS verification mode (enabled by default) 55 | // CertDir string `yaml:"cert-dir"` // Path to the TLS certificates of the registry 56 | } 57 | 58 | // sourceConfig contains all registries information read from the source YAML file 59 | type sourceConfig map[string]registrySyncConfig 60 | 61 | func (o *Options) Complete(c *cobra.Command, args []string) error { 62 | return nil 63 | } 64 | 65 | func (o *Options) Validate() error { 66 | return nil 67 | } 68 | 69 | func NewSkopeoSyncGenCommand(f *flags.GlobalFlags) *cobra.Command { 70 | o := &Options{ 71 | cobraGlobalFlags: f, 72 | } 73 | cmd := &cobra.Command{ 74 | Use: "skopeo-sync-gen", 75 | Short: "Generate source yaml for skopeo sync and write the result to stdout", 76 | RunE: func(c *cobra.Command, args []string) error { 77 | if err := o.Complete(c, args); err != nil { 78 | return err 79 | } 80 | if err := o.Validate(); err != nil { 81 | return err 82 | } 83 | if err := o.Run(); err != nil { 84 | return err 85 | } 86 | 87 | return nil 88 | }, 89 | PreRun: func(cmd *cobra.Command, args []string) { 90 | viper.BindPFlags(cmd.Flags()) 91 | viper.Unmarshal(&o.globalFlags) 92 | }, 93 | } 94 | 95 | cmd.Flags().StringVarP(&o.ExportDir, "export-dir", "e", "export", "The path where kube resources are saved") 96 | cmd.Flags().StringVar(&o.RegistryURL, "registry-url", "", "Publicly accessible URL to registry") 97 | cmd.Flags().StringVar(&o.InternalRegistryURL, "internal-registry-url", InternalRegistryDefault, "Internal registry hostname[:port] used to determine whether an image should be synced") 98 | cmd.MarkFlagRequired("registry-url") 99 | 100 | return cmd 101 | } 102 | 103 | func shouldAddImageStream(internalRegistryURL string, tags []imagev1.NamedTagEventList) bool { 104 | for _, tag := range tags { 105 | for _, item := range tag.Items { 106 | if strings.HasPrefix(item.DockerImageReference, internalRegistryURL) { 107 | return true 108 | } 109 | } 110 | } 111 | return false 112 | } 113 | 114 | func (o *Options) Run() error { 115 | // Load all the resources from the export dir 116 | exportDir, err := 
filepath.Abs(o.ExportDir) 117 | if err != nil { 118 | // TODO: Handle errors better for users. 119 | return err 120 | } 121 | 122 | files, err := file.ReadFiles(context.TODO(), exportDir) 123 | if err != nil { 124 | return err 125 | } 126 | 127 | srcConfig := sourceConfig{ 128 | o.RegistryURL: registrySyncConfig{ 129 | Images: map[string][]string{}, 130 | }, 131 | } 132 | 133 | for _, f := range files { 134 | obj := f.Unstructured 135 | if obj.GetObjectKind().GroupVersionKind().GroupKind() == ImageStreamGroupKind { 136 | rawJSON, err := obj.MarshalJSON() 137 | if err != nil { 138 | return err 139 | } 140 | imageStream := &imagev1.ImageStream{} 141 | err = json.Unmarshal(rawJSON, imageStream) 142 | if err != nil { 143 | return err 144 | } 145 | if shouldAddImageStream(o.InternalRegistryURL, imageStream.Status.Tags) { 146 | imageName := obj.GetNamespace() + "/" + obj.GetName() 147 | srcConfig[o.RegistryURL].Images[imageName] = []string{} 148 | } 149 | } 150 | } 151 | 152 | yamlBytes, err := yaml.Marshal(srcConfig) 153 | if err != nil { 154 | return err 155 | } 156 | os.Stdout.Write(yamlBytes) 157 | 158 | return nil 159 | } 160 | -------------------------------------------------------------------------------- /cmd/transfer-pvc/README.md: -------------------------------------------------------------------------------- 1 | # Transfer Persistent Volume Claims 2 | 3 | The `transfer-pvc` subcommand in Crane can be used to transfer a _PersistentVolumeClaim_ resource and its volume data to a destination cluster. It establishes a connection to the destination cluster by creating a public endpoint of the user's choice in the destination namespace. It then creates a PVC and an _rsync_ daemon Pod in the destination namespace to receive data from the source PVC. Finally, it creates an _rsync_ client Pod in the source namespace which transfers data to the rsync daemon using the endpoint. The connection is encrypted using self-signed certificates created automatically at the time of transfer. 4 | 5 | ## Example 6 | 7 | ```bash 8 | crane transfer-pvc --source-context=<source-context> --destination-context=<destination-context> --pvc-name=<pvc-name> --endpoint=route 9 | ``` 10 | 11 | The above command transfers the PVC (along with PV data) named `<pvc-name>` from the namespace specified by the `<source-context>` context into the namespace specified by the `<destination-context>` context. The `--endpoint` argument specifies the kind of public endpoint to use to establish a connection between the source and the destination cluster. 
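As a concrete sketch of the same invocation, with hypothetical values filled in (the context names `src-cluster`/`dest-cluster` and the PVC name `my-data` are placeholders, not names from this repository):

```bash
crane transfer-pvc \
  --source-context=src-cluster \
  --destination-context=dest-cluster \
  --pvc-name=my-data \
  --endpoint=route
```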
12 | 13 | ## Options 14 | 15 | The `transfer-pvc` subcommand can be configured using various additional options: 16 | 17 | | Option | Type | Required | Description | 18 | |-----------------------|---------|----------|-----------------------------------------------------------------------------------------------| 19 | | source-context | string | Yes | Kube context of the source cluster | 20 | | destination-context | string | Yes | Kube context of the destination cluster | 21 | | pvc-name | string | Yes | Mapping of the source/destination PVC names (See [PVC options](#pvc-options)) | 22 | | pvc-namespace | string | No | Mapping of the source/destination PVC namespaces (See [PVC options](#pvc-options)) | 23 | | dest-storage-class | string | No | Storage class of destination PVC (Defaults to source storage class) | 24 | | dest-storage-requests | string | No | Requested storage capacity of destination PVC (Defaults to source capacity) | 25 | | destination-image | string | No | Custom image to use for destination rsync Pod | 26 | | source-image | string | No | Custom image to use for source rsync Pod | 27 | | endpoint | string | No | Kind of endpoint to create in destination cluster (See [Endpoint Options](#endpoint-options)) | 28 | | ingress-class | string | No | Ingress class when endpoint is nginx-ingress (See [Endpoint Options](#endpoint-options)) | 29 | | subdomain | string | No | Custom subdomain to use for the endpoint (See [Endpoint Options](#endpoint-options)) | 30 | | output | string | No | Output transfer stats in the specified file | 31 | | verify | bool | No | Verify transferred files using checksums | 32 | | help | bool | No | Display help | 33 | 34 | ### PVC Options 35 | 36 | The `--pvc-name` option allows specifying a mapping of the source and destination PVC names. This is a required option. 37 | 38 | The `--pvc-namespace` option allows specifying a mapping of the source and destination PVC namespaces. By default, the namespaces in the source and destination contexts are used. When this option is specified, the namespaces in the kube contexts are ignored and the specified namespaces are used. 39 | 40 | Both `--pvc-name` and `--pvc-namespace` follow the mapping format `<source>:<destination>`, where `<source>` specifies the name in the source cluster and `<destination>` is the name in the destination cluster. If only `<source>` is specified, the same name is used in the destination cluster. 41 | 42 | #### Examples 43 | 44 | To transfer a PVC `test-pvc` in namespace `test-ns` to a destination PVC with the same name & namespace: 45 | 46 | ```bash 47 | crane transfer-pvc --pvc-name=test-pvc --pvc-namespace=test-ns ... 48 | ``` 49 | 50 | To transfer a PVC `source-pvc` in namespace `source-ns` to a destination PVC `destination-pvc` in namespace `destination-ns`: 51 | 52 | ```bash 53 | crane transfer-pvc --pvc-name=source-pvc:destination-pvc --pvc-namespace=source-ns:destination-ns ... 54 | ``` 55 | 56 | ### Endpoint Options 57 | 58 | An endpoint enables a connection between the source and destination clusters for data transfer. It is created in the destination cluster. The destination cluster _must_ support the kind of endpoint used; an example sketch follows below. 59 | 60 | By default, `nginx-ingress` is used as the endpoint. For nginx-ingress, `--subdomain` and `--ingress-class` are required. 61 | 62 | In an OpenShift cluster, the `route` endpoint can be used. A subdomain option can be specified but is not required. By default, the cluster's subdomain will be used. 
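As a sketch, a transfer using the default `nginx-ingress` endpoint might look like the following (the context names, subdomain, and ingress class are hypothetical and must match what actually exists in the destination cluster):

```bash
crane transfer-pvc \
  --source-context=src-cluster \
  --destination-context=dest-cluster \
  --pvc-name=test-pvc --pvc-namespace=test-ns \
  --endpoint=nginx-ingress \
  --subdomain=transfer.apps.example.com \
  --ingress-class=nginx
```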
63 | 64 | 65 | -------------------------------------------------------------------------------- /cmd/transfer-pvc/progress.go: -------------------------------------------------------------------------------- 1 | package transfer_pvc 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "log" 10 | "math" 11 | "os" 12 | "regexp" 13 | "strconv" 14 | "strings" 15 | "time" 16 | 17 | corev1 "k8s.io/api/core/v1" 18 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 19 | "k8s.io/apimachinery/pkg/types" 20 | "k8s.io/apimachinery/pkg/util/wait" 21 | "k8s.io/client-go/kubernetes" 22 | "k8s.io/client-go/rest" 23 | "sigs.k8s.io/controller-runtime/pkg/client" 24 | ) 25 | 26 | type rsyncLogStream struct { 27 | pvc types.NamespacedName 28 | podLabels map[string]string 29 | restCfg *rest.Config 30 | stdout chan string 31 | stderr chan string 32 | err chan error 33 | progress *Progress 34 | outputFile *string 35 | } 36 | 37 | func NewRsyncLogStream(restCfg *rest.Config, pvc types.NamespacedName, labels map[string]string, output string) LogStreams { 38 | var outputFile string 39 | if output != "" { 40 | outputFile = output 41 | } 42 | return &rsyncLogStream{ 43 | restCfg: restCfg, 44 | pvc: pvc, 45 | podLabels: labels, 46 | outputFile: &outputFile, 47 | } 48 | } 49 | 50 | func (r *rsyncLogStream) Init() error { 51 | r.stdout = make(chan string) 52 | r.stderr = make(chan string) 53 | r.err = make(chan error) 54 | 55 | clientset, err := kubernetes.NewForConfig(r.restCfg) 56 | if err != nil { 57 | return err 58 | } 59 | 60 | podName, err := waitForPodRunning(clientset, r.pvc.Namespace, r.podLabels) 61 | if err != nil { 62 | return err 63 | } 64 | 65 | podLogsRequest := clientset.CoreV1().Pods(r.pvc.Namespace).GetLogs(podName, &corev1.PodLogOptions{ 66 | TypeMeta: metav1.TypeMeta{}, 67 | Container: "rsync", 68 | Follow: true, 69 | }) 70 | 71 | podLogStream, err := podLogsRequest.Stream(context.TODO()) 72 | if err != nil { 73 | return err 74 | } 75 | 76 | r.progress = NewProgress(r.pvc) 77 | var lastProgress *Progress 78 | 79 | go func() { 80 | defer podLogStream.Close() 81 | logString := "" 82 | zeroBytes := 0 83 | for { 84 | buf := make([]byte, 32*1024) 85 | n, readErr := podLogStream.Read(buf) 86 | if n > 0 { 87 | zeroBytes = 0 88 | } else { 89 | zeroBytes += 1 90 | } 91 | // sometimes, a stream would end without returning an EOF gracefully 92 | // we force exit the loop when we see null bytes on stream consecutively 93 | if zeroBytes > 4 { 94 | err = io.EOF 95 | } 96 | logString = fmt.Sprintf("%s%s", logString, string(buf[:n])) 97 | if readErr == io.EOF { 98 | err = readErr 99 | // attempt to get a final status of terminated pod 100 | code, finalLogs, e := getFinalPodStatus(clientset, podName, r.pvc.Namespace) 101 | if e != nil { 102 | err = e 103 | } 104 | r.progress.ExitCode = code 105 | logString = finalLogs 106 | } 107 | parsedProgress, unparsed := parseRsyncLogs(logString) 108 | r.progress.Merge(parsedProgress) 109 | outString, errString := r.progress.AsString() 110 | if len(outString) > 0 { 111 | // overwrite previous progress 112 | if lastProgress != nil { 113 | oldStdOut, _ := lastProgress.AsString() 114 | for i := 0; i < strings.Count(oldStdOut, "\n"); i++ { 115 | outString = fmt.Sprintf("\x1b[1A\x1b[2K%s", outString) 116 | } 117 | } 118 | r.stdout <- outString 119 | } 120 | if len(errString) > 0 { 121 | r.stderr <- errString 122 | } 123 | logString = unparsed 124 | lastProgress = r.progress 125 | if err != nil { 126 | r.err <- err 127 | break 128 | } 129 | } 130 | if 
r.outputFile != nil { 131 | writeProgressToFile(*r.outputFile, r.progress) 132 | } 133 | }() 134 | 135 | return nil 136 | } 137 | 138 | func writeProgressToFile(o string, p *Progress) error { 139 | file, err := os.OpenFile(o, os.O_CREATE, os.ModePerm) 140 | if err != nil { 141 | return err 142 | } 143 | defer file.Close() 144 | d, _ := json.MarshalIndent(p, "", " ") 145 | return ioutil.WriteFile(o, d, os.ModePerm) 146 | } 147 | 148 | func (r *rsyncLogStream) Close() { 149 | close(r.stdout) 150 | close(r.stderr) 151 | close(r.err) 152 | } 153 | 154 | func (r *rsyncLogStream) Streams() (stdout chan string, stderr chan string, err chan error) { 155 | return r.stdout, r.stderr, r.err 156 | } 157 | 158 | // Progress defines transfer Progress 159 | type Progress struct { 160 | PVC types.NamespacedName `json:"pvc"` 161 | TransferPercentage *int64 `json:"transferPercentage"` 162 | TransferRate *dataSize `json:"transferRate"` 163 | TransferredData *dataSize `json:"transferredData"` 164 | TotalFiles *int64 `json:"totalFiles"` 165 | TransferredFiles int64 `json:"transferredFiles"` 166 | ExitCode *int32 `json:"exitCode"` 167 | FailedFiles []FailedFile `json:"failedFiles"` 168 | Errors []string `json:"miscErrors"` 169 | retries *int 170 | startedAt time.Time 171 | } 172 | 173 | // pastAttempts stores cumulative progress info 174 | // of all previous attempts of transfer 175 | var pastAttempts Progress 176 | 177 | // failedFiles cache of discovered files 178 | var failedFiles map[string]bool 179 | 180 | type FailedFile struct { 181 | Name string `json:"name"` 182 | Err string `json:"error"` 183 | } 184 | 185 | type dataSize struct { 186 | val float64 187 | unit string 188 | } 189 | 190 | func addDataSize(a, b *dataSize) *dataSize { 191 | if b == nil { 192 | return nil 193 | } 194 | newDs := &dataSize{} 195 | units := map[string]int{"bytes": 0, "K": 3, "M": 6, "G": 9, "T": 12} 196 | if b.unit == a.unit { 197 | newDs.val = b.val + a.val 198 | newDs.unit = b.unit 199 | } else { 200 | if nu, exists := units[b.unit]; exists { 201 | if du, exists := units[a.unit]; exists { 202 | if nu > du { 203 | newDs.val = b.val + (a.val / math.Pow(10, float64(nu-du))) 204 | newDs.unit = b.unit 205 | } else { 206 | newDs.val = (b.val / math.Pow(10, float64(du-nu))) + a.val 207 | newDs.unit = a.unit 208 | } 209 | } 210 | } 211 | } 212 | return newDs 213 | } 214 | 215 | func (d *dataSize) String() string { 216 | return fmt.Sprintf("%.2f %s", d.val, d.unit) 217 | } 218 | 219 | func (d *dataSize) MarshalJSON() ([]byte, error) { 220 | return json.Marshal(d.String()) 221 | } 222 | 223 | func (p *Progress) AsString() (out string, err string) { 224 | progressLog := "" 225 | intVal := func(i *int64, pref string) string { 226 | if i == nil { 227 | return "" 228 | } 229 | return fmt.Sprintf("%d%s", *i, pref) 230 | } 231 | dataVal := func(i *dataSize) string { 232 | if i == nil { 233 | return "" 234 | } 235 | return i.String() 236 | } 237 | progressLog = fmt.Sprintf("%sStatus:\t%s\n", progressLog, p.Status()) 238 | progressLog = fmt.Sprintf("%sProgress:\n", progressLog) 239 | progressLog = fmt.Sprintf("%s Percentage:\t%s\n", progressLog, intVal(p.TransferPercentage, "%")) 240 | progressLog = fmt.Sprintf("%s Transferred:\t%s\n", progressLog, dataVal(p.TransferredData)) 241 | progressLog = fmt.Sprintf("%s Rate:\t\t%s\n", progressLog, dataVal(p.TransferRate)) 242 | if p.retries != nil { 243 | progressLog = fmt.Sprintf("%s Retries:\t%d\n", progressLog, *p.retries) 244 | } 245 | if p.Status().Completed() { 246 | progressLog = 
fmt.Sprintf("%s Files:\n", progressLog) 247 | progressLog = fmt.Sprintf("%s Sent:\t%d\n", progressLog, p.TransferredFiles) 248 | if p.TotalFiles != nil { 249 | progressLog = fmt.Sprintf("%s Total:\t%d\n", progressLog, *p.TotalFiles) 250 | } 251 | } 252 | progressLog = fmt.Sprintf("%sElapsed:\t%s\n", progressLog, time.Since(p.startedAt).Round(time.Second).String()) 253 | errors, failedFiles := "", "" 254 | if p.Status().Completed() { 255 | if len(p.FailedFiles) > 0 { 256 | failedFiles = "Failed files: \n" 257 | for _, f := range p.FailedFiles { 258 | failedFiles = fmt.Sprintf("%s - %s [%s]\n", failedFiles, f.Name, f.Err) 259 | } 260 | } 261 | if len(p.Errors) > 0 { 262 | errors := "Errors: \n" 263 | for _, e := range p.Errors { 264 | errors = fmt.Sprintf("%s - %s\n", errors, e) 265 | } 266 | } 267 | } 268 | return progressLog, fmt.Sprintf("%s%s", errors, failedFiles) 269 | } 270 | 271 | func NewProgress(name types.NamespacedName) *Progress { 272 | return &Progress{ 273 | PVC: name, 274 | FailedFiles: make([]FailedFile, 0), 275 | Errors: make([]string, 0), 276 | startedAt: time.Now(), 277 | } 278 | } 279 | 280 | type status string 281 | 282 | const ( 283 | succeeded status = "Succeeded" 284 | failed status = "Failed" 285 | partiallyFailed status = "Partially failed" 286 | preparing status = "Preparing" 287 | transferInProgress status = "Transfer in-progress" 288 | finishingUp status = "Finishing up" 289 | ) 290 | 291 | func (s status) Completed() bool { 292 | return s == succeeded || s == failed || s == partiallyFailed 293 | } 294 | 295 | // Status returns current status of transfer 296 | func (p *Progress) Status() status { 297 | if p.ExitCode != nil { 298 | if *p.ExitCode == 0 { 299 | int100 := int64(100) 300 | p.TransferPercentage = &int100 301 | return succeeded 302 | } 303 | if p.TransferredFiles == 0 && 304 | p.TransferredData.val == 0 && 305 | p.TotalFiles == nil { 306 | return failed 307 | } 308 | return partiallyFailed 309 | } else { 310 | if p.TransferPercentage == nil { 311 | return preparing 312 | } 313 | if *p.TransferPercentage >= 100 { 314 | return finishingUp 315 | } 316 | } 317 | return transferInProgress 318 | } 319 | 320 | // Merge merges two progress objects 321 | func (p *Progress) Merge(in *Progress) { 322 | p.TransferredFiles += in.TransferredFiles 323 | if in.TotalFiles != nil { 324 | p.TotalFiles = in.TotalFiles 325 | } 326 | if in.ExitCode != nil { 327 | p.ExitCode = in.ExitCode 328 | } 329 | if in.TransferRate != nil { 330 | p.TransferRate = in.TransferRate 331 | } 332 | // aggregate percentage of all retries 333 | var totalPercentage *int64 334 | if pastAttempts.TransferPercentage != nil { 335 | totalPercentage = pastAttempts.TransferPercentage 336 | } 337 | if in.TransferPercentage != nil { 338 | if totalPercentage == nil { 339 | totalPercentage = in.TransferPercentage 340 | } else { 341 | t := *totalPercentage + *in.TransferPercentage 342 | totalPercentage = &t 343 | } 344 | } 345 | if totalPercentage != nil { 346 | if (p.TransferPercentage == nil) || (*totalPercentage <= int64(100) && *totalPercentage > *p.TransferPercentage) { 347 | p.TransferPercentage = totalPercentage 348 | } 349 | } 350 | // aggregate transferred data of all retries 351 | var totalTransferredData *dataSize 352 | if pastAttempts.TransferredData != nil { 353 | totalTransferredData = pastAttempts.TransferredData 354 | } 355 | if in.TransferredData != nil { 356 | if totalTransferredData == nil { 357 | totalTransferredData = in.TransferredData 358 | } else { 359 | t := 
addDataSize(totalTransferredData, in.TransferredData) 360 | totalTransferredData = t 361 | } 362 | } 363 | if totalTransferredData != nil { 364 | if (p.TransferredData == nil) || (totalTransferredData.val > p.TransferredData.val) { 365 | p.TransferredData = totalTransferredData 366 | } 367 | } 368 | if in.retries != nil { 369 | pastAttempts = *p 370 | p.retries = in.retries 371 | } 372 | p.Errors = append(p.Errors, in.Errors...) 373 | p.FailedFiles = append(p.FailedFiles, in.FailedFiles...) 374 | } 375 | 376 | func newDataSize(str string) *dataSize { 377 | r := regexp.MustCompile(`([\d\.]+)([\w\/]*)`) 378 | matched := r.FindStringSubmatch(str) 379 | if len(matched) < 2 { 380 | return nil 381 | } 382 | size, err := strconv.ParseFloat(matched[1], 64) 383 | if err != nil { 384 | return nil 385 | } 386 | unit := matched[2] 387 | if unit == "" { 388 | unit = "bytes" 389 | } 390 | return &dataSize{ 391 | val: size, 392 | unit: unit, 393 | } 394 | } 395 | 396 | // parseRsyncLogs parses raw rsync logs and returns a structured progress 397 | // also returns data that was not processed, this is useful because log 398 | // stream can have incomplete lines 399 | func parseRsyncLogs(rawLogs string) (p *Progress, unprocessedData string) { 400 | p = NewProgress(types.NamespacedName{}) 401 | // in-progress information 402 | fileProgressRegex := regexp.MustCompile(`([\d.]+\w+)[\t ]+(\d+)%[\t ]+([\d\.]+\w{1,2}\/\w+).*\(xfr.*\)`) 403 | fileErrorRegex := regexp.MustCompile(`rsync: \w+ "(.*)".*: (.*)`) 404 | processErrorRegex := regexp.MustCompile(`@ERROR: (.*)`) 405 | // final stats 406 | fileStatsRegex := regexp.MustCompile(`Number of files: (\d+).*reg: ([\d,]+), dir: ([\d,]+)`) 407 | finalDataTransferredRegex := regexp.MustCompile(`Total transferred file size: (.*) bytes`) 408 | finalFileCountRegex := regexp.MustCompile(`Number of regular files transferred: (.*)`) 409 | unprocessedLines := regexp.MustCompile(`.*?\n(.*)$`) 410 | // retries 411 | retryRegex := regexp.MustCompile(`Syncronization failed. Retrying in \d+ seconds. 
Retry (\d+)/.*`) 412 | 413 | inProgressLines := fileProgressRegex.FindAllStringSubmatch(rawLogs, -1) 414 | for _, matched := range inProgressLines { 415 | // transferred data 416 | if len(matched) > 1 { 417 | p.TransferredData = newDataSize(matched[1]) 418 | } 419 | // percentage 420 | if len(matched) > 2 { 421 | observedPercentage, err := strconv.ParseInt(matched[2], 10, 64) 422 | if err == nil { 423 | p.TransferPercentage = &observedPercentage 424 | } 425 | } 426 | // speed 427 | if len(matched) > 3 { 428 | p.TransferRate = newDataSize(matched[3]) 429 | } 430 | } 431 | // post-completion transfer stats 432 | for _, matched := range fileStatsRegex.FindAllStringSubmatch(rawLogs, -1) { 433 | if len(matched) > 2 { 434 | matched[2] = strings.ReplaceAll(matched[2], ",", "") 435 | if val, err := strconv.ParseInt(matched[2], 10, 64); err == nil { 436 | p.TotalFiles = &val 437 | } 438 | } 439 | } 440 | for _, matched := range fileErrorRegex.FindAllStringSubmatch(rawLogs, -1) { 441 | if len(matched) > 2 { 442 | if failedFiles == nil { 443 | failedFiles = make(map[string]bool) 444 | } 445 | if _, exists := failedFiles[matched[1]]; !exists { 446 | p.FailedFiles = append(p.FailedFiles, FailedFile{ 447 | Name: matched[1], 448 | Err: matched[2], 449 | }) 450 | failedFiles[matched[1]] = true 451 | } 452 | } 453 | } 454 | for _, matched := range processErrorRegex.FindAllStringSubmatch(rawLogs, -1) { 455 | if len(matched) > 1 { 456 | p.Errors = append(p.Errors, matched[1]) 457 | } 458 | } 459 | if matched := finalDataTransferredRegex.FindStringSubmatch(rawLogs); len(matched) > 1 { 460 | p.TransferredData = newDataSize(matched[1]) 461 | } 462 | if matched := retryRegex.FindStringSubmatch(rawLogs); len(matched) > 1 { 463 | if val, err := strconv.Atoi(matched[1]); err == nil { 464 | p.retries = &val 465 | } 466 | } 467 | if matched := finalFileCountRegex.FindStringSubmatch(rawLogs); len(matched) > 1 { 468 | matched[1] = strings.ReplaceAll(matched[1], ",", "") 469 | if val, err := strconv.ParseInt(matched[1], 10, 64); err == nil { 470 | p.TransferredFiles = val 471 | } 472 | } 473 | if matched := unprocessedLines.FindStringSubmatch(rawLogs); len(matched) > 1 { 474 | return p, matched[1] 475 | } 476 | return p, "" 477 | } 478 | 479 | func waitForPodRunning(c *kubernetes.Clientset, namespace string, labels map[string]string) (string, error) { 480 | var podName string 481 | err := wait.PollUntil(time.Second, func() (done bool, err error) { 482 | listOptions := &client.ListOptions{} 483 | client.InNamespace(namespace).ApplyToList(listOptions) 484 | client.MatchingLabels(labels).ApplyToList(listOptions) 485 | clientPodList, err := c.CoreV1().Pods(namespace).List(context.TODO(), *listOptions.AsListOptions()) 486 | if err != nil { 487 | return false, err 488 | } 489 | 490 | if len(clientPodList.Items) != 1 { 491 | log.Printf("expected 1 client pod found %d, with labels %v\n", len(clientPodList.Items), labels) 492 | return false, nil 493 | } 494 | 495 | clientPod := &clientPodList.Items[0] 496 | podName = clientPod.Name 497 | 498 | for _, containerStatus := range clientPod.Status.ContainerStatuses { 499 | if containerStatus.State.Terminated != nil { 500 | log.Printf("container %s in pod %s completed", containerStatus.Name, client.ObjectKey{Namespace: namespace, Name: clientPod.Name}) 501 | break 502 | } 503 | if !containerStatus.Ready { 504 | log.Println(fmt.Errorf("container %s in pod %s is not ready", containerStatus.Name, client.ObjectKey{Namespace: namespace, Name: clientPod.Name})) 505 | return false, nil 506 | 
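// Note on the polling pattern used here: wait.PollUntil(interval,
// condition, stopCh) invokes the condition on every tick until it returns
// true or an error, or until stopCh closes; the make(<-chan struct{})
// passed below is a receive-only channel that is never closed, so polling
// continues until the condition itself settles.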
} 507 | } 508 | return true, nil 509 | }, make(<-chan struct{})) 510 | return podName, err 511 | } 512 | 513 | func getFinalPodStatus(c *kubernetes.Clientset, name string, namespace string) (*int32, string, error) { 514 | var exitCode *int32 515 | count := 0 516 | for { 517 | count += 1 518 | pod, err := c.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) 519 | if err != nil { 520 | return nil, "", err 521 | } 522 | 523 | for _, container := range pod.Status.ContainerStatuses { 524 | if container.Name == "rsync" { 525 | if container.State.Terminated != nil { 526 | exitCode = &container.State.Terminated.ExitCode 527 | } 528 | } 529 | } 530 | if count > 5 || exitCode != nil { 531 | break 532 | } 533 | } 534 | 535 | lastLines := int64(35) 536 | finalLogRequest := c.CoreV1().Pods(namespace).GetLogs(name, &corev1.PodLogOptions{ 537 | TypeMeta: metav1.TypeMeta{}, 538 | Container: "rsync", 539 | TailLines: &lastLines, 540 | }) 541 | 542 | podLogStream, err := finalLogRequest.Stream(context.TODO()) 543 | if err != nil { 544 | return exitCode, "", err 545 | } 546 | defer podLogStream.Close() 547 | 548 | buf := new(strings.Builder) 549 | _, err = io.Copy(buf, podLogStream) 550 | if err != nil { 551 | return exitCode, buf.String(), err 552 | } 553 | 554 | return exitCode, buf.String(), nil 555 | } 556 | -------------------------------------------------------------------------------- /cmd/transfer-pvc/progress_test.go: -------------------------------------------------------------------------------- 1 | package transfer_pvc 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func intEqual(a, b *int64) (string, string, bool) { 9 | if a != nil && b != nil && *a == *b { 10 | return fmt.Sprintf("%d", a), fmt.Sprintf("%d", b), true 11 | } 12 | as, bs := "nil", "nil" 13 | if a != nil { 14 | as = fmt.Sprintf("%d", *a) 15 | } 16 | if b != nil { 17 | bs = fmt.Sprintf("%d", *b) 18 | } 19 | return as, bs, a == b 20 | } 21 | 22 | func dataEqual(a, b *dataSize) (string, string, bool) { 23 | if a != nil && b != nil { 24 | if (a.val == b.val) && (a.unit == b.unit) { 25 | return a.String(), b.String(), true 26 | } 27 | return a.String(), b.String(), false 28 | } 29 | as, bs := "nil", "nil" 30 | if a != nil { 31 | as = a.String() 32 | } 33 | if b != nil { 34 | bs = b.String() 35 | } 36 | return as, bs, a == b 37 | } 38 | 39 | func Test_parseRsyncLogs(t *testing.T) { 40 | int130 := int64(130) 41 | int6 := int64(6) 42 | int49 := int64(49948) 43 | tests := []struct { 44 | name string 45 | stdout string 46 | stderr string 47 | want Progress 48 | wantStatus status 49 | wantUnProcessed string 50 | }{ 51 | { 52 | name: "failed-succeeded file list test", 53 | stdout: ` 54 | 16.78M 3% 105.06MB/s 0:00:00 (xfr#1, to-chk=19/21) 55 | admin.0 56 | 33.55M 6% 86.40MB/s 0:00:00 (xfr#2, to-chk=18/21) 57 | admin.ns 58 | `, 59 | stderr: ` 60 | rsync: [sender] send_files failed to open "/tmp/rsync-tests/a/dbvw": Permission denied (13) 61 | rsync: [sender] send_files failed to open "/tmp/rsync-tests/a/nhzmmmw": Permission denied (13)`, 62 | want: Progress{ 63 | FailedFiles: []FailedFile{ 64 | {Name: "/tmp/rsync-tests/a/dbvw", Err: "Permission denied (13)"}, 65 | {Name: "/tmp/rsync-tests/a/nhzmmmw", Err: "Permission denied (13)"}, 66 | }, 67 | TransferRate: &dataSize{ 68 | val: float64(86.4), 69 | unit: "MB/s", 70 | }, 71 | TransferredData: &dataSize{ 72 | val: float64(33.55), 73 | unit: "M", 74 | }, 75 | TransferPercentage: &int6, 76 | }, 77 | wantStatus: transferInProgress, 78 | wantUnProcessed: "", 79 | }, 
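// Walkthrough of the first case above (illustrative): a stdout line such as
// "33.55M 6% 86.40MB/s 0:00:00 (xfr#2, to-chk=18/21)" matches
// fileProgressRegex, and the last match wins, giving TransferredData
// 33.55 M, TransferPercentage 6, and TransferRate 86.40 MB/s. The stderr
// lines of the form `rsync: ... "file": reason` would match fileErrorRegex
// and be deduplicated into FailedFiles through the package-level
// failedFiles map, though this test only feeds stdout to parseRsyncLogs.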
80 | { 81 | name: "file number stat test", 82 | stdout: ` 83 | Number of files: 136 (reg: 130, dir: 6) 84 | Number of created files: 135 (reg: 130, dir: 5) 85 | Number of deleted files: 0 86 | Number of regular files transferred: 130 87 | `, 88 | want: Progress{ 89 | TotalFiles: &int130, 90 | TransferredFiles: 130, 91 | }, 92 | wantStatus: preparing, 93 | wantUnProcessed: "", 94 | }, 95 | { 96 | name: "unprocessed line test", 97 | stdout: ` 98 | Number of files: 136 (reg: 130, dir: 6) 99 | Number of created files: 135 (reg: 130, dir: 5) 100 | Number of deleted files: 0 101 | Number of re`, 102 | want: Progress{ 103 | TotalFiles: &int130, 104 | }, 105 | wantStatus: preparing, 106 | wantUnProcessed: "Number of re", 107 | }, 108 | { 109 | name: "final stats", 110 | stdout: ` 111 | 2022/07/14 18:09:11 [549] Number of files: 49,961 (reg: 49,948, dir: 13) 112 | Number of files: 49,961 (reg: 49,948, dir: 13) 113 | 2022/07/14 18:09:11 [549] Number of created files: 49,959 (reg: 49,948, dir: 11) 114 | Number of created files: 49,959 (reg: 49,948, dir: 11) 115 | 2022/07/14 18:09:11 [549] Number of deleted files: 0 116 | Number of deleted files: 0 117 | 2022/07/14 18:09:11 [549] Number of regular files transferred: 49,948 118 | Number of regular files transferred: 49,948 119 | 2022/07/14 18:09:11 [549] Total file size: 8.67G bytes 120 | Total file size: 8.67G bytes 121 | 2022/07/14 18:09:11 [549] Total transferred file size: 8.67G bytes 122 | Total transferred file size: 8.67G bytes 123 | 2022/07/14 18:09:11 [549] Literal data: 8.67G bytes 124 | Literal data: 8.67G bytes 125 | 2022/07/14 18:09:11 [549] Matched data: 0 bytes 126 | Matched data: 0 bytes 127 | 2022/07/14 18:09:11 [549] File list size: 655.34K 128 | File list size: 655.34K 129 | 2022/07/14 18:09:11 [549] File list generation time: 0.138 seconds 130 | File list generation time: 0.138 seconds 131 | 2022/07/14 18:09:11 [549] File list transfer time: 0.000 seconds 132 | File list transfer time: 0.000 seconds 133 | 2022/07/14 18:09:11 [549] Total bytes sent: 8.67G 134 | Total bytes sent: 8.67G 135 | 2022/07/14 18:09:11 [549] Total bytes received: 12.79M 136 | Total bytes received: 12.79M 137 | 138 | 2022/07/14 18:09:11 [549] sent 8.67G bytes received 12.79M bytes 88.19M bytes/sec 139 | sent 8.67G bytes received 12.79M bytes 88.19M bytes/sec 140 | 2022/07/14 18:09:11 [549] total size is 8.67G speedup is 1.00 141 | total size is 8.67G speedup is 1.00`, 142 | want: Progress{ 143 | TotalFiles: &int49, 144 | TransferredData: &dataSize{ 145 | val: float64(8.67), 146 | unit: "G", 147 | }, 148 | TransferredFiles: 49948, 149 | }, 150 | wantStatus: preparing, 151 | wantUnProcessed: "total size is 8.67G speedup is 1.00", 152 | }, 153 | } 154 | for _, tt := range tests { 155 | t.Run(tt.name, func(t *testing.T) { 156 | got, unprocessed := parseRsyncLogs(tt.stdout) 157 | if a, b, e := intEqual(got.TotalFiles, tt.want.TotalFiles); !e { 158 | t.Errorf("parseRsyncLogs() totalFiles = %v, want %v", a, b) 159 | } 160 | if a, b, e := intEqual(got.TransferPercentage, tt.want.TransferPercentage); !e { 161 | t.Errorf("parseRsyncLogs() percentage = %v, want %v", a, b) 162 | } 163 | if a, b, e := dataEqual(got.TransferredData, tt.want.TransferredData); !e { 164 | t.Errorf("parseRsyncLogs() dataTransferred = %v, want %v", a, b) 165 | } 166 | if a, b, e := dataEqual(got.TransferRate, tt.want.TransferRate); !e { 167 | t.Errorf("parseRsyncLogs() speed = %v, want %v", a, b) 168 | } 169 | if got.TransferredFiles != tt.want.TransferredFiles { 170 | t.Errorf("parseRsyncLogs() 
transferredFiles = %d, want %d", got.TransferredFiles, tt.want.TransferredFiles) 171 | } 172 | if got.Status() != tt.wantStatus { 173 | t.Errorf("parseRsyncLogs() status = %v, want %v", got.Status(), tt.wantStatus) 174 | } 175 | if unprocessed != tt.wantUnProcessed { 176 | t.Errorf("parseRsyncLogs() unprocessed = %v, want %v", unprocessed, tt.wantUnProcessed) 177 | } 178 | }) 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /cmd/transfer-pvc/transfer-pvc.go: -------------------------------------------------------------------------------- 1 | package transfer_pvc 2 | 3 | import ( 4 | "context" 5 | "crypto/md5" 6 | "fmt" 7 | "io" 8 | "log" 9 | random "math/rand" 10 | "os" 11 | "strings" 12 | "time" 13 | 14 | logrusr "github.com/bombsimon/logrusr/v3" 15 | "github.com/go-logr/logr" 16 | configv1 "github.com/openshift/api/config/v1" 17 | routev1 "github.com/openshift/api/route/v1" 18 | "github.com/sirupsen/logrus" 19 | "github.com/spf13/cobra" 20 | corev1 "k8s.io/api/core/v1" 21 | networkingv1 "k8s.io/api/networking/v1" 22 | "k8s.io/apimachinery/pkg/api/errors" 23 | "k8s.io/apimachinery/pkg/api/resource" 24 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 26 | "k8s.io/apimachinery/pkg/types" 27 | errorsutil "k8s.io/apimachinery/pkg/util/errors" 28 | "k8s.io/apimachinery/pkg/util/wait" 29 | "k8s.io/cli-runtime/pkg/genericclioptions" 30 | "k8s.io/client-go/kubernetes/scheme" 31 | "k8s.io/client-go/rest" 32 | clientcmdapi "k8s.io/client-go/tools/clientcmd/api" 33 | "sigs.k8s.io/controller-runtime/pkg/client" 34 | 35 | "github.com/backube/pvc-transfer/endpoint" 36 | ingressendpoint "github.com/backube/pvc-transfer/endpoint/ingress" 37 | routeendpoint "github.com/backube/pvc-transfer/endpoint/route" 38 | "github.com/backube/pvc-transfer/transfer" 39 | rsynctransfer "github.com/backube/pvc-transfer/transfer/rsync" 40 | "github.com/backube/pvc-transfer/transport" 41 | stunneltransport "github.com/backube/pvc-transfer/transport/stunnel" 42 | securityv1 "github.com/openshift/api/security/v1" 43 | openshiftuid "github.com/openshift/library-go/pkg/security/uid" 44 | ) 45 | 46 | type endpointType string 47 | 48 | const ( 49 | endpointNginx endpointType = "nginx-ingress" 50 | endpointRoute endpointType = "route" 51 | ) 52 | 53 | type TransferPVCCommand struct { 54 | configFlags *genericclioptions.ConfigFlags 55 | genericclioptions.IOStreams 56 | logger logrus.FieldLogger 57 | 58 | sourceContext *clientcmdapi.Context 59 | destinationContext *clientcmdapi.Context 60 | 61 | // user defined flags for the subcommand 62 | Flags 63 | } 64 | 65 | // Flags defines options configured by users 66 | // via command line flags of the subcommand 67 | type Flags struct { 68 | PVC PvcFlags 69 | Endpoint EndpointFlags 70 | SourceContext string 71 | DestinationContext string 72 | SourceImage string 73 | DestinationImage string 74 | Verify bool 75 | RsyncFlags []string 76 | ProgressOutput string 77 | } 78 | 79 | // EndpointFlags defines command line flags specific 80 | // to the endpoint to be used in transfer 81 | type EndpointFlags struct { 82 | // Type defines the endpoint type 83 | Type endpointType 84 | // Subdomain defines host of the endpoint 85 | Subdomain string 86 | // IngressClass defines class for ingress 87 | IngressClass string 88 | } 89 | 90 | func (e EndpointFlags) Validate() error { 91 | // default endpoint type is nginx-ingress 92 | if e.Type == "" { 93 | e.Type = endpointNginx 94 | } 95 | switch 
e.Type {
96 | case endpointNginx:
97 | if e.Subdomain == "" {
98 | return fmt.Errorf("subdomain cannot be empty when using nginx ingress")
99 | }
100 | }
101 | return nil
102 | }
103 |
104 | // PvcFlags defines command line flags for the PVC to be transferred
105 | type PvcFlags struct {
106 | // Name defines Name of the PVC,
107 | // mapped in format <source>:<destination>
108 | Name mappedNameVar
109 | // Namespace defines Namespace of the PVC,
110 | // mapped in format <source>:<destination>
111 | Namespace mappedNameVar
112 | // StorageClassName defines storage class of destination PVC
113 | StorageClassName string
114 | // StorageRequests defines requested capacity of destination PVC
115 | StorageRequests quantityVar
116 | }
117 |
118 | func (p *PvcFlags) Validate() error {
119 | if p.Name.source == "" {
120 | return fmt.Errorf("source pvc name cannot be empty")
121 | }
122 | if p.Name.destination == "" {
123 | return fmt.Errorf("destination pvc name cannot be empty")
124 | }
125 | if p.Namespace.source == "" {
126 | return fmt.Errorf("source pvc namespace cannot be empty")
127 | }
128 | if p.Namespace.destination == "" {
129 | return fmt.Errorf("destination pvc namespace cannot be empty")
130 | }
131 | return nil
132 | }
133 |
134 | func NewTransferPVCCommand(streams genericclioptions.IOStreams) *cobra.Command {
135 | t := &TransferPVCCommand{
136 | configFlags: genericclioptions.NewConfigFlags(false),
137 | Flags: Flags{
138 | PVC: PvcFlags{
139 | Name: mappedNameVar{},
140 | Namespace: mappedNameVar{},
141 | StorageRequests: quantityVar{},
142 | },
143 | },
144 | IOStreams: streams,
145 | logger: logrus.New(),
146 | }
147 |
148 | cmd := &cobra.Command{
149 | Use: "transfer-pvc",
150 | Short: "transfer PVC data from one kube context to another",
151 | RunE: func(c *cobra.Command, args []string) error {
152 | if err := t.Complete(c, args); err != nil {
153 | return err
154 | }
155 | if err := t.Validate(); err != nil {
156 | return err
157 | }
158 | if err := t.Run(); err != nil {
159 | return err
160 | }
161 |
162 | return nil
163 | },
164 | }
165 | addFlagsToTransferPVCCommand(&t.Flags, cmd)
166 |
167 | return cmd
168 | }
169 |
170 | func addFlagsToTransferPVCCommand(c *Flags, cmd *cobra.Command) {
171 | cmd.Flags().StringVar(&c.SourceContext, "source-context", "", "Name of the source context in current kubeconfig")
172 | cmd.Flags().StringVar(&c.DestinationContext, "destination-context", "", "Name of the destination context in current kubeconfig")
173 | cmd.Flags().StringVar(&c.SourceImage, "source-image", "", "The container image to use on the source cluster. Defaults to quay.io/konveyor/rsync-transfer:latest")
174 | cmd.Flags().StringVar(&c.DestinationImage, "destination-image", "", "The container image to use on the destination cluster. Defaults to quay.io/konveyor/rsync-transfer:latest")
175 |
176 | cmd.Flags().Var(&c.PVC.Name, "pvc-name", "Name of the PVC to be transferred. Optionally, source name can be mapped to a different destination name in format <source>:<destination>")
177 | cmd.Flags().Var(&c.PVC.Namespace, "pvc-namespace", "Namespace of the PVC to be transferred. 
Optionally, source namespace can be mapped to a different destination namespace in format <source>:<destination>")
178 | cmd.Flags().StringVar(&c.PVC.StorageClassName, "dest-storage-class", "", "Storage class for the destination PVC")
179 | cmd.Flags().Var(&c.PVC.StorageRequests, "dest-storage-requests", "Requested storage capacity for the destination PVC")
180 | cmd.Flags().Var(&c.Endpoint.Type, "endpoint", "The type of networking endpoint to use to accept traffic in destination cluster. Must be `nginx-ingress` or `route`.")
181 | cmd.Flags().StringVar(&c.Endpoint.Subdomain, "subdomain", "", "Subdomain to use for the ingress endpoint")
182 | cmd.Flags().StringVar(&c.Endpoint.IngressClass, "ingress-class", "", "IngressClass to use for the ingress endpoint")
183 | cmd.Flags().BoolVar(&c.Verify, "verify", false, "Enable checksum verification")
184 | cmd.Flags().StringVar(&c.ProgressOutput, "output", "", "Write data transfer stats to specified output file")
185 | cmd.MarkFlagRequired("source-context")
186 | cmd.MarkFlagRequired("destination-context")
187 | cmd.MarkFlagRequired("pvc-name")
188 | }
189 |
190 | func (t *TransferPVCCommand) Complete(c *cobra.Command, args []string) error {
191 | config := t.configFlags.ToRawKubeConfigLoader()
192 | rawConfig, err := config.RawConfig()
193 | if err != nil {
194 | return err
195 | }
196 |
197 | if t.Flags.DestinationContext == "" {
198 | t.Flags.DestinationContext = *t.configFlags.Context
199 | }
200 |
201 | for name, context := range rawConfig.Contexts {
202 | if name == t.Flags.SourceContext {
203 | t.sourceContext = context
204 | }
205 | if name == t.Flags.DestinationContext {
206 | t.destinationContext = context
207 | }
208 | }
209 |
210 | if t.PVC.Namespace.source == "" && t.sourceContext != nil {
211 | t.PVC.Namespace.source = t.sourceContext.Namespace
212 | }
213 |
214 | if t.PVC.Namespace.destination == "" && t.destinationContext != nil {
215 | t.PVC.Namespace.destination = t.destinationContext.Namespace
216 | }
217 |
218 | return nil
219 | }
220 |
221 | func (t *TransferPVCCommand) Validate() error {
222 | if t.sourceContext == nil {
223 | return fmt.Errorf("cannot evaluate source context")
224 | }
225 |
226 | if t.destinationContext == nil {
227 | return fmt.Errorf("cannot evaluate destination context")
228 | }
229 |
230 | if t.sourceContext.Cluster == t.destinationContext.Cluster {
231 | return fmt.Errorf("both source and destination clusters are the same, this is not supported right now, coming soon")
232 | }
233 |
234 | err := t.PVC.Validate()
235 | if err != nil {
236 | return err
237 | }
238 |
239 | err = t.Endpoint.Validate()
240 | if err != nil {
241 | return err
242 | }
243 |
244 | return nil
245 | }
246 |
247 | func (t *TransferPVCCommand) Run() error {
248 | return t.run()
249 | }
250 |
251 | func (t *TransferPVCCommand) getClientFromContext(ctx string) (client.Client, error) {
252 | restConfig, err := t.getRestConfigFromContext(ctx)
253 | if err != nil {
254 | return nil, err
255 | }
256 |
257 | err = routev1.Install(scheme.Scheme)
258 | if err != nil {
259 | return nil, err
260 | }
261 |
262 | if t.Endpoint.Type == endpointRoute {
263 | err = configv1.AddToScheme(scheme.Scheme)
264 | if err != nil {
265 | return nil, err
266 | }
267 | }
268 |
269 | return client.New(restConfig, client.Options{Scheme: scheme.Scheme})
270 | }
271 |
272 | func (t *TransferPVCCommand) getRestConfigFromContext(ctx string) (*rest.Config, error) {
273 | c := ctx
274 | t.configFlags.Context = &c
275 |
276 | return t.configFlags.ToRESTConfig()
277 | }
278 |
279 | func (t 
*TransferPVCCommand) run() error { 280 | logrusLog := logrus.New() 281 | logrusLog.SetFormatter(&logrus.JSONFormatter{}) 282 | logger := logrusr.New(logrusLog).WithName("transfer-pvc") 283 | 284 | srcCfg, err := t.getRestConfigFromContext(t.Flags.SourceContext) 285 | if err != nil { 286 | log.Fatal(err, "unable to get source rest config") 287 | } 288 | 289 | srcClient, err := t.getClientFromContext(t.Flags.SourceContext) 290 | if err != nil { 291 | log.Fatal(err, "unable to get source client") 292 | } 293 | destClient, err := t.getClientFromContext(t.Flags.DestinationContext) 294 | if err != nil { 295 | log.Fatal(err, "unable to get destination client") 296 | } 297 | 298 | // set up the PVC on destination to receive the data 299 | srcPVC := &corev1.PersistentVolumeClaim{} 300 | err = srcClient.Get( 301 | context.TODO(), 302 | client.ObjectKey{ 303 | Namespace: t.PVC.Namespace.source, 304 | Name: t.PVC.Name.source, 305 | }, 306 | srcPVC, 307 | ) 308 | if err != nil { 309 | log.Fatal(err, "unable to get source PVC") 310 | } 311 | 312 | destPVC := t.buildDestinationPVC(srcPVC) 313 | err = destClient.Create(context.TODO(), destPVC, &client.CreateOptions{}) 314 | if err != nil && !errors.IsAlreadyExists(err) { 315 | log.Fatal(err, "unable to create destination PVC") 316 | } 317 | 318 | labels := map[string]string{ 319 | "app.kubernetes.io/name": "crane", 320 | "app.kubernetes.io/component": "transfer-pvc", 321 | "app.konveyor.io/created-for-pvc": getValidatedResourceName(srcPVC.Name), 322 | } 323 | 324 | e, err := createEndpoint(t.Endpoint, destPVC, labels, logger, destClient) 325 | if err != nil { 326 | log.Fatal(err, "failed creating endpoint") 327 | } 328 | 329 | if err := waitForEndpoint(e, destClient); err != nil { 330 | log.Fatal("endpoint not healthy") 331 | } 332 | 333 | stunnelServer, err := stunneltransport.NewServer( 334 | context.TODO(), 335 | destClient, 336 | logger, 337 | types.NamespacedName{ 338 | Name: getValidatedResourceName(destPVC.Name), 339 | Namespace: destPVC.Namespace, 340 | }, e, &transport.Options{ 341 | Labels: labels, 342 | Image: t.Flags.DestinationImage, 343 | }) 344 | if err != nil { 345 | log.Fatal(err, "error creating stunnel server") 346 | } 347 | 348 | secretList := &corev1.SecretList{} 349 | err = destClient.List( 350 | context.TODO(), 351 | secretList, 352 | client.InNamespace(destPVC.Namespace), 353 | client.MatchingLabels(labels)) 354 | if err != nil { 355 | log.Fatal(err, "failed to find certificate secrets") 356 | } 357 | 358 | for i := range secretList.Items { 359 | destSecret := &secretList.Items[i] 360 | srcSecret := &corev1.Secret{ 361 | ObjectMeta: metav1.ObjectMeta{ 362 | Name: destSecret.Name, 363 | Namespace: srcPVC.Namespace, 364 | Labels: destSecret.Labels, 365 | Annotations: destSecret.Annotations, 366 | }, 367 | StringData: destSecret.StringData, 368 | Data: destSecret.Data, 369 | } 370 | err = srcClient.Create(context.TODO(), srcSecret) 371 | if err != nil { 372 | log.Fatal(err, "failed to create certificate secret on source cluster") 373 | } 374 | } 375 | 376 | stunnelClient, err := stunneltransport.NewClient( 377 | context.TODO(), 378 | srcClient, 379 | logger, 380 | types.NamespacedName{ 381 | Name: getValidatedResourceName(srcPVC.Name), 382 | Namespace: srcPVC.Namespace, 383 | }, e.Hostname(), e.IngressPort(), &transport.Options{ 384 | Labels: labels, 385 | Image: t.Flags.DestinationImage, 386 | }, 387 | ) 388 | if err != nil { 389 | log.Fatal(err, "error creating stunnel server") 390 | } 391 | 392 | destPVCList := 
transfer.NewSingletonPVC(destPVC) 393 | srcPVCList := transfer.NewSingletonPVC(srcPVC) 394 | 395 | rsyncPassword := getRsyncPassword() 396 | 397 | serverPodSecContext, err := getRsyncServerPodSecurityContext(destClient, destPVC.Namespace) 398 | if err != nil { 399 | log.Fatal(err, "error creating security context for rsync server") 400 | } 401 | 402 | trueBool := bool(true) 403 | falseBool := bool(false) 404 | rsyncServer, err := rsynctransfer.NewServer( 405 | context.TODO(), 406 | destClient, 407 | logger, destPVCList, stunnelServer, e, labels, nil, rsyncPassword, 408 | transfer.PodOptions{ 409 | ContainerSecurityContext: corev1.SecurityContext{ 410 | Capabilities: &corev1.Capabilities{ 411 | Drop: []corev1.Capability{"ALL"}, 412 | }, 413 | RunAsNonRoot: &trueBool, 414 | AllowPrivilegeEscalation: &falseBool, 415 | SeccompProfile: &corev1.SeccompProfile{ 416 | Type: corev1.SeccompProfileTypeRuntimeDefault, 417 | }, 418 | }, 419 | PodSecurityContext: corev1.PodSecurityContext{ 420 | FSGroup: serverPodSecContext.FSGroup, 421 | }, 422 | Image: t.Flags.DestinationImage, 423 | }, 424 | ) 425 | if err != nil { 426 | log.Fatal(err, "error creating rsync transfer server") 427 | } 428 | 429 | _ = wait.PollUntil(time.Second*5, func() (done bool, err error) { 430 | ready, err := rsyncServer.IsHealthy(context.TODO(), destClient) 431 | if err != nil { 432 | log.Println(err, "unable to check rsync server health, retrying...") 433 | return false, nil 434 | } 435 | return ready, nil 436 | }, make(<-chan struct{})) 437 | 438 | nodeName, err := getNodeNameForPVC(srcClient, srcPVC.Namespace, srcPVC.Name) 439 | if err != nil { 440 | log.Fatal(err, "failed to find node name") 441 | } 442 | 443 | clientPodSecCtx, err := getRsyncClientPodSecurityContext(srcClient, srcPVC.Namespace) 444 | if err != nil { 445 | log.Fatal(err, "error creating security context for rsync server") 446 | } 447 | 448 | _, err = rsynctransfer.NewClient( 449 | context.TODO(), 450 | srcClient, srcPVCList, stunnelClient, e, logger, "rsync-client", labels, nil, rsyncPassword, 451 | transfer.PodOptions{ 452 | NodeName: nodeName, 453 | CommandOptions: rsynctransfer.NewDefaultOptionsFrom( 454 | verify(t.Verify), 455 | restrictedContainers(true), 456 | verbose(true), 457 | ), 458 | ContainerSecurityContext: corev1.SecurityContext{ 459 | Privileged: &falseBool, 460 | Capabilities: &corev1.Capabilities{ 461 | Drop: []corev1.Capability{"ALL"}, 462 | }, 463 | RunAsNonRoot: &trueBool, 464 | RunAsUser: clientPodSecCtx.RunAsUser, 465 | AllowPrivilegeEscalation: &falseBool, 466 | }, 467 | PodSecurityContext: corev1.PodSecurityContext{ 468 | FSGroup: clientPodSecCtx.FSGroup, 469 | }, 470 | Image: t.Flags.SourceImage, 471 | }, 472 | ) 473 | if err != nil { 474 | log.Fatal(err, "failed to create rsync client") 475 | } 476 | 477 | err = followClientLogs( 478 | srcCfg, types.NamespacedName{Name: srcPVC.Name, Namespace: srcPVC.Namespace}, labels, t.ProgressOutput) 479 | if err != nil { 480 | log.Fatal(err, "error following rsync client logs") 481 | } 482 | 483 | return garbageCollect(srcClient, destClient, labels, t.Endpoint.Type, t.PVC.Namespace) 484 | } 485 | 486 | // getValidatedResourceName returns a name for resources 487 | // created by the command such that they don't fail validations 488 | func getValidatedResourceName(name string) string { 489 | if len(name) < 63 { 490 | return name 491 | } else { 492 | return fmt.Sprintf("crane-%x", md5.Sum([]byte(name))) 493 | } 494 | } 495 | 496 | // getNodeNameForPVC returns name of the node on which the PVC is 
currently mounted on 497 | // returns name of the node as a string, and an error 498 | func getNodeNameForPVC(srcClient client.Client, namespace string, pvcName string) (string, error) { 499 | podList := corev1.PodList{} 500 | err := srcClient.List(context.TODO(), &podList, client.InNamespace(namespace)) 501 | if err != nil { 502 | return "", err 503 | } 504 | for _, pod := range podList.Items { 505 | if pod.Status.Phase == corev1.PodRunning { 506 | for _, vol := range pod.Spec.Volumes { 507 | if vol.PersistentVolumeClaim != nil { 508 | if vol.PersistentVolumeClaim.ClaimName == pvcName { 509 | return pod.Spec.NodeName, nil 510 | } 511 | } 512 | } 513 | } 514 | } 515 | return "", nil 516 | } 517 | 518 | // getRsyncPassword returns a random password for rsync 519 | func getRsyncPassword() string { 520 | var letters = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") 521 | random.Seed(time.Now().UnixNano()) 522 | password := make([]byte, 6) 523 | for i := range password { 524 | password[i] = letters[random.Intn(len(letters))] 525 | } 526 | return string(password) 527 | } 528 | 529 | func getIDsForNamespace(client client.Client, namespace string) (*corev1.SecurityContext, error) { 530 | ctx := &corev1.SecurityContext{} 531 | ns := &corev1.Namespace{} 532 | err := client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns) 533 | if err != nil { 534 | return nil, err 535 | } 536 | if annotationVal, found := ns.Annotations[securityv1.UIDRangeAnnotation]; found { 537 | uidBlock, err := openshiftuid.ParseBlock(annotationVal) 538 | if err != nil { 539 | return nil, nil 540 | } 541 | min := int64(uidBlock.Start) 542 | ctx.RunAsUser = &min 543 | } 544 | if annotationVal, found := ns.Annotations[securityv1.SupplementalGroupsAnnotation]; found { 545 | uidBlock, err := openshiftuid.ParseBlock(annotationVal) 546 | if err != nil { 547 | return nil, nil 548 | } 549 | min := int64(uidBlock.Start) 550 | ctx.RunAsGroup = &min 551 | } 552 | return ctx, nil 553 | } 554 | 555 | func getRsyncClientPodSecurityContext(client client.Client, namespace string) (*corev1.PodSecurityContext, error) { 556 | ps := &corev1.PodSecurityContext{} 557 | ctx, err := getIDsForNamespace(client, namespace) 558 | if err != nil { 559 | return ps, err 560 | } 561 | ps.RunAsUser = ctx.RunAsUser 562 | ps.RunAsGroup = ctx.RunAsGroup 563 | ps.FSGroup = ctx.RunAsGroup 564 | return ps, nil 565 | } 566 | 567 | func getRsyncServerPodSecurityContext(client client.Client, namespace string) (*corev1.PodSecurityContext, error) { 568 | ps := &corev1.PodSecurityContext{} 569 | ctx, err := getIDsForNamespace(client, namespace) 570 | if err != nil { 571 | return ps, err 572 | } 573 | ps.RunAsUser = ctx.RunAsUser 574 | ps.RunAsGroup = ctx.RunAsGroup 575 | ps.FSGroup = ctx.RunAsGroup 576 | return ps, nil 577 | } 578 | 579 | func garbageCollect(srcClient client.Client, destClient client.Client, labels map[string]string, endpoint endpointType, namespace mappedNameVar) error { 580 | srcGVK := []client.Object{ 581 | &corev1.Pod{}, 582 | &corev1.ConfigMap{}, 583 | &corev1.Secret{}, 584 | } 585 | destGVK := []client.Object{ 586 | &corev1.Pod{}, 587 | &corev1.ConfigMap{}, 588 | &corev1.Secret{}, 589 | } 590 | switch endpoint { 591 | case endpointRoute: 592 | destGVK = append(destGVK, &routev1.Route{}) 593 | case endpointNginx: 594 | destGVK = append(destGVK, &networkingv1.Ingress{}) 595 | } 596 | 597 | err := deleteResourcesForGVK(srcClient, srcGVK, labels, namespace.source) 598 | if err != nil { 599 | return err 600 | } 601 | 602 | err 
= deleteResourcesForGVK(destClient, destGVK, labels, namespace.destination) 603 | if err != nil { 604 | return err 605 | } 606 | 607 | return deleteResourcesIteratively(destClient, []client.Object{ 608 | &corev1.Service{ 609 | TypeMeta: metav1.TypeMeta{ 610 | Kind: "Service", 611 | APIVersion: corev1.SchemeGroupVersion.Version, 612 | }, 613 | }}, labels, namespace.destination) 614 | } 615 | 616 | func deleteResourcesIteratively(c client.Client, iterativeTypes []client.Object, labels map[string]string, namespace string) error { 617 | listOptions := []client.ListOption{ 618 | client.MatchingLabels(labels), 619 | client.InNamespace(namespace), 620 | } 621 | errs := []error{} 622 | for _, objList := range iterativeTypes { 623 | ulist := &unstructured.UnstructuredList{} 624 | ulist.SetGroupVersionKind(objList.GetObjectKind().GroupVersionKind()) 625 | err := c.List(context.TODO(), ulist, listOptions...) 626 | if err != nil { 627 | // if we hit error with one api still try all others 628 | errs = append(errs, err) 629 | continue 630 | } 631 | for _, item := range ulist.Items { 632 | err = c.Delete(context.TODO(), &item, client.PropagationPolicy(metav1.DeletePropagationBackground)) 633 | if err != nil { 634 | // if we hit error deleting on continue delete others 635 | errs = append(errs, err) 636 | } 637 | } 638 | } 639 | return errorsutil.NewAggregate(errs) 640 | } 641 | 642 | func deleteResourcesForGVK(c client.Client, gvk []client.Object, labels map[string]string, namespace string) error { 643 | for _, obj := range gvk { 644 | err := c.DeleteAllOf(context.TODO(), obj, client.InNamespace(namespace), client.MatchingLabels(labels)) 645 | if err != nil { 646 | return err 647 | } 648 | } 649 | return nil 650 | } 651 | 652 | // LogStreams defines functions to read from a stream of pod logs 653 | type LogStreams interface { 654 | // Init initiates the log streams 655 | Init() error 656 | // Streams returns streams for output and error logs 657 | // returns a stream to communicate errors 658 | Streams() (stdout chan string, stderr chan string, err chan error) 659 | // Close closes log streams 660 | Close() 661 | } 662 | 663 | func followClientLogs(srcConfig *rest.Config, pvc types.NamespacedName, labels map[string]string, outputFile string) error { 664 | logReader := NewRsyncLogStream(srcConfig, pvc, labels, outputFile) 665 | err := logReader.Init() 666 | if err != nil { 667 | return err 668 | } 669 | defer logReader.Close() 670 | stdout, stderr, errChan := logReader.Streams() 671 | for { 672 | closed := false 673 | select { 674 | case out := <-stdout: 675 | os.Stdout.WriteString(out) 676 | case err := <-stderr: 677 | os.Stderr.WriteString(err) 678 | case e := <-errChan: 679 | if e != io.EOF { 680 | err = e 681 | } 682 | closed = true 683 | } 684 | if err != nil || closed { 685 | break 686 | } 687 | } 688 | return err 689 | } 690 | 691 | // waitForEndpoint waits for endpoint to become ready 692 | func waitForEndpoint(e endpoint.Endpoint, destClient client.Client) error { 693 | return wait.PollUntil(time.Second*5, func() (done bool, err error) { 694 | ready, err := e.IsHealthy(context.TODO(), destClient) 695 | if err != nil { 696 | log.Println(err, "unable to check endpoint health, retrying...") 697 | return false, nil 698 | } 699 | return ready, nil 700 | }, make(<-chan struct{})) 701 | } 702 | 703 | // createEndpoint creates an endpoint based on provided endpointFlags 704 | func createEndpoint( 705 | endpointFlags EndpointFlags, pvc *corev1.PersistentVolumeClaim, 706 | labels map[string]string, logger 
logr.Logger, destClient client.Client) (endpoint.Endpoint, error) { 707 | switch endpointFlags.Type { 708 | case endpointNginx: 709 | annotations := map[string]string{ 710 | ingressendpoint.NginxIngressPassthroughAnnotation: "true", 711 | } 712 | err := ingressendpoint.AddToScheme(scheme.Scheme) 713 | if err != nil { 714 | return nil, err 715 | } 716 | e, err := ingressendpoint.New( 717 | context.TODO(), destClient, logger, 718 | types.NamespacedName{ 719 | Namespace: pvc.Namespace, 720 | Name: getValidatedResourceName(pvc.Name), 721 | }, &endpointFlags.IngressClass, 722 | endpointFlags.Subdomain, 723 | labels, annotations, nil) 724 | return e, err 725 | case endpointRoute: 726 | err := routeendpoint.AddToScheme(scheme.Scheme) 727 | if err != nil { 728 | return nil, err 729 | } 730 | resourceName := types.NamespacedName{ 731 | Namespace: pvc.Namespace, 732 | Name: getValidatedResourceName(pvc.Name), 733 | } 734 | hostname, err := getRouteHostName(destClient, resourceName) 735 | if err != nil { 736 | return nil, err 737 | } 738 | e, err := routeendpoint.New( 739 | context.TODO(), destClient, logger, 740 | resourceName, routeendpoint.EndpointTypePassthrough, 741 | hostname, labels, nil) 742 | return e, err 743 | default: 744 | return nil, fmt.Errorf("unrecognized endpoint type") 745 | } 746 | } 747 | 748 | // getRouteHostName returns a hostname for Route created by the subcommand 749 | func getRouteHostName(client client.Client, namespacedName types.NamespacedName) (*string, error) { 750 | routeNamePrefix := fmt.Sprintf("%s-%s", namespacedName.Name, namespacedName.Namespace) 751 | // if route prefix is within limits, default hostname can be used 752 | if len(routeNamePrefix) <= 62 { 753 | return nil, nil 754 | } 755 | // if route prefix exceeds limits, a custom hostname will be provided 756 | ingressConfig := &configv1.Ingress{} 757 | err := client.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, ingressConfig) 758 | if err != nil { 759 | return nil, err 760 | } 761 | hostname := fmt.Sprintf("%s.%s", routeNamePrefix[:62], ingressConfig.Spec.Domain) 762 | return &hostname, nil 763 | } 764 | 765 | // buildDestinationPVC given a source PVC, returns a PVC to be created in the destination cluster 766 | func (t *TransferPVCCommand) buildDestinationPVC(sourcePVC *corev1.PersistentVolumeClaim) *corev1.PersistentVolumeClaim { 767 | pvc := &corev1.PersistentVolumeClaim{} 768 | pvc.Namespace = t.PVC.Namespace.destination 769 | pvc.Name = t.PVC.Name.destination 770 | pvc.Labels = sourcePVC.Labels 771 | pvc.Spec = *sourcePVC.Spec.DeepCopy() 772 | if t.PVC.StorageRequests.quantity != nil { 773 | pvc.Spec.Resources.Requests[corev1.ResourceStorage] = *t.PVC.StorageRequests.quantity 774 | } 775 | if t.PVC.StorageClassName != "" { 776 | pvc.Spec.StorageClassName = &t.PVC.StorageClassName 777 | } 778 | // clear fields 779 | pvc.Spec.VolumeMode = nil 780 | pvc.Spec.VolumeName = "" 781 | return pvc 782 | } 783 | 784 | // verify enables/disables --checksum option in Rsync 785 | type verify bool 786 | 787 | func (v verify) ApplyTo(opts *rsynctransfer.CommandOptions) error { 788 | if bool(v) { 789 | opts.Extras = append(opts.Extras, "--checksum") 790 | } else { 791 | newExtras := []string{} 792 | for _, opt := range opts.Extras { 793 | if opt != "--checksum" && 794 | opt != "-c" { 795 | newExtras = append(newExtras, opt) 796 | } 797 | } 798 | opts.Extras = newExtras 799 | } 800 | return nil 801 | } 802 | 803 | // restrictedContainers enables/disables Rsync options that 804 | // require privileged 
containers
805 | type restrictedContainers bool
806 |
807 | func (r restrictedContainers) ApplyTo(opts *rsynctransfer.CommandOptions) error {
808 | opts.Groups = bool(!r)
809 | opts.Owners = bool(!r)
810 | opts.DeviceFiles = bool(!r)
811 | opts.SpecialFiles = bool(!r)
812 | opts.Extras = append(
813 | opts.Extras, "--omit-dir-times")
814 | return nil
815 | }
816 |
817 | type verbose bool
818 |
819 | func (i verbose) ApplyTo(opts *rsynctransfer.CommandOptions) error {
820 | opts.Info = []string{
821 | "COPY", "DEL", "STATS2", "PROGRESS2", "FLIST2",
822 | }
823 | opts.Extras = append(opts.Extras, "--progress")
824 | return nil
825 | }
826 |
827 | // mappedNameVar defines a mapping of source to destination names
828 | type mappedNameVar struct {
829 | source string
830 | destination string
831 | }
832 |
833 | // String returns the string representation of a mapped name;
834 | // follows format <source>:<destination>
835 | func (m *mappedNameVar) String() string {
836 | return fmt.Sprintf("%s:%s", m.source, m.destination)
837 | }
838 |
839 | func (m *mappedNameVar) Set(val string) error {
840 | source, destination, err := parseSourceDestinationMapping(val)
841 | if err != nil {
842 | return err
843 | }
844 | m.source = source
845 | m.destination = destination
846 | return nil
847 | }
848 |
849 | func (m *mappedNameVar) Type() string {
850 | return "string"
851 | }
852 |
853 | // parseSourceDestinationMapping, given a mapping of source to destination names,
854 | // returns two separate strings. The mapping follows format <source>:<destination>.
855 | func parseSourceDestinationMapping(mapping string) (source string, destination string, err error) {
856 | split := strings.Split(mapping, ":")
857 | switch len(split) {
858 | case 1:
859 | if split[0] == "" {
860 | return "", "", fmt.Errorf("source name cannot be empty")
861 | }
862 | return split[0], split[0], nil
863 | case 2:
864 | if split[1] == "" || split[0] == "" {
865 | return "", "", fmt.Errorf("source or destination name cannot be empty")
866 | }
867 | return split[0], split[1], nil
868 | default:
869 | return "", "", fmt.Errorf("invalid name mapping. 
must be of format <source>:<destination>")
870 | }
871 | }
872 |
873 | type quantityVar struct {
874 | quantity *resource.Quantity
875 | }
876 |
877 | func (q *quantityVar) String() string {
878 | return q.quantity.String()
879 | }
880 |
881 | func (q *quantityVar) Set(val string) error {
882 | parsedQuantity, err := resource.ParseQuantity(val)
883 | if err != nil {
884 | return err
885 | }
886 | q.quantity = &parsedQuantity
887 | return nil
888 | }
889 |
890 | func (q *quantityVar) Type() string {
891 | return "string"
892 | }
893 |
894 | func (e endpointType) String() string {
895 | return string(e)
896 | }
897 |
898 | func (e *endpointType) Set(val string) error {
899 | switch val {
900 | case string(endpointNginx), string(endpointRoute):
901 | *e = endpointType(val)
902 | return nil
903 | default:
904 | return fmt.Errorf("unsupported endpoint type %s", val)
905 | }
906 | }
907 |
908 | func (e endpointType) Type() string {
909 | return "string"
910 | }
911 |
-------------------------------------------------------------------------------- /cmd/transfer-pvc/transfer-pvc_test.go: --------------------------------------------------------------------------------
1 | package transfer_pvc
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func Test_parseSourceDestinationMapping(t *testing.T) {
8 | tests := []struct {
9 | name string
10 | mapping string
11 | wantSource string
12 | wantDestination string
13 | wantErr bool
14 | }{
15 | {
16 | name: "given a string with only source name, should return same values for both source and destination",
17 | mapping: "validstring",
18 | wantSource: "validstring",
19 | wantDestination: "validstring",
20 | wantErr: false,
21 | },
22 | {
23 | name: "given a string with a valid source to destination mapping, should return correct values for source and destination",
24 | mapping: "source:destination",
25 | wantSource: "source",
26 | wantDestination: "destination",
27 | wantErr: false,
28 | },
29 | {
30 | name: "given a string with invalid source to destination mapping, should return error",
31 | mapping: "source::destination",
32 | wantSource: "",
33 | wantDestination: "",
34 | wantErr: true,
35 | },
36 | {
37 | name: "given a string with empty destination name, should return error",
38 | mapping: "source:",
39 | wantSource: "",
40 | wantDestination: "",
41 | wantErr: true,
42 | },
43 | {
44 | name: "given a mapping with empty source and destination strings, should return error",
45 | mapping: ":",
46 | wantSource: "",
47 | wantDestination: "",
48 | wantErr: true,
49 | },
50 | {
51 | name: "given an empty string, should return error",
52 | mapping: "",
53 | wantSource: "",
54 | wantDestination: "",
55 | wantErr: true,
56 | },
57 | }
58 | for _, tt := range tests {
59 | t.Run(tt.name, func(t *testing.T) {
60 | gotSource, gotDestination, err := parseSourceDestinationMapping(tt.mapping)
61 | if (err != nil) != tt.wantErr {
62 | t.Errorf("parseSourceDestinationMapping() error = %v, wantErr %v", err, tt.wantErr)
63 | return
64 | }
65 | if gotSource != tt.wantSource {
66 | t.Errorf("parseSourceDestinationMapping() gotSource = %v, want %v", gotSource, tt.wantSource)
67 | }
68 | if gotDestination != tt.wantDestination {
69 | t.Errorf("parseSourceDestinationMapping() gotDestination = %v, want %v", gotDestination, tt.wantDestination)
70 | }
71 | })
72 | }
73 | }
74 |
-------------------------------------------------------------------------------- /cmd/transform/listplugins/listplugins.go: --------------------------------------------------------------------------------
1 | package listplugins
2 | 
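// Illustrative invocations of this subcommand, assuming the binary is
// installed as `crane` and the parent `transform` command wires up the
// persistent --plugin-dir and --skip-plugins flags as shown in
// transform.go further below:
//
//	crane transform list-plugins
//	crane transform list-plugins --plugin-dir ./plugins --skip-plugins PluginA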
3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/konveyor/crane/internal/flags" 8 | "github.com/konveyor/crane/internal/plugin" 9 | "github.com/spf13/cobra" 10 | "github.com/spf13/viper" 11 | ) 12 | 13 | type Options struct { 14 | // Two GlobalFlags struct fields are needed 15 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 16 | // 2. globalFlags for the args merged with values from the viper config file 17 | cobraGlobalFlags *flags.GlobalFlags 18 | globalFlags *flags.GlobalFlags 19 | // Two Flags struct fields are needed 20 | // 1. cobraFlags for explicit CLI args parsed by cobra 21 | // 2. Flags for the args merged with values from the viper config file 22 | cobraFlags Flags 23 | Flags 24 | } 25 | 26 | type Flags struct { 27 | PluginDir string `mapstructure:"plugin-dir"` 28 | SkipPlugins []string `mapstructure:"skip-plugins"` 29 | } 30 | 31 | func (o *Options) Complete(c *cobra.Command, args []string) error { 32 | // TODO: @sseago 33 | return nil 34 | } 35 | 36 | func (o *Options) Validate() error { 37 | // TODO: @sseago 38 | return nil 39 | } 40 | 41 | func (o *Options) Run() error { 42 | return o.run() 43 | } 44 | 45 | func NewListPluginsCommand(f *flags.GlobalFlags) *cobra.Command { 46 | o := &Options{ 47 | cobraGlobalFlags: f, 48 | } 49 | cmd := &cobra.Command{ 50 | Use: "list-plugins", 51 | Short: "Return a list of configured plugins", 52 | RunE: func(c *cobra.Command, args []string) error { 53 | if err := o.Complete(c, args); err != nil { 54 | return err 55 | } 56 | if err := o.Validate(); err != nil { 57 | return err 58 | } 59 | if err := o.Run(); err != nil { 60 | return err 61 | } 62 | 63 | return nil 64 | }, 65 | PreRun: func(cmd *cobra.Command, args []string) { 66 | viper.BindPFlags(cmd.Flags()) 67 | viper.Unmarshal(&o.Flags) 68 | viper.Unmarshal(&o.globalFlags) 69 | }, 70 | } 71 | 72 | // No separate addFlags needed here since all options inherit from parent PersistentFlags() 73 | 74 | return cmd 75 | } 76 | 77 | func (o *Options) run() error { 78 | pluginDir, err := filepath.Abs(o.PluginDir) 79 | if err != nil { 80 | return err 81 | } 82 | 83 | log := o.globalFlags.GetLogger() 84 | 85 | plugins, err := plugin.GetFilteredPlugins(pluginDir, o.SkipPlugins, log) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | for _, thisPlugin := range plugins { 91 | fmt.Printf("Plugin: %v (version %v)\n", thisPlugin.Metadata().Name, thisPlugin.Metadata().Version) 92 | } 93 | return nil 94 | } 95 | -------------------------------------------------------------------------------- /cmd/transform/optionals/optionals.go: -------------------------------------------------------------------------------- 1 | package optionals 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/konveyor/crane/internal/flags" 8 | "github.com/konveyor/crane/internal/plugin" 9 | "github.com/spf13/cobra" 10 | "github.com/spf13/viper" 11 | ) 12 | 13 | type Options struct { 14 | // Two GlobalFlags struct fields are needed 15 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 16 | // 2. globalFlags for the args merged with values from the viper config file 17 | cobraGlobalFlags *flags.GlobalFlags 18 | globalFlags *flags.GlobalFlags 19 | // Two Flags struct fields are needed 20 | // 1. cobraFlags for explicit CLI args parsed by cobra 21 | // 2. 
Flags for the args merged with values from the viper config file 22 | cobraFlags Flags 23 | Flags 24 | } 25 | 26 | type Flags struct { 27 | PluginDir string `mapstructure:"plugin-dir"` 28 | SkipPlugins []string `mapstructure:"skip-plugins"` 29 | } 30 | 31 | func (o *Options) Complete(c *cobra.Command, args []string) error { 32 | // TODO: @sseago 33 | return nil 34 | } 35 | 36 | func (o *Options) Validate() error { 37 | // TODO: @sseago 38 | return nil 39 | } 40 | 41 | func (o *Options) Run() error { 42 | return o.run() 43 | } 44 | 45 | func NewOptionalsCommand(f *flags.GlobalFlags) *cobra.Command { 46 | o := &Options{ 47 | cobraGlobalFlags: f, 48 | } 49 | cmd := &cobra.Command{ 50 | Use: "optionals", 51 | Short: "Return a list of optional fields accepted by configured plugins", 52 | RunE: func(c *cobra.Command, args []string) error { 53 | if err := o.Complete(c, args); err != nil { 54 | return err 55 | } 56 | if err := o.Validate(); err != nil { 57 | return err 58 | } 59 | if err := o.Run(); err != nil { 60 | return err 61 | } 62 | 63 | return nil 64 | }, 65 | PreRun: func(cmd *cobra.Command, args []string) { 66 | viper.BindPFlags(cmd.Flags()) 67 | viper.Unmarshal(&o.Flags) 68 | viper.Unmarshal(&o.globalFlags) 69 | }, 70 | } 71 | 72 | // No separate addFlags needed here since all options inherit from parent PersistentFlags() 73 | 74 | return cmd 75 | } 76 | 77 | func (o *Options) run() error { 78 | pluginDir, err := filepath.Abs(o.PluginDir) 79 | if err != nil { 80 | return err 81 | } 82 | log := o.globalFlags.GetLogger() 83 | 84 | plugins, err := plugin.GetFilteredPlugins(pluginDir, o.SkipPlugins, log) 85 | if err != nil { 86 | return err 87 | } 88 | 89 | for _, thisPlugin := range plugins { 90 | if len(thisPlugin.Metadata().OptionalFields) > 0 { 91 | fmt.Printf("Plugin: %v (version %v)\n", thisPlugin.Metadata().Name, thisPlugin.Metadata().Version) 92 | for _, field := range thisPlugin.Metadata().OptionalFields { 93 | fmt.Printf(" %v: %v\n", field.FlagName, field.Help) 94 | fmt.Printf(" Example: %v\n", field.Example) 95 | } 96 | } 97 | } 98 | return nil 99 | } 100 | -------------------------------------------------------------------------------- /cmd/transform/transform.go: -------------------------------------------------------------------------------- 1 | package transform 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | 10 | "github.com/konveyor/crane-lib/transform" 11 | "github.com/konveyor/crane/cmd/transform/listplugins" 12 | "github.com/konveyor/crane/cmd/transform/optionals" 13 | "github.com/konveyor/crane/internal/file" 14 | "github.com/konveyor/crane/internal/flags" 15 | "github.com/konveyor/crane/internal/plugin" 16 | "github.com/spf13/cobra" 17 | "github.com/spf13/viper" 18 | ) 19 | 20 | type Options struct { 21 | // Two GlobalFlags struct fields are needed 22 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 23 | // 2. globalFlags for the args merged with values from the viper config file 24 | cobraGlobalFlags *flags.GlobalFlags 25 | globalFlags *flags.GlobalFlags 26 | // Two Flags struct fields are needed 27 | // 1. cobraFlags for explicit CLI args parsed by cobra 28 | // 2. 
Flags for the args merged with values from the viper config file 29 | cobraFlags Flags 30 | Flags 31 | } 32 | 33 | type Flags struct { 34 | ExportDir string `mapstructure:"export-dir"` 35 | PluginDir string `mapstructure:"plugin-dir"` 36 | TransformDir string `mapstructure:"transform-dir"` 37 | IgnoredPatchesDir string `mapstructure:"ignored-patches-dir"` 38 | PluginPriorities []string `mapstructure:"plugin-priorities"` 39 | SkipPlugins []string `mapstructure:"skip-plugins"` 40 | OptionalFlags string `mapstructure:"optional-flags"` 41 | } 42 | 43 | func (o *Options) Complete(c *cobra.Command, args []string) error { 44 | // TODO: @sseago 45 | return nil 46 | } 47 | 48 | func (o *Options) Validate() error { 49 | // TODO: @sseago 50 | return nil 51 | } 52 | 53 | func (o *Options) Run() error { 54 | return o.run() 55 | } 56 | 57 | func NewTransformCommand(f *flags.GlobalFlags) *cobra.Command { 58 | o := &Options{ 59 | cobraGlobalFlags: f, 60 | } 61 | cmd := &cobra.Command{ 62 | Use: "transform", 63 | Short: "Create the transformations for the exported resources and plugins and save the results in a transform directory", 64 | RunE: func(c *cobra.Command, args []string) error { 65 | if err := o.Complete(c, args); err != nil { 66 | return err 67 | } 68 | if err := o.Validate(); err != nil { 69 | return err 70 | } 71 | if err := o.Run(); err != nil { 72 | return err 73 | } 74 | 75 | return nil 76 | }, 77 | PreRun: func(cmd *cobra.Command, args []string) { 78 | viper.BindPFlags(cmd.Flags()) 79 | viper.BindPFlags(cmd.PersistentFlags()) 80 | viper.Unmarshal(&o.Flags) 81 | viper.Unmarshal(&o.globalFlags) 82 | }, 83 | } 84 | 85 | addFlagsForOptions(&o.cobraFlags, cmd) 86 | cmd.AddCommand(optionals.NewOptionalsCommand(f)) 87 | cmd.AddCommand(listplugins.NewListPluginsCommand(f)) 88 | return cmd 89 | } 90 | 91 | func addFlagsForOptions(o *Flags, cmd *cobra.Command) { 92 | home := os.Getenv("HOME") 93 | defaultPluginDir := home + plugin.DefaultLocalPluginDir 94 | cmd.Flags().StringVarP(&o.ExportDir, "export-dir", "e", "export", "The path where the kubernetes resources are saved") 95 | cmd.Flags().StringVarP(&o.TransformDir, "transform-dir", "t", "transform", "The path where files that contain the transformations are saved") 96 | cmd.Flags().StringVar(&o.IgnoredPatchesDir, "ignored-patches-dir", "", "The path where files that contain transformations that were discarded due to conflicts are saved. If left blank, these files will not be saved.") 97 | cmd.Flags().StringSliceVar(&o.PluginPriorities, "plugin-priorities", nil, "A comma-separated list of plugin names. A plugin listed will take priority in the case of patch conflict over a plugin listed later in the list or over one not listed at all.") 98 | cmd.Flags().StringVar(&o.OptionalFlags, "optional-flags", "", "JSON string holding flag value pairs to be passed to all plugins ran in transform operation. (ie. '{\"foo-flag\": \"foo-a=/data,foo-b=/data\", \"bar-flag\": \"bar-value\"}')") 99 | // These flags pass down to subcommands 100 | cmd.PersistentFlags().StringVarP(&o.PluginDir, "plugin-dir", "p", defaultPluginDir, "The path where binary plugins are located") 101 | cmd.PersistentFlags().StringSliceVarP(&o.SkipPlugins, "skip-plugins", "s", nil, "A comma-separated list of plugins to skip") 102 | 103 | } 104 | 105 | func (o *Options) run() error { 106 | log := o.globalFlags.GetLogger() 107 | // Load all the resources from the export dir 108 | exportDir, err := filepath.Abs(o.ExportDir) 109 | if err != nil { 110 | // Handle errors better for users. 
111 | return err 112 | } 113 | 114 | pluginDir, err := filepath.Abs(o.PluginDir) 115 | if err != nil { 116 | return err 117 | } 118 | 119 | transformDir, err := filepath.Abs(o.TransformDir) 120 | if err != nil { 121 | return err 122 | } 123 | 124 | var ignoredPatchesDir string 125 | if o.IgnoredPatchesDir != "" { 126 | ignoredPatchesDir, err = filepath.Abs(o.IgnoredPatchesDir) 127 | if err != nil { 128 | return err 129 | } 130 | } 131 | 132 | plugins, err := plugin.GetFilteredPlugins(pluginDir, o.SkipPlugins, log) 133 | if err != nil { 134 | return err 135 | } 136 | files, err := file.ReadFiles(context.TODO(), exportDir) 137 | if err != nil { 138 | return err 139 | } 140 | 141 | opts := file.PathOpts{ 142 | TransformDir: transformDir, 143 | ExportDir: exportDir, 144 | IgnoredPatchesDir: ignoredPatchesDir, 145 | } 146 | 147 | runner := transform.Runner{Log: log.WithField("command", "transform").Logger} 148 | if len(o.PluginPriorities) > 0 { 149 | runner.PluginPriorities = o.getPluginPrioritiesMap() 150 | } 151 | 152 | if len(o.OptionalFlags) > 0 { 153 | err = json.Unmarshal([]byte(o.OptionalFlags), &runner.OptionalFlags) 154 | if err != nil { 155 | return err 156 | } 157 | runner.OptionalFlags = optionalFlagsToLower(runner.OptionalFlags) 158 | log.Debugf("parsed optional-flags: %v", runner.OptionalFlags) 159 | } 160 | 161 | for _, f := range files { 162 | response, err := runner.Run(f.Unstructured, plugins) 163 | if err != nil { 164 | return err 165 | } 166 | 167 | if response.HaveWhiteOut { 168 | whPath := opts.GetWhiteOutFilePath(f.Path) 169 | _, statErr := os.Stat(whPath) 170 | if os.IsNotExist(statErr) { 171 | log.Infof("resource file: %v creating whiteout file: %v", f.Info.Name(), whPath) 172 | err = os.MkdirAll(filepath.Dir(whPath), 0700) 173 | if err != nil { 174 | return err 175 | } 176 | whFile, err := os.Create(whPath) 177 | if err != nil { 178 | return err 179 | } 180 | whFile.Close() 181 | } 182 | continue 183 | } else { 184 | // if whiteout file exists from prior run, remove it 185 | whPath := opts.GetWhiteOutFilePath(f.Path) 186 | _, statErr := os.Stat(whPath) 187 | if !os.IsNotExist(statErr) { 188 | log.Infof("resource file: %v removing stale whiteout file: %v", f.Info.Name(), whPath) 189 | err := os.Remove(whPath) 190 | if err != nil { 191 | return err 192 | } 193 | } 194 | } 195 | 196 | // TODO: log if file exists and is truncated 197 | // TODO: delete transform file if it exists and haveWhiteOut 198 | tfPath := opts.GetTransformPath(f.Path) 199 | err = os.MkdirAll(filepath.Dir(tfPath), 0700) 200 | if err != nil { 201 | return err 202 | } 203 | transformFile, err := os.Create(tfPath) 204 | if err != nil { 205 | return err 206 | } 207 | defer transformFile.Close() 208 | i, err := transformFile.Write(response.TransformFile) 209 | if err != nil { 210 | return err 211 | } 212 | log.Debugf("wrote %v bytes for file: %v", i, tfPath) 213 | if len(response.IgnoredPatches) > 2 { 214 | log.Infof("Ignoring patches: %v", string(response.IgnoredPatches)) 215 | if len(ignoredPatchesDir) > 0 { 216 | ignorePath := opts.GetIgnoredPatchesPath(f.Path) 217 | err = os.MkdirAll(filepath.Dir(ignorePath), 0700) 218 | if err != nil { 219 | return err 220 | } 221 | ignoreFile, err := os.Create(ignorePath) 222 | if err != nil { 223 | return err 224 | } 225 | defer ignoreFile.Close() 226 | i, err := ignoreFile.Write(response.IgnoredPatches) 227 | if err != nil { 228 | return err 229 | } 230 | log.Debugf("wrote %v bytes for file: %v", i, ignorePath) 231 | } 232 | } 233 | } 234 | return nil 235 | } 236 | 
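The getPluginPrioritiesMap helper below turns the ordered --plugin-priorities list into a name-to-index map, so a lower value means the plugin was listed earlier and should win patch conflicts. A minimal sketch of how such a map can be consulted, assuming a hypothetical pickWinner helper (the actual conflict resolution lives in crane-lib's transform.Runner and may differ):

package main

import "fmt"

// pickWinner returns whichever of two conflicting plugins has the higher
// priority: a listed plugin beats an unlisted one, and a lower index beats
// a higher one.
func pickWinner(priorities map[string]int, a, b string) string {
	pa, aListed := priorities[a]
	pb, bListed := priorities[b]
	switch {
	case aListed && bListed:
		if pa <= pb {
			return a
		}
		return b
	case aListed:
		return a
	case bListed:
		return b
	default:
		// neither plugin was prioritized; fall back to the first argument
		return a
	}
}

func main() {
	// mirrors --plugin-priorities=PluginA,PluginB
	priorities := map[string]int{"PluginA": 0, "PluginB": 1}
	fmt.Println(pickWinner(priorities, "PluginB", "PluginA")) // PluginA
	fmt.Println(pickWinner(priorities, "PluginC", "PluginB")) // PluginB
}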
237 | func (o *Options) getPluginPrioritiesMap() map[string]int {
238 | prioritiesMap := make(map[string]int)
239 | for i, pluginName := range o.PluginPriorities {
240 | if len(pluginName) > 0 {
241 | prioritiesMap[pluginName] = i
242 | }
243 | }
244 | return prioritiesMap
245 | }
246 | 
247 | // Returns an extras map with lowercased keys, since any keys coming from the config file
248 | // are lower-cased by viper
249 | func optionalFlagsToLower(inFlags map[string]string) map[string]string {
250 | lowerMap := make(map[string]string)
251 | for key, val := range inFlags {
252 | lowerMap[strings.ToLower(key)] = val
253 | }
254 | return lowerMap
255 | }
256 | 
--------------------------------------------------------------------------------
/cmd/tunnel-api/tunnel-api.go:
--------------------------------------------------------------------------------
1 | package tunnel_api
2 | 
3 | import (
4 | "fmt"
5 | "log"
6 | 
7 | "github.com/konveyor/crane-lib/connect/tunnel_api"
8 | "github.com/sirupsen/logrus"
9 | "github.com/spf13/cobra"
10 | "k8s.io/cli-runtime/pkg/genericclioptions"
11 | "k8s.io/client-go/kubernetes/scheme"
12 | "k8s.io/client-go/rest"
13 | clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
14 | "sigs.k8s.io/controller-runtime/pkg/client"
15 | )
16 | 
17 | type TunnelAPIOptions struct {
18 | configFlags *genericclioptions.ConfigFlags
19 | genericclioptions.IOStreams
20 | 
21 | logger logrus.FieldLogger
22 | SourceContext string
23 | DestinationContext string
24 | Namespace string
25 | SourceImage string
26 | DestinationImage string
27 | ProxyHost string
28 | ProxyPort string
29 | ProxyUser string
30 | ProxyPass string
31 | sourceContext *clientcmdapi.Context
32 | destinationContext *clientcmdapi.Context
33 | }
34 | 
35 | func NewTunnelAPIOptions(streams genericclioptions.IOStreams) *cobra.Command {
36 | t := &TunnelAPIOptions{
37 | configFlags: genericclioptions.NewConfigFlags(false),
38 | 
39 | IOStreams: streams,
40 | logger: logrus.New(),
41 | }
42 | 
43 | cmd := &cobra.Command{
44 | Use: "tunnel-api",
45 | Short: "set up an OpenVPN tunnel to access an on-premise (source) cluster from a (cloud) destination cluster",
46 | RunE: func(c *cobra.Command, args []string) error {
47 | if err := t.Complete(c, args); err != nil {
48 | return err
49 | }
50 | if err := t.Validate(); err != nil {
51 | return err
52 | }
53 | if err := t.Run(); err != nil {
54 | return err
55 | }
56 | 
57 | return nil
58 | },
59 | }
60 | addFlagsForTunnelAPIOptions(t, cmd)
61 | 
62 | return cmd
63 | }
64 | 
65 | func addFlagsForTunnelAPIOptions(t *TunnelAPIOptions, cmd *cobra.Command) {
66 | cmd.Flags().StringVar(&t.SourceContext, "source-context", "", "The name of the source context in the current kubeconfig")
67 | cmd.Flags().StringVar(&t.DestinationContext, "destination-context", "", "The name of the destination context in the current kubeconfig")
68 | cmd.Flags().StringVar(&t.Namespace, "namespace", "", "The namespace of the PVC to be transferred; if empty, it will try to use the openvpn namespace")
69 | cmd.Flags().StringVar(&t.SourceImage, "source-image", "", "The container image to use on the source cluster. Defaults to quay.io/konveyor/openvpn:latest")
70 | cmd.Flags().StringVar(&t.DestinationImage, "destination-image", "", "The container image to use on the destination cluster. Defaults to quay.io/konveyor/openvpn:latest")
71 | cmd.Flags().StringVar(&t.ProxyHost, "proxy-host", "", "The hostname of an http-proxy to use on the source cluster for connecting to the destination cluster")
72 | cmd.Flags().StringVar(&t.ProxyPort, "proxy-port", "", "The port the http-proxy is listening on. If not specified, it defaults to 3128")
73 | cmd.Flags().StringVar(&t.ProxyUser, "proxy-user", "", "The username for the http-proxy. If specified, you must also specify a password or it will be ignored.")
74 | cmd.Flags().StringVar(&t.ProxyPass, "proxy-pass", "", "The password for the http-proxy. If specified, you must also specify a username or it will be ignored.")
75 | }
76 | 
77 | func (t *TunnelAPIOptions) Complete(c *cobra.Command, args []string) error {
78 | config := t.configFlags.ToRawKubeConfigLoader()
79 | rawConfig, err := config.RawConfig()
80 | if err != nil {
81 | return err
82 | }
83 | 
84 | if t.DestinationContext == "" {
85 | t.DestinationContext = *t.configFlags.Context
86 | }
87 | 
88 | for name, context := range rawConfig.Contexts {
89 | if name == t.SourceContext {
90 | t.sourceContext = context
91 | }
92 | if name == t.DestinationContext {
93 | t.destinationContext = context
94 | }
95 | }
96 | 
97 | return nil
98 | }
99 | 
100 | func (t *TunnelAPIOptions) Validate() error {
101 | if t.sourceContext == nil {
102 | return fmt.Errorf("cannot evaluate source context")
103 | }
104 | 
105 | if t.destinationContext == nil {
106 | return fmt.Errorf("cannot evaluate destination context")
107 | }
108 | 
109 | if t.sourceContext.Cluster == t.destinationContext.Cluster {
110 | return fmt.Errorf("source and destination clusters are the same; this is not supported")
111 | }
112 | 
113 | return nil
114 | }
115 | 
116 | func (t *TunnelAPIOptions) Run() error {
117 | return t.run()
118 | }
119 | 
120 | func (t *TunnelAPIOptions) getClientFromContext(ctx string) (client.Client, error) {
121 | restConfig, err := t.getRestConfigFromContext(ctx)
122 | if err != nil {
123 | return nil, err
124 | }
125 | 
126 | return client.New(restConfig, client.Options{Scheme: scheme.Scheme})
127 | }
128 | 
129 | func (t *TunnelAPIOptions) getRestConfigFromContext(ctx string) (*rest.Config, error) {
130 | c := ctx
131 | t.configFlags.Context = &c
132 | 
133 | return t.configFlags.ToRESTConfig()
134 | }
135 | 
136 | func (t *TunnelAPIOptions) run() error {
137 | tunnel := tunnel_api.Tunnel{}
138 | 
139 | fmt.Println("Generating SSL certificates. 
This may take several minutes.") 140 | ca, serverCrt, serverKey, clientCrt, clientKey, dh, err := tunnel_api.GenOpenvpnSSLCrts() 141 | if err != nil { 142 | return err 143 | } 144 | tunnel.Options.CACrt = ca 145 | tunnel.Options.ServerCrt = serverCrt 146 | tunnel.Options.ServerKey = serverKey 147 | tunnel.Options.ClientCrt = clientCrt 148 | tunnel.Options.ClientKey = clientKey 149 | tunnel.Options.RSADHKey = dh 150 | fmt.Println("SSL Certificate generation complete.") 151 | 152 | srcConfig, err := t.getRestConfigFromContext(t.SourceContext) 153 | if err != nil { 154 | log.Fatal(err, "unable to get source config") 155 | } 156 | 157 | dstConfig, err := t.getRestConfigFromContext(t.DestinationContext) 158 | if err != nil { 159 | log.Fatal(err, "unable to get destination config") 160 | } 161 | 162 | _, err = t.getClientFromContext(t.SourceContext) 163 | if err != nil { 164 | log.Fatal(err, "unable to get source client") 165 | } 166 | _, err = t.getClientFromContext(t.DestinationContext) 167 | if err != nil { 168 | log.Fatal(err, "unable to get destination client") 169 | } 170 | 171 | tunnel.SrcConfig = srcConfig 172 | tunnel.DstConfig = dstConfig 173 | tunnel.Options.Namespace = t.Namespace 174 | tunnel.Options.ClientImage = t.SourceImage 175 | tunnel.Options.ServerImage = t.DestinationImage 176 | tunnel.Options.ProxyHost = t.ProxyHost 177 | tunnel.Options.ProxyPort = t.ProxyPort 178 | tunnel.Options.ProxyUser = t.ProxyUser 179 | tunnel.Options.ProxyPass = t.ProxyPass 180 | 181 | err = tunnel_api.Openvpn(tunnel) 182 | if err != nil { 183 | log.Fatal(err, "Unable to create Tunnel") 184 | } 185 | 186 | return nil 187 | } 188 | -------------------------------------------------------------------------------- /cmd/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/konveyor/crane/internal/buildinfo" 7 | "github.com/konveyor/crane/internal/flags" 8 | "github.com/spf13/cobra" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | type Options struct { 13 | // Two GlobalFlags struct fields are needed 14 | // 1. cobraGlobalFlags for explicit CLI args parsed by cobra 15 | // 2. 
globalFlags for the args merged with values from the viper config file 16 | cobraGlobalFlags *flags.GlobalFlags 17 | globalFlags *flags.GlobalFlags 18 | } 19 | 20 | func (o *Options) Complete(c *cobra.Command, args []string) error { 21 | // TODO: @sseago 22 | return nil 23 | } 24 | 25 | func (o *Options) Validate() error { 26 | // TODO: @sseago 27 | return nil 28 | } 29 | 30 | func (o *Options) Run() error { 31 | return o.run() 32 | } 33 | 34 | func NewVersionCommand(f *flags.GlobalFlags) *cobra.Command { 35 | o := &Options{ 36 | cobraGlobalFlags: f, 37 | } 38 | cmd := &cobra.Command{ 39 | Use: "version", 40 | Short: "Return the current crane (and crane-lib) version", 41 | RunE: func(c *cobra.Command, args []string) error { 42 | if err := o.Complete(c, args); err != nil { 43 | return err 44 | } 45 | if err := o.Validate(); err != nil { 46 | return err 47 | } 48 | if err := o.Run(); err != nil { 49 | return err 50 | } 51 | return nil 52 | }, 53 | PreRun: func(cmd *cobra.Command, args []string) { 54 | viper.Unmarshal(&o.globalFlags) 55 | }, 56 | } 57 | return cmd 58 | } 59 | 60 | func (o *Options) run() error { 61 | fmt.Println("crane:") 62 | fmt.Printf("\tVersion: %s\n", buildinfo.Version) 63 | fmt.Println("crane-lib:") 64 | fmt.Printf("\tVersion: %s\n", buildinfo.CranelibVersion) 65 | return nil 66 | } 67 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/konveyor/crane 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.24.2 6 | 7 | require ( 8 | github.com/backube/pvc-transfer v0.0.0-20220718185428-1d2440958552 9 | github.com/bombsimon/logrusr/v3 v3.0.0 10 | github.com/ghodss/yaml v1.0.0 11 | github.com/go-logr/logr v1.2.3 12 | github.com/jarcoal/httpmock v1.2.0 13 | github.com/konveyor/crane-lib v0.0.8 14 | github.com/olekukonko/tablewriter v0.0.5 15 | github.com/openshift/api v0.0.0-20220525145417-ee5b62754c68 16 | github.com/openshift/library-go v0.0.0-20220704153411-3ea4b775d418 17 | github.com/sirupsen/logrus v1.8.1 18 | github.com/spf13/cobra v1.5.0 19 | github.com/spf13/viper v1.12.0 20 | github.com/vmware-tanzu/velero v1.6.3 21 | golang.org/x/mod v0.8.0 22 | gotest.tools/v3 v3.0.3 23 | k8s.io/api v0.24.2 24 | k8s.io/apimachinery v0.24.2 25 | k8s.io/cli-runtime v0.24.2 26 | k8s.io/client-go v0.24.2 27 | sigs.k8s.io/controller-runtime v0.12.2 28 | sigs.k8s.io/kustomize/cmd/config v0.10.7 29 | sigs.k8s.io/kustomize/kyaml v0.13.7 30 | sigs.k8s.io/yaml v1.3.0 31 | ) 32 | 33 | require ( 34 | github.com/Luzifer/go-dhparam v1.1.0 // indirect 35 | github.com/PuerkitoBio/purell v1.1.1 // indirect 36 | github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect 37 | github.com/davecgh/go-spew v1.1.1 // indirect 38 | github.com/emicklei/go-restful v2.9.5+incompatible // indirect 39 | github.com/evanphx/json-patch v4.12.0+incompatible // indirect 40 | github.com/evanphx/json-patch/v5 v5.5.0 // indirect 41 | github.com/fsnotify/fsnotify v1.5.4 // indirect 42 | github.com/go-errors/errors v1.0.1 // indirect 43 | github.com/go-logr/zapr v1.2.3 // indirect 44 | github.com/go-openapi/jsonpointer v0.19.5 // indirect 45 | github.com/go-openapi/jsonreference v0.19.5 // indirect 46 | github.com/go-openapi/swag v0.19.14 // indirect 47 | github.com/gogo/protobuf v1.3.2 // indirect 48 | github.com/golang/protobuf v1.5.2 // indirect 49 | github.com/google/btree v1.0.1 // indirect 50 | github.com/google/gnostic v0.5.7-v3refs // indirect 51 | 
github.com/google/go-cmp v0.5.9 // indirect 52 | github.com/google/gofuzz v1.2.0 // indirect 53 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect 54 | github.com/google/uuid v1.1.2 // indirect 55 | github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect 56 | github.com/hashicorp/hcl v1.0.0 // indirect 57 | github.com/imdario/mergo v0.3.12 // indirect 58 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 59 | github.com/josharian/intern v1.0.0 // indirect 60 | github.com/json-iterator/go v1.1.12 // indirect 61 | github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect 62 | github.com/magiconair/properties v1.8.6 // indirect 63 | github.com/mailru/easyjson v0.7.6 // indirect 64 | github.com/mattn/go-runewidth v0.0.9 // indirect 65 | github.com/mitchellh/mapstructure v1.5.0 // indirect 66 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 67 | github.com/modern-go/reflect2 v1.0.2 // indirect 68 | github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect 69 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 70 | github.com/pelletier/go-toml v1.9.5 // indirect 71 | github.com/pelletier/go-toml/v2 v2.0.1 // indirect 72 | github.com/peterbourgon/diskv v2.0.1+incompatible // indirect 73 | github.com/pkg/errors v0.9.1 // indirect 74 | github.com/spf13/afero v1.8.2 // indirect 75 | github.com/spf13/cast v1.5.0 // indirect 76 | github.com/spf13/jwalterweatherman v1.1.0 // indirect 77 | github.com/spf13/pflag v1.0.5 // indirect 78 | github.com/subosito/gotenv v1.3.0 // indirect 79 | github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect 80 | go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect 81 | golang.org/x/net v0.24.0 // indirect 82 | golang.org/x/oauth2 v0.27.0 // indirect 83 | golang.org/x/sys v0.19.0 // indirect 84 | golang.org/x/term v0.19.0 // indirect 85 | golang.org/x/text v0.14.0 // indirect 86 | golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect 87 | google.golang.org/appengine v1.6.7 // indirect 88 | google.golang.org/protobuf v1.33.0 // indirect 89 | gopkg.in/inf.v0 v0.9.1 // indirect 90 | gopkg.in/ini.v1 v1.66.4 // indirect 91 | gopkg.in/yaml.v2 v2.4.0 // indirect 92 | gopkg.in/yaml.v3 v3.0.0 // indirect 93 | k8s.io/klog/v2 v2.60.1 // indirect 94 | k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661 // indirect 95 | k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect 96 | sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect 97 | sigs.k8s.io/kustomize/api v0.11.5 // indirect 98 | sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect 99 | ) 100 | -------------------------------------------------------------------------------- /hack/minikube-clusters-delete.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set +x 3 | 4 | SRC_CLUSTER_NAME=src 5 | DEST_CLUSTER_NAME=dest 6 | 7 | SOURCE_IP=$(minikube ip -p ${SRC_CLUSTER_NAME}) 8 | DEST_IP=$(minikube ip -p ${DEST_CLUSTER_NAME}) 9 | SOURCE_IP_RANGE="${SOURCE_IP%.*}.0/24" 10 | DEST_IP_RANGE="${DEST_IP%.*}.0/24" 11 | 12 | sudo iptables -D FORWARD -p all -s $SOURCE_IP_RANGE -d $DEST_IP_RANGE -j ACCEPT 13 | sudo iptables -D FORWARD -p all -s $DEST_IP_RANGE -d $SOURCE_IP_RANGE -j ACCEPT 14 | 15 | minikube delete -p ${SRC_CLUSTER_NAME} 16 | minikube delete -p ${DEST_CLUSTER_NAME} 17 | 18 | -------------------------------------------------------------------------------- 
/hack/minikube-clusters-start.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set +x
3 | 
4 | SRC_CLUSTER_NAME=src
5 | DEST_CLUSTER_NAME=dest
6 | SRC_KUBE_VERSION="${SRC_KUBE_VERSION:-}"
7 | DEST_KUBE_VERSION="${DEST_KUBE_VERSION:-}"
8 | 
9 | minikube status -p ${SRC_CLUSTER_NAME} >> /dev/null
10 | if [[ $? == 0 ]]; then
11 | echo "run hack/minikube-clusters-delete.sh before running this script"; exit 1
12 | fi
13 | minikube status -p ${DEST_CLUSTER_NAME} >> /dev/null
14 | if [[ $? == 0 ]]; then
15 | echo "run hack/minikube-clusters-delete.sh before running this script"; exit 1
16 | fi
17 | 
18 | echo "create two minikube clusters"
19 | 
20 | minikube start -p ${SRC_CLUSTER_NAME} --kubernetes-version="${SRC_KUBE_VERSION}"
21 | minikube start -p ${DEST_CLUSTER_NAME} --kubernetes-version="${DEST_KUBE_VERSION}"
22 | 
23 | echo "clusters started, configuring networking between source and destination clusters"
24 | 
25 | SOURCE_IP=$(minikube ip -p ${SRC_CLUSTER_NAME})
26 | DEST_IP=$(minikube ip -p ${DEST_CLUSTER_NAME})
27 | SOURCE_IP_RANGE="${SOURCE_IP%.*}.0/24"
28 | DEST_IP_RANGE="${DEST_IP%.*}.0/24"
29 | 
30 | sudo iptables -I FORWARD 2 -p all -s $SOURCE_IP_RANGE -d $DEST_IP_RANGE -j ACCEPT
31 | sudo iptables -I FORWARD 3 -p all -s $DEST_IP_RANGE -d $SOURCE_IP_RANGE -j ACCEPT
32 | 
33 | minikube ssh -p ${SRC_CLUSTER_NAME} sudo ip r add $DEST_IP_RANGE via $(echo $SOURCE_IP | cut -d"." -f1-3).1
34 | minikube ssh -p ${DEST_CLUSTER_NAME} sudo ip r add $SOURCE_IP_RANGE via $(echo $DEST_IP | cut -d"." -f1-3).1
35 | 
36 | minikube ssh -p ${SRC_CLUSTER_NAME} "ping -c 4 ${DEST_IP}"
37 | if [ "$?" != 0 ];
38 | then
39 | echo "unable to set up networking"
40 | exit 1
41 | fi
42 | 
43 | echo "network setup successful, configuring nginx ingress on destination cluster"
44 | minikube addons -p ${DEST_CLUSTER_NAME} enable ingress
45 | 
46 | minikube update-context -p ${SRC_CLUSTER_NAME}
47 | 
48 | # this hack does not work if the script is run twice
49 | COREFILE=$(kubectl get cm -n kube-system coredns -ojson | jq '.data.Corefile')
50 | COREFILE=$(echo $COREFILE | sed s/'fallthrough\\n }\\n/& file \/etc\/coredns\/crane.db crane.dev\\n/')
51 | kubectl get cm -n kube-system coredns -ojson | jq ".data.Corefile = ${COREFILE}" | kubectl replace -f -
52 | 
53 | kubectl patch cm -n kube-system coredns --type='json' -p='[{"op": "replace", "path": "/data/crane.db", "value": "; crane.dev test file\ncrane.dev. IN SOA a.crane.dev. b.crane.dev. 2 604800 86400 2419200 604800\ncrane.dev. IN NS a.crane.dev.\ncrane.dev. IN NS b.crane.dev.\na.crane.dev. IN A 127.0.0.1\nb.crane.dev. IN A 127.0.0.1\n\n*.crane.dev. 
IN A DEST_IP\n"}]' 54 | kubectl get cm -n kube-system coredns -oyaml | sed "s/DEST_IP/${DEST_IP}/" | kubectl replace -f - 55 | 56 | kubectl patch deploy -n kube-system coredns --type='json' -p='[{"op": "add", "path": "/spec/template/spec/volumes/0/configMap/items/1", "value": {"key": "crane.db", "path": "crane.db"}}]' 57 | 58 | kubectl patch deploy --context=${DEST_CLUSTER_NAME} -n ingress-nginx ingress-nginx-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/12", "value": "--enable-ssl-passthrough"}]' 59 | 60 | # force a rollout 61 | kubectl delete rs -n ingress-nginx --context=${DEST_CLUSTER_NAME} -l app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx 62 | -------------------------------------------------------------------------------- /internal/buildinfo/buildinfo.go: -------------------------------------------------------------------------------- 1 | package buildinfo 2 | 3 | import ( 4 | cranelibversion "github.com/konveyor/crane-lib/version" 5 | ) 6 | 7 | var ( 8 | Version string = "v0.0.5" 9 | 10 | CranelibVersion string = cranelibversion.Version 11 | ) 12 | -------------------------------------------------------------------------------- /internal/file/file_helper.go: -------------------------------------------------------------------------------- 1 | package file 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/sirupsen/logrus" 12 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 13 | "sigs.k8s.io/yaml" 14 | ) 15 | 16 | type File struct { 17 | Info os.FileInfo 18 | Unstructured unstructured.Unstructured 19 | Path string 20 | } 21 | 22 | func ReadFiles(ctx context.Context, dir string) ([]File, error) { 23 | log := logrus.New() 24 | 25 | files, err := ioutil.ReadDir(dir) 26 | if err != nil { 27 | return nil, err 28 | } 29 | return readFiles(ctx, dir, files, log) 30 | } 31 | 32 | func readFiles(ctx context.Context, path string, files []os.FileInfo, log *logrus.Logger) ([]File, error) { 33 | jsonFiles := []File{} 34 | for _, file := range files { 35 | filePath := fmt.Sprintf("%v/%v", path, file.Name()) 36 | if file.IsDir() { 37 | if file.Name() == "failures" { 38 | continue 39 | } 40 | newFiles, err := ioutil.ReadDir(filePath) 41 | if err != nil { 42 | return nil, err 43 | } 44 | files, err := readFiles(ctx, filePath, newFiles, log) 45 | if err != nil { 46 | return nil, err 47 | } 48 | jsonFiles = append(jsonFiles, files...) 49 | } else { 50 | data, err := ioutil.ReadFile(filePath) 51 | if err != nil { 52 | return nil, err 53 | } 54 | json, err := yaml.YAMLToJSON(data) 55 | if err != nil { 56 | return nil, err 57 | } 58 | 59 | u := unstructured.Unstructured{} 60 | err = u.UnmarshalJSON(json) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | jsonFiles = append(jsonFiles, File{ 66 | Info: file, 67 | Unstructured: u, 68 | Path: filePath, 69 | }) 70 | } 71 | } 72 | return jsonFiles, nil 73 | } 74 | 75 | //TODO: @shawn-hurley Add errors for these methods to validate that the correct struct values are set. 
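// Editorial sketch -- not part of the original file. This isolates the decode
// path used by readFiles above: YAML (or JSON, which YAMLToJSON passes through
// unchanged) is converted to JSON and loaded into an Unstructured object, so
// crane can handle arbitrary resource kinds without compile-time types. The
// function name is hypothetical:
func exampleDecodeManifest(data []byte) (unstructured.Unstructured, error) {
	u := unstructured.Unstructured{}
	json, err := yaml.YAMLToJSON(data)
	if err != nil {
		return u, err
	}
	// after a successful unmarshal, accessors like u.GetKind() and u.GetName() work
	err = u.UnmarshalJSON(json)
	return u, err
}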
76 | type PathOpts struct { 77 | TransformDir string 78 | ExportDir string 79 | OutputDir string 80 | IgnoredPatchesDir string 81 | } 82 | 83 | func (opts *PathOpts) GetWhiteOutFilePath(filePath string) string { 84 | return opts.updateTransformDirPath(".wh.", filePath) 85 | } 86 | 87 | func (opts *PathOpts) GetTransformPath(filePath string) string { 88 | return opts.updateTransformDirPath("transform-", filePath) 89 | } 90 | 91 | func (opts *PathOpts) GetIgnoredPatchesPath(filePath string) string { 92 | return opts.updateIgnoredPatchesDirPath("ignored-", filePath) 93 | } 94 | 95 | func (opts *PathOpts) updateTransformDirPath(prefix, filePath string) string { 96 | return opts.updatePath(opts.TransformDir, prefix, filePath) 97 | } 98 | 99 | func (opts *PathOpts) updateIgnoredPatchesDirPath(prefix, filePath string) string { 100 | if len(opts.IgnoredPatchesDir) == 0 { 101 | return "" 102 | } 103 | return opts.updatePath(opts.IgnoredPatchesDir, prefix, filePath) 104 | } 105 | 106 | func (opts *PathOpts) updatePath(updateDir, prefix, filePath string) string { 107 | dir, fname := filepath.Split(filePath) 108 | dir = strings.Replace(dir, opts.ExportDir, updateDir, 1) 109 | fname = fmt.Sprintf("%v%v", prefix, fname) 110 | return filepath.Join(dir, fname) 111 | } 112 | 113 | func (opts *PathOpts) GetOutputFilePath(filePath string) string { 114 | dir, fname := filepath.Split(filePath) 115 | dir = strings.Replace(dir, opts.ExportDir, opts.OutputDir, 1) 116 | return filepath.Join(dir, fname) 117 | } 118 | -------------------------------------------------------------------------------- /internal/file/file_helper_test.go: -------------------------------------------------------------------------------- 1 | package file_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/konveyor/crane/internal/file" 7 | ) 8 | 9 | func TestGetWhiteOutFilePath(t *testing.T) { 10 | cases := []struct { 11 | Name string 12 | Filepath string 13 | Dir string 14 | ResourceDir string 15 | Expected string 16 | }{ 17 | { 18 | Name: "test whiteout file creation", 19 | Filepath: "/fully/qualified/resources/ns/path-test", 20 | Dir: "/fully/qualified/transform", 21 | ResourceDir: "/fully/qualified/resources", 22 | Expected: "/fully/qualified/transform/ns/.wh.path-test", 23 | }, 24 | } 25 | 26 | for _, test := range cases { 27 | opts := file.PathOpts{ 28 | TransformDir: test.Dir, 29 | ExportDir: test.ResourceDir, 30 | } 31 | if actual := opts.GetWhiteOutFilePath(test.Filepath); actual != test.Expected { 32 | t.Errorf("actual: %v did not match expected: %v", actual, test.Expected) 33 | } 34 | } 35 | } 36 | 37 | func TestGetTransformPath(t *testing.T) { 38 | cases := []struct { 39 | Name string 40 | Filepath string 41 | Dir string 42 | ResourceDir string 43 | Expected string 44 | }{ 45 | { 46 | Name: "test transform file creation", 47 | Filepath: "/fully/qualified/ns/path-test", 48 | Dir: "/fully/qualified/transform", 49 | ResourceDir: "/fully/qualified", 50 | Expected: "/fully/qualified/transform/ns/transform-path-test", 51 | }, 52 | } 53 | for _, test := range cases { 54 | opts := file.PathOpts{ 55 | TransformDir: test.Dir, 56 | ExportDir: test.ResourceDir, 57 | } 58 | if actual := opts.GetTransformPath(test.Filepath); actual != test.Expected { 59 | t.Errorf("actual: %v did not match expected: %v", actual, test.Expected) 60 | } 61 | } 62 | 63 | } 64 | 65 | func TestGetOutputFilePath(t *testing.T) { 66 | cases := []struct { 67 | Name string 68 | Filepath string 69 | Dir string 70 | ResourceDir string 71 | Expected string 72 | }{ 73 | { 
74 | Name: "test transform file creation", 75 | Filepath: "/fully/qualified/ns/path-test", 76 | Dir: "/fully/qualified/output", 77 | ResourceDir: "/fully/qualified", 78 | Expected: "/fully/qualified/output/ns/path-test", 79 | }, 80 | } 81 | for _, test := range cases { 82 | opts := file.PathOpts{ 83 | OutputDir: test.Dir, 84 | ExportDir: test.ResourceDir, 85 | } 86 | if actual := opts.GetOutputFilePath(test.Filepath); actual != test.Expected { 87 | t.Errorf("actual: %v did not match expected: %v", actual, test.Expected) 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /internal/flags/global_flags.go: -------------------------------------------------------------------------------- 1 | package flags 2 | 3 | import ( 4 | "github.com/sirupsen/logrus" 5 | "github.com/spf13/cobra" 6 | "github.com/spf13/viper" 7 | ) 8 | 9 | type GlobalFlags struct { 10 | ConfigFile string 11 | Debug bool 12 | } 13 | 14 | func (g *GlobalFlags) ApplyFlags(cmd *cobra.Command) { 15 | cobra.OnInitialize(g.initConfig) 16 | cmd.PersistentFlags().BoolVar(&g.Debug, "debug", false, "Debug the command by printing more information") 17 | cmd.PersistentFlags().StringVarP(&g.ConfigFile, "flags-file", "f", "", "Path to input file which contains a yaml representation of cli flags. Explicit flags take precedence over input file values.") 18 | viper.BindPFlags(cmd.PersistentFlags()) 19 | } 20 | 21 | func (g *GlobalFlags) GetLogger() *logrus.Logger { 22 | log := logrus.New() 23 | if g.Debug { 24 | log.SetLevel(logrus.DebugLevel) 25 | } 26 | return log 27 | } 28 | 29 | func (g *GlobalFlags) initConfig() { 30 | if g.ConfigFile != "" { 31 | viper.SetConfigFile(g.ConfigFile) 32 | } 33 | viper.AutomaticEnv() 34 | 35 | if err := viper.ReadInConfig(); err == nil { 36 | g.GetLogger().Infof("Using config file: %v", viper.ConfigFileUsed()) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /internal/plugin/plugin_helper.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/konveyor/crane-lib/transform" 10 | binary_plugin "github.com/konveyor/crane-lib/transform/binary-plugin" 11 | "github.com/konveyor/crane-lib/transform/kubernetes" 12 | "github.com/sirupsen/logrus" 13 | ) 14 | 15 | const ( 16 | DefaultLocalPluginDir = "/.local/share/crane/plugins" 17 | GlobalPluginDir = "/usr/local/share/crane/plugins" 18 | PkgPluginDir = "/usr/share/crane/plugins" 19 | ) 20 | 21 | func GetPlugins(dir string, logger *logrus.Logger) ([]transform.Plugin, error) { 22 | pluginList := []transform.Plugin{&kubernetes.KubernetesTransformPlugin{}} 23 | files, err := ioutil.ReadDir(dir) 24 | switch { 25 | case os.IsNotExist(err): 26 | return pluginList, nil 27 | case err != nil: 28 | return nil, err 29 | } 30 | list, err := getBinaryPlugins(dir, files, logger) 31 | if err != nil { 32 | return nil, err 33 | } 34 | pluginList = append(pluginList, list...) 
35 | return pluginList, nil 36 | } 37 | 38 | func getBinaryPlugins(path string, files []os.FileInfo, logger *logrus.Logger) ([]transform.Plugin, error) { 39 | pluginList := []transform.Plugin{} 40 | for _, file := range files { 41 | filePath := fmt.Sprintf("%v/%v", path, file.Name()) 42 | if file.IsDir() { 43 | newFiles, err := ioutil.ReadDir(filePath) 44 | if err != nil { 45 | return nil, err 46 | } 47 | plugins, err := getBinaryPlugins(filePath, newFiles, logger) 48 | if err != nil { 49 | return nil, err 50 | } 51 | pluginList = append(pluginList, plugins...) 52 | } else if file.Mode().IsRegular() && IsExecAny(file.Mode().Perm()) { 53 | newPlugin, err := binary_plugin.NewBinaryPlugin(filePath, logger) 54 | if err != nil { 55 | return nil, err 56 | } 57 | pluginList = append(pluginList, newPlugin) 58 | } 59 | } 60 | return pluginList, nil 61 | } 62 | 63 | func IsExecAny(mode os.FileMode) bool { 64 | return mode&0111 != 0 65 | } 66 | 67 | func GetFilteredPlugins(pluginDir string, skipPlugins []string, logger *logrus.Logger) ([]transform.Plugin, error) { 68 | var filteredPlugins, unfilteredPlugins []transform.Plugin 69 | absPathPluginDir, err := filepath.Abs("plugins") 70 | if err != nil { 71 | return filteredPlugins, err 72 | } 73 | 74 | paths := []string{absPathPluginDir, pluginDir, GlobalPluginDir, PkgPluginDir} 75 | 76 | for _, path := range paths { 77 | plugins, err := GetPlugins(path, logger) 78 | if err != nil { 79 | return filteredPlugins, err 80 | } 81 | for _, newPlugin := range plugins { 82 | exists := false 83 | for _, plugin := range unfilteredPlugins { 84 | if plugin.Metadata().Name == newPlugin.Metadata().Name { 85 | exists = true 86 | break 87 | } 88 | } 89 | if !exists { 90 | unfilteredPlugins = append(unfilteredPlugins, newPlugin) 91 | } 92 | } 93 | } 94 | 95 | if len(skipPlugins) == 0 { 96 | return unfilteredPlugins, nil 97 | } 98 | for _, thisPlugin := range unfilteredPlugins { 99 | if !isPluginInList(thisPlugin, skipPlugins) { 100 | filteredPlugins = append(filteredPlugins, thisPlugin) 101 | } 102 | } 103 | return filteredPlugins, nil 104 | } 105 | 106 | func isPluginInList(plugin transform.Plugin, list []string) bool { 107 | pluginName := plugin.Metadata().Name 108 | for _, listItem := range list { 109 | if pluginName == listItem { 110 | return true 111 | } 112 | } 113 | return false 114 | } 115 | -------------------------------------------------------------------------------- /internal/plugin/plugin_manager_helper.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io/ioutil" 7 | "net/http" 8 | "net/url" 9 | "os" 10 | "runtime" 11 | "strings" 12 | 13 | "github.com/ghodss/yaml" 14 | "github.com/sirupsen/logrus" 15 | ) 16 | 17 | const ( 18 | DEFAULT_REPO = "default" 19 | DEFAULT_REPO_URL = "DEFAULT_REPO_URL" 20 | DEFAULT_URL = "https://raw.githubusercontent.com/konveyor/crane-plugins/main/index.yaml" 21 | ) 22 | 23 | // returns map containing the manifests with the key as name-version. 
Takes name and repo as input to filter accordingly
24 | func BuildManifestMap(log *logrus.Logger, name string, repoName string) (map[string]map[string][]PluginVersion, error) {
25 | // TODO: for multiple repo, read values from conf file to this map
26 | repos := make(map[string]string)
27 | 
28 | if repoName != "" {
29 | // read the repo and url from the conf file and update the map with the same
30 | // repos[repoName] = 
31 | log.Errorf("Multiple repositories are not supported right now, so the --repo flag will not work until the next release")
32 | return nil, errors.New("multiple repositories are not supported right now, so the --repo flag will not work until the next release")
33 | } else {
34 | // read the whole config file and iterate through all repos to make sure every manifest is read
35 | repos[DEFAULT_REPO] = GetDefaultSource()
36 | }
37 | manifestMap := make(map[string]map[string][]PluginVersion)
38 | 
39 | // iterate over all the repos
40 | for repo, url := range repos {
41 | // get the index.yml file for the respective repo
42 | index, err := GetYamlFromUrl(url)
43 | if err != nil {
44 | return nil, err
45 | }
46 | // fetch all the manifest files from the repo
47 | for _, p := range index.Plugins {
48 | // retrieve the manifest if the name matches or no name was passed, i.e. a specific manifest or all of them
49 | if name == "" || strings.Contains(p.Name, name) {
50 | plugin, err := YamlToManifest(p.Path)
51 | if err != nil {
52 | log.Errorf("Error reading %s plugin manifest located at %s - Error: %s", p.Name, p.Path, err)
53 | return nil, err
54 | }
55 | if _, ok := manifestMap[repo]; ok {
56 | manifestMap[repo][p.Name] = plugin
57 | } else {
58 | manifestMap[repo] = make(map[string][]PluginVersion)
59 | manifestMap[repo][p.Name] = plugin
60 | }
61 | }
62 | }
63 | }
64 | return manifestMap, nil
65 | }
66 | 
67 | // takes a url as input and returns the index.yml for the plugin repository
68 | func GetYamlFromUrl(URL string) (PluginIndex, error) {
69 | var manifest PluginIndex
70 | index, err := getData(URL)
71 | if err != nil {
72 | return manifest, err
73 | }
74 | err = yaml.Unmarshal(index, &manifest)
75 | if err != nil {
76 | return manifest, err
77 | }
78 | return manifest, nil
79 | }
80 | 
81 | // takes a url as input and fetches the manifest of a plugin
82 | func YamlToManifest(URL string) ([]PluginVersion, error) {
83 | plugin := Plugin{}
84 | 
85 | body, err := getData(URL)
86 | if err != nil {
87 | return plugin.Versions, err
88 | }
89 | 
90 | err = yaml.Unmarshal(body, &plugin)
91 | if err != nil {
92 | return []PluginVersion{}, err
93 | }
94 | 
95 | isPluginAvailable := FilterPluginForOsArch(&plugin)
96 | if isPluginAvailable {
97 | return plugin.Versions, nil
98 | }
99 | // TODO: figure out a better way to not return the plugin
100 | return []PluginVersion{}, nil
101 | }
102 | 
103 | // takes a manifest as input and filters it for the current os/arch
104 | func FilterPluginForOsArch(plugin *Plugin) bool {
105 | // index into the slice directly; assigning through a range copy would not persist the filtered Binaries
106 | isPluginAvailable := false
107 | for i := range plugin.Versions {
108 | for _, binary := range plugin.Versions[i].Binaries {
109 | if binary.OS == runtime.GOOS && binary.Arch == runtime.GOARCH {
110 | isPluginAvailable = true
111 | plugin.Versions[i].Binaries = []Binary{
112 | binary,
113 | }
114 | break
115 | }
116 | }
117 | }
118 | return isPluginAvailable
119 | }
120 | 
121 | // returns the default plugin repo url, honoring the DEFAULT_REPO_URL env var override
122 | func GetDefaultSource() string {
123 | val, present := os.LookupEnv(DEFAULT_REPO_URL)
124 | if present {
125 | return val
126 | }
127 | return DEFAULT_URL
128 | }
129 | 
130 | // returns an array of strings containing all the paths where a binary with the given name is installed within the plugin dir
131 | func LocateBinaryInPluginDir(pluginDir string, name string, files []os.FileInfo) ([]string, error) {
132 | paths := []string{}
133 | 
134 | for _, file := range files {
135 | filePath := fmt.Sprintf("%v/%v", pluginDir, file.Name())
136 | if file.Mode().IsRegular() && IsExecAny(file.Mode().Perm()) && file.Name() == name {
137 | paths = append(paths, filePath)
138 | }
139 | }
140 | return paths, nil
141 | }
142 | 
143 | func IsUrl(URL string) (bool, string) {
144 | URL = strings.TrimPrefix(URL, "file://")
145 | u, err := url.Parse(URL)
146 | return err == nil && u.Scheme != "" && u.Host != "", URL
147 | }
148 | 
149 | func getData(URL string) ([]byte, error) {
150 | var index []byte
151 | var err error
152 | isUrl, URL := IsUrl(URL)
153 | if !isUrl {
154 | index, err = ioutil.ReadFile(URL)
155 | if err != nil {
156 | return nil, err
157 | }
158 | } else {
159 | res, err := http.Get(URL)
160 | if err != nil {
161 | return nil, err
162 | }
163 | 
164 | defer res.Body.Close()
165 | 
166 | index, err = ioutil.ReadAll(res.Body)
167 | if err != nil {
168 | return nil, err
169 | }
170 | }
171 | return index, nil
172 | }
173 | 
--------------------------------------------------------------------------------
/internal/plugin/plugin_manager_helper_test.go:
--------------------------------------------------------------------------------
1 | package plugin
2 | 
3 | import (
4 | "io/ioutil"
5 | "net/http"
6 | "os"
7 | "testing"
8 | 
9 | "github.com/ghodss/yaml"
10 | "github.com/jarcoal/httpmock"
11 | "gotest.tools/v3/assert"
12 | )
13 | 
14 | func TestGetYamlFromUrlWithUrl(t *testing.T) {
15 | URL := "https://test.com/index.yml"
16 | httpmock.Activate()
17 | defer httpmock.DeactivateAndReset()
18 | index := PluginIndex{
19 | Kind: "PluginIndex",
20 | ApiVersion: "crane.konveyor.io/v1alpha1",
21 | Plugins: []PluginLocation{
22 | {
23 | Name: "foo",
24 | Path: "https://test.com/foo.yml",
25 | },
26 | },
27 | }
28 | httpmock.RegisterResponder("GET", URL,
29 | func(req *http.Request) (*http.Response, error) {
30 | // serve the canned index as the response body
31 | return httpmock.NewJsonResponse(200, index)
32 | },
33 | )
34 | 
35 | resp, _ := GetYamlFromUrl(URL)
36 | assert.DeepEqual(t, index, resp)
37 | }
38 | 
39 | func TestYamlToManifestWithUrl(t *testing.T) {
40 | URL := "https://test.com/foo.yml"
41 | httpmock.Activate()
42 | defer httpmock.DeactivateAndReset()
43 | plugin := Plugin{
44 | Kind: "Plugin",
45 | ApiVersion: "crane.konveyor.io/v1alpha1",
46 | Versions: []PluginVersion{
47 | {
48 | Name: "foo",
49 | Version: "0.0.1",
50 | Description: "Description of foo plugin",
51 | ShortDescription: "Short description of foo plugin",
52 | Binaries: []Binary{
53 | {
54 | OS: "linux",
55 | Arch: "amd64",
56 | URI: "https://test.com/download/foo",
57 | },
58 | },
59 | },
60 | },
61 | }
62 | httpmock.RegisterResponder("GET", URL,
63 | func(req *http.Request) (*http.Response, error) {
64 | // serve the canned plugin manifest as the response body
65 | return httpmock.NewJsonResponse(200, plugin)
66 | },
67 | )
68 | 
69 | resp, _ := YamlToManifest(URL)
70 | assert.DeepEqual(t, plugin.Versions, resp)
71 | }
72 | 
73 | func TestGetYamlFromUrlWithFile(t *testing.T) {
74 | index := PluginIndex{
75 | Kind: "PluginIndex",
76 | ApiVersion: "crane.konveyor.io/v1alpha1",
77 | Plugins: []PluginLocation{
78 | {
79 | Name: "foo",
80 | Path: "https://test.com/foo.yml",
81 | },
82 | },
83 | }
84 | tempFile, err := ioutil.TempFile(os.TempDir(), "index.yml")
85 | if 
err != nil { 86 | panic(err) 87 | } 88 | defer os.Remove(tempFile.Name()) 89 | 90 | data, err := yaml.Marshal(&index) 91 | if err != nil { 92 | panic(err) 93 | } 94 | 95 | _, err = tempFile.Write(data) 96 | if err != nil { 97 | panic(err) 98 | } 99 | 100 | resp, _ := GetYamlFromUrl(tempFile.Name()) 101 | assert.DeepEqual(t, index, resp) 102 | 103 | resp, _ = GetYamlFromUrl("file://" + tempFile.Name()) 104 | assert.DeepEqual(t, index, resp) 105 | } 106 | 107 | func TestYamlToManifestWithFile(t *testing.T) { 108 | plugin := Plugin{ 109 | Kind: "Plugin", 110 | ApiVersion: "crane.konveyor.io/v1alpha1", 111 | Versions: []PluginVersion{ 112 | { 113 | Name: "foo", 114 | Version: "0.0.1", 115 | Description: "Description of foo plugin", 116 | ShortDescription: "Short description of foo plugin", 117 | Binaries: []Binary{ 118 | { 119 | OS: "linux", 120 | Arch: "amd64", 121 | URI: "https://test.com/download/foo", 122 | }, 123 | }, 124 | }, 125 | }, 126 | } 127 | tempFile, err := ioutil.TempFile(os.TempDir(), "index.yml") 128 | if err != nil { 129 | panic(err) 130 | } 131 | defer os.Remove(tempFile.Name()) 132 | 133 | data, err := yaml.Marshal(&plugin) 134 | if err != nil { 135 | panic(err) 136 | } 137 | 138 | _, err = tempFile.Write(data) 139 | if err != nil { 140 | panic(err) 141 | } 142 | 143 | resp, _ := YamlToManifest(tempFile.Name()) 144 | assert.DeepEqual(t, plugin.Versions, resp) 145 | 146 | resp, _ = YamlToManifest("file://" + tempFile.Name()) 147 | assert.DeepEqual(t, plugin.Versions, resp) 148 | } 149 | -------------------------------------------------------------------------------- /internal/plugin/plugin_types.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import "github.com/konveyor/crane-lib/transform" 4 | 5 | type Plugin struct { 6 | Kind string `json:"kind"` 7 | ApiVersion string `json:"apiVersion"` 8 | Versions []PluginVersion `json:"versions"` 9 | } 10 | 11 | type PluginVersion struct { 12 | Name string `json:"name"` 13 | ShortDescription string `json:"shortDescription"` 14 | Description string `json:"description"` 15 | Version Version `json:"version"` 16 | Binaries []Binary `json:"binaries"` 17 | OptionalFields []transform.OptionalFields `json:"optionalFields"` 18 | } 19 | 20 | type Version string 21 | 22 | type Binary struct { 23 | OS string `json:"os"` 24 | Arch string `json:"arch"` 25 | URI string `json:"uri"` 26 | SHA string `json:"sha,omitempty"` 27 | } 28 | 29 | type PluginLocation struct { 30 | Name string `json:"name"` 31 | Path string `json:"path"` 32 | } 33 | 34 | type PluginIndex struct { 35 | Kind string `json:"kind"` 36 | ApiVersion string `json:"apiVersion"` 37 | Plugins []PluginLocation `json:"plugins"` 38 | } 39 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/konveyor/crane/cmd/apply" 7 | export "github.com/konveyor/crane/cmd/export" 8 | plugin_manager "github.com/konveyor/crane/cmd/plugin-manager" 9 | "github.com/konveyor/crane/cmd/runfn" 10 | transfer_pvc "github.com/konveyor/crane/cmd/transfer-pvc" 11 | skopeo_sync_gen "github.com/konveyor/crane/cmd/skopeo-sync-gen" 12 | "github.com/konveyor/crane/cmd/transform" 13 | tunnel_api "github.com/konveyor/crane/cmd/tunnel-api" 14 | "github.com/konveyor/crane/cmd/version" 15 | "github.com/konveyor/crane/internal/flags" 16 | "github.com/spf13/cobra" 17 | 
"k8s.io/cli-runtime/pkg/genericclioptions" 18 | ) 19 | 20 | func main() { 21 | f := &flags.GlobalFlags{} 22 | root := cobra.Command{ 23 | Use: "crane", 24 | } 25 | f.ApplyFlags(&root) 26 | root.AddCommand(export.NewExportCommand(genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}, f)) 27 | root.AddCommand(transfer_pvc.NewTransferPVCCommand(genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr})) 28 | root.AddCommand(tunnel_api.NewTunnelAPIOptions(genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr})) 29 | root.AddCommand(transform.NewTransformCommand(f)) 30 | root.AddCommand(skopeo_sync_gen.NewSkopeoSyncGenCommand(f)) 31 | root.AddCommand(apply.NewApplyCommand(f)) 32 | root.AddCommand(plugin_manager.NewPluginManagerCommand(f)) 33 | root.AddCommand(version.NewVersionCommand(f)) 34 | root.AddCommand(runfn.NewFnRunCommand(f)) 35 | if err := root.Execute(); err != nil { 36 | os.Exit(1) 37 | } 38 | } 39 | --------------------------------------------------------------------------------