├── .codecov.yml ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── build-rsync-image.yml │ ├── build-sshd-image.yml │ ├── build.yml │ ├── release-rsync-image.yml │ ├── release-sshd-image.yml │ └── release.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yml ├── .krew.yaml ├── .renovaterc.json ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── INSTALL.md ├── LICENSE ├── README.md ├── Taskfile.yml ├── USAGE.md ├── USAGE.md.gotmpl ├── app ├── app.go ├── completion.go └── migrate.go ├── cmd └── pv-migrate │ └── main.go ├── docker ├── rsync │ └── Dockerfile └── sshd │ ├── Dockerfile │ └── sshd_config ├── go.mod ├── go.sum ├── helm ├── helm.go ├── helm_test.go ├── pv-migrate │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ │ ├── _helpers.tpl │ │ ├── rsync │ │ │ ├── job.yaml │ │ │ ├── networkpolicy.yaml │ │ │ ├── secret.yaml │ │ │ └── serviceaccount.yaml │ │ └── sshd │ │ │ ├── deployment.yaml │ │ │ ├── networkpolicy.yaml │ │ │ ├── secret.yaml │ │ │ ├── service.yaml │ │ │ └── serviceaccount.yaml │ └── values.yaml ├── test-vals-different-cluster.yaml ├── test-vals-different-ns.yaml └── test-vals-same-ns.yaml ├── img └── demo.gif ├── integration └── integration_test.go ├── k8s ├── client.go ├── client_test.go ├── completion.go ├── helm.go ├── job.go ├── pod.go ├── portforward.go ├── service.go └── testdata │ └── _kubeconfig_test.yaml ├── migration └── types.go ├── migrator ├── migrator.go └── migrator_test.go ├── pvc ├── info.go └── info_test.go ├── rsync ├── cmd.go └── progress │ ├── logger.go │ ├── progress.go │ └── progress_test.go ├── scripts └── completions.sh ├── ssh ├── ssh.go └── types.go ├── strategy ├── lbsvc.go ├── lbsvc_test.go ├── local.go ├── mnt2.go ├── mnt2_test.go ├── strategy.go ├── strategy_test.go ├── svc.go └── svc_test.go ├── test ├── .gitignore ├── k8s │ ├── _ns-1.yaml │ ├── _ns-2.yaml │ ├── dest-1.yaml │ ├── dest-2.yaml │ └── 
source-1.yaml ├── kind-config.yaml ├── metallb-manifests.yaml ├── netpol-allow-all.yaml ├── terraform │ ├── .gitignore │ ├── .terraform.lock.hcl │ ├── gcp.tf │ ├── terraform.tfvars │ └── variables.tf ├── test-destroy-env.sh ├── test-prepare-env.sh └── test-run.sh └── util ├── util.go └── util_test.go /.codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | range: 30..70 3 | status: 4 | patch: 5 | default: 6 | informational: true 7 | project: 8 | default: 9 | informational: true 10 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @utkuozdemir 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Run command '...' 16 | 2. See error 17 | 18 | **Expected behavior** 19 | A clear and concise description of what you expected to happen. 20 | 21 | **Console output** 22 | Add the error logs and/or the output to help us diagnose the problem. 23 | 24 | **Version** 25 | - Source and destination Kubernetes versions [e.g. `v1.17.14-gke.1600`, `v1.21.1+k3s1`] 26 | - Source and destination container runtimes [e.g. `containerd://1.4.4-k3s2`, `docker://19.3.6`] 27 | - `pv-migrate` version and architecture [e.g. `v0.5.5 - darwin_x86_64`] 28 | - Installation method [e.g. `homebrew`, binary download] 29 | - Source and destination PVC type, size and accessModes [e.g. 
`ReadWriteMany, 8G, kubernetes.io/gce-pd -> ReadWriteOnce, N/A, rancher.io/local-path` ] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Types of changes 2 | 3 | - [ ] Bug fix (non-breaking change which fixes an issue) 4 | - [ ] New feature (non-breaking change which adds functionality) 5 | - [ ] Breaking change (fix or feature that would cause existing functionality to change) 6 | - [ ] I have read the **CONTRIBUTING** document. 7 | - [ ] My code follows the code style of this project. 8 | - [ ] My change requires a change to the documentation. 9 | - [ ] I have updated the documentation accordingly. 10 | - [ ] I have added tests to cover my changes. 11 | - [ ] All new and existing tests passed. 
12 | -------------------------------------------------------------------------------- /.github/workflows/build-rsync-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: build rsync image 3 | 4 | on: 5 | workflow_dispatch: {} 6 | push: 7 | branches: 8 | - master 9 | - renovate/** 10 | paths: 11 | - docker/rsync/** 12 | pull_request: 13 | branches: 14 | - master 15 | paths: 16 | - docker/rsync/** 17 | 18 | jobs: 19 | build-rsync-image: 20 | runs-on: ubuntu-24.04 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v4.2.2 24 | - name: Set up QEMU 25 | uses: docker/setup-qemu-action@v3.6.0 26 | - name: Set up Docker Buildx 27 | uses: docker/setup-buildx-action@v3.10.0 28 | - name: Login to DockerHub 29 | uses: docker/login-action@v3.4.0 30 | with: 31 | username: utkuozdemir 32 | password: ${{ secrets.DOCKERHUB_TOKEN }} 33 | - name: Login to GitHub Container Registry 34 | uses: docker/login-action@v3.4.0 35 | with: 36 | registry: ghcr.io 37 | username: ${{ github.repository_owner }} 38 | password: ${{ secrets.GITHUB_TOKEN }} 39 | - name: Build and push 40 | uses: docker/build-push-action@v6.15.0 41 | with: 42 | context: ./docker/rsync/ 43 | platforms: linux/amd64,linux/arm,linux/arm64 44 | push: true 45 | tags: | 46 | docker.io/utkuozdemir/pv-migrate-rsync:latest 47 | ghcr.io/${{ github.repository_owner }}/pv-migrate-rsync:latest 48 | -------------------------------------------------------------------------------- /.github/workflows/build-sshd-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: build sshd image 3 | 4 | on: 5 | workflow_dispatch: {} 6 | push: 7 | branches: 8 | - master 9 | - renovate/** 10 | paths: 11 | - docker/sshd/** 12 | pull_request: 13 | branches: 14 | - master 15 | paths: 16 | - docker/sshd/** 17 | 18 | jobs: 19 | build-sshd-image: 20 | runs-on: ubuntu-24.04 21 | steps: 22 | - name: Checkout 23 | uses: 
actions/checkout@v4.2.2 24 | - name: Set up QEMU 25 | uses: docker/setup-qemu-action@v3.6.0 26 | - name: Set up Docker Buildx 27 | uses: docker/setup-buildx-action@v3.10.0 28 | - name: Login to DockerHub 29 | uses: docker/login-action@v3.4.0 30 | with: 31 | username: utkuozdemir 32 | password: ${{ secrets.DOCKERHUB_TOKEN }} 33 | - name: Login to GitHub Container Registry 34 | uses: docker/login-action@v3.4.0 35 | with: 36 | registry: ghcr.io 37 | username: ${{ github.repository_owner }} 38 | password: ${{ secrets.GITHUB_TOKEN }} 39 | - name: Build and push 40 | uses: docker/build-push-action@v6.15.0 41 | with: 42 | context: ./docker/sshd/ 43 | platforms: linux/amd64,linux/arm,linux/arm64 44 | push: true 45 | tags: | 46 | docker.io/utkuozdemir/pv-migrate-sshd:latest 47 | ghcr.io/${{ github.repository_owner }}/pv-migrate-sshd:latest 48 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: build 3 | 4 | on: 5 | workflow_dispatch: {} 6 | push: 7 | branches: 8 | - master 9 | - renovate/** 10 | paths-ignore: 11 | - "**.md" 12 | - "img/**" 13 | - "test/**" 14 | - ".gitignore" 15 | - "docker/**" 16 | - "renovate.json" 17 | - ".krew.yaml" 18 | pull_request: 19 | branches: 20 | - master 21 | paths-ignore: 22 | - "**.md" 23 | - "img/**" 24 | - "test/**" 25 | - ".gitignore" 26 | - "docker/**" 27 | - "renovate.json" 28 | - ".krew.yaml" 29 | 30 | concurrency: 31 | group: build-${{ github.event.pull_request.number || github.ref }} 32 | cancel-in-progress: true 33 | 34 | jobs: 35 | build: 36 | runs-on: ubuntu-24.04 37 | steps: 38 | - name: Checkout 39 | uses: actions/checkout@v4.2.2 40 | - name: Install go 41 | uses: actions/setup-go@v5.4.0 42 | with: 43 | go-version-file: go.mod 44 | - name: Install Task 45 | uses: arduino/setup-task@v2.0.0 46 | with: 47 | # renovate: depName=go-task/task datasource=github-releases 48 | 
version: 3.42.1 49 | - name: Tidy go.mod 50 | run: go mod tidy 51 | - name: Run linters 52 | uses: golangci/golangci-lint-action@v8.0.0 53 | with: 54 | # renovate: depName=golangci/golangci-lint datasource=github-releases 55 | version: v2.0.2 56 | args: --timeout=10m0s 57 | install-mode: goinstall 58 | - name: Build with Goreleaser 59 | if: ${{ always() }} 60 | uses: goreleaser/goreleaser-action@v6.3.0 61 | with: 62 | # renovate: depName=goreleaser/goreleaser datasource=github-releases 63 | version: v2.8.2 64 | args: build --snapshot --clean --single-target 65 | - name: Update usage 66 | run: task update-usage 67 | - name: Check for dirty files 68 | run: git diff --exit-code 69 | test: 70 | runs-on: ubuntu-24.04 71 | steps: 72 | - name: Checkout 73 | uses: actions/checkout@v4.2.2 74 | - name: Install go 75 | uses: actions/setup-go@v5.4.0 76 | with: 77 | go-version-file: go.mod 78 | - name: Install richgo 79 | # renovate: depName=kyoh86/richgo 80 | run: go install github.com/kyoh86/richgo@v0.3.12 81 | - name: Install helm 82 | run: curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash 83 | - name: Install cilium-cli 84 | env: 85 | # renovate: depName=cilium/cilium-cli datasource=github-releases 86 | CILIUM_CLI_VERSION: v0.18.3 87 | run: | 88 | wget https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-amd64.tar.gz 89 | sudo tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin 90 | rm cilium-linux-amd64.tar.gz 91 | 92 | - name: kind-1 - Create cluster 93 | uses: helm/kind-action@v1.12.0 94 | with: 95 | cluster_name: kind-1 96 | # renovate: depName=kubernetes-sigs/kind datasource=github-releases 97 | version: v0.27.0 98 | config: test/kind-config.yaml 99 | - name: kind-1 - Create metallb-system namespace 100 | run: kubectl create namespace metallb-system --dry-run=client -oyaml | kubectl apply -f - 101 | - name: kind-1 - Install allow-all network policies on system namespaces 102 | run: | 103 | kubectl -n 
kube-system apply -f test/netpol-allow-all.yaml 104 | kubectl -n local-path-storage apply -f test/netpol-allow-all.yaml 105 | kubectl -n metallb-system apply -f test/netpol-allow-all.yaml 106 | - name: kind-1 - Install Cilium with default-deny policy 107 | run: cilium install --set policyEnforcementMode=always 108 | - name: kind-1 - Install MetalLB 109 | env: 110 | # renovate: depName=metallb datasource=helm registryUrl=https://charts.bitnami.com/bitnami 111 | METALLB_CHART_VERSION: 6.4.9 112 | run: | 113 | MANIFESTS_PATH=/tmp/metallb-manifests.yaml 114 | 115 | # Find the first IPv4 subnet of kind, e.g., 172.18.0.0/16, then trim it to remove last octet, e.g., 172.18.0 116 | ADDRESS_RANGE_PREFIX=$(docker network inspect -f json kind | jq -r '.[0].IPAM.Config | map(select(.Subnet | test("^[0-9]+\\."))) | .[0].Subnet | split("/")[0] | split(".")[:3] | join(".")') 117 | export ADDRESS_RANGE=${ADDRESS_RANGE_PREFIX}.240-${ADDRESS_RANGE_PREFIX}.255 118 | 119 | envsubst < test/metallb-manifests.yaml > $MANIFESTS_PATH 120 | 121 | helm repo add bitnami https://charts.bitnami.com/bitnami 122 | helm upgrade metallb \ 123 | --install bitnami/metallb \ 124 | --namespace metallb-system \ 125 | --version ${METALLB_CHART_VERSION} \ 126 | --atomic \ 127 | --set networkPolicy.enabled=true 128 | 129 | helm -n metallb-system get values metallb 130 | 131 | echo "MetalLB rendered manifests:" 132 | cat $MANIFESTS_PATH 133 | 134 | kubectl apply -f $MANIFESTS_PATH 135 | 136 | - name: kind-1 - Wait for all pods in the cluster to be ready 137 | run: | 138 | for i in $(seq 1 10); do 139 | echo "Attempt: $i" 140 | kubectl wait pod --for=condition=Ready --all --all-namespaces && break; 141 | sleep 5; 142 | done 143 | 144 | - name: kind-2 - Create cluster 145 | uses: helm/kind-action@v1.12.0 146 | env: 147 | KUBECONFIG: /home/runner/.kube/kind-2.yaml 148 | with: 149 | cluster_name: kind-2 150 | # renovate: depName=kubernetes-sigs/kind datasource=github-releases 151 | version: v0.27.0 152 | 
config: test/kind-config.yaml 153 | - name: kind-2 - Install allow-all network policies on system namespaces 154 | env: 155 | KUBECONFIG: /home/runner/.kube/kind-2.yaml 156 | run: | 157 | kubectl -n kube-system apply -f test/netpol-allow-all.yaml 158 | kubectl -n local-path-storage apply -f test/netpol-allow-all.yaml 159 | - name: kind-2 - Install Cilium with default-deny policy 160 | env: 161 | KUBECONFIG: /home/runner/.kube/kind-2.yaml 162 | run: cilium install --set policyEnforcementMode=always 163 | - name: kind-2 - Wait for all pods in the cluster to be ready 164 | env: 165 | KUBECONFIG: /home/runner/.kube/kind-2.yaml 166 | run: | 167 | for i in $(seq 1 10); do 168 | echo "Attempt: $i" 169 | kubectl wait pod --for=condition=Ready --all --all-namespaces && break; 170 | sleep 5; 171 | done 172 | 173 | - name: Run tests 174 | env: 175 | RICHGO_FORCE_COLOR: "1" 176 | PVMIG_TEST_EXTRA_KUBECONFIG: /home/runner/.kube/kind-2.yaml 177 | run: richgo test -tags integration -race -coverpkg=./... -coverprofile=coverage.txt -covermode=atomic -timeout 20m -v ./... 
178 | - name: Send coverage 179 | uses: codecov/codecov-action@v5.4.0 180 | with: 181 | files: coverage.txt 182 | token: ${{ secrets.CODECOV_TOKEN }} 183 | -------------------------------------------------------------------------------- /.github/workflows/release-rsync-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: release rsync image 3 | 4 | on: 5 | push: 6 | tags: 7 | - docker-rsync-* 8 | 9 | jobs: 10 | release-rsync-image: 11 | runs-on: ubuntu-24.04 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4.2.2 15 | - name: Set up QEMU 16 | uses: docker/setup-qemu-action@v3.6.0 17 | - name: Set up Docker Buildx 18 | uses: docker/setup-buildx-action@v3.10.0 19 | - name: Login to DockerHub 20 | uses: docker/login-action@v3.4.0 21 | with: 22 | username: utkuozdemir 23 | password: ${{ secrets.DOCKERHUB_TOKEN }} 24 | - name: Login to GitHub Container Registry 25 | uses: docker/login-action@v3.4.0 26 | with: 27 | registry: ghcr.io 28 | username: ${{ github.repository_owner }} 29 | password: ${{ secrets.GITHUB_TOKEN }} 30 | - name: Set image tag as env variable 31 | run: echo "IMAGE_VERSION=$(echo ${GITHUB_REF#refs/*/} | sed 's/^docker-rsync-//')" >> $GITHUB_ENV 32 | - name: Build and push 33 | uses: docker/build-push-action@v6.15.0 34 | with: 35 | context: ./docker/rsync/ 36 | platforms: linux/amd64,linux/arm,linux/arm64 37 | push: true 38 | tags: | 39 | docker.io/utkuozdemir/pv-migrate-rsync:latest 40 | docker.io/utkuozdemir/pv-migrate-rsync:${{ env.IMAGE_VERSION }} 41 | ghcr.io/${{ github.repository_owner }}/pv-migrate-rsync:latest 42 | ghcr.io/${{ github.repository_owner }}/pv-migrate-rsync:${{ env.IMAGE_VERSION }} 43 | -------------------------------------------------------------------------------- /.github/workflows/release-sshd-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: release sshd image 3 | 4 | on: 5 | push: 6 | tags: 7 | - 
docker-sshd-* 8 | 9 | jobs: 10 | release-sshd-image: 11 | runs-on: ubuntu-24.04 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4.2.2 15 | - name: Set up QEMU 16 | uses: docker/setup-qemu-action@v3.6.0 17 | - name: Set up Docker Buildx 18 | uses: docker/setup-buildx-action@v3.10.0 19 | - name: Login to DockerHub 20 | uses: docker/login-action@v3.4.0 21 | with: 22 | username: utkuozdemir 23 | password: ${{ secrets.DOCKERHUB_TOKEN }} 24 | - name: Login to GitHub Container Registry 25 | uses: docker/login-action@v3.4.0 26 | with: 27 | registry: ghcr.io 28 | username: ${{ github.repository_owner }} 29 | password: ${{ secrets.GITHUB_TOKEN }} 30 | - name: Set image tag as env variable 31 | run: echo "IMAGE_VERSION=$(echo ${GITHUB_REF#refs/*/} | sed 's/^docker-sshd-//')" >> $GITHUB_ENV 32 | - name: Build and push 33 | uses: docker/build-push-action@v6.15.0 34 | with: 35 | context: ./docker/sshd/ 36 | platforms: linux/amd64,linux/arm,linux/arm64 37 | push: true 38 | tags: | 39 | docker.io/utkuozdemir/pv-migrate-sshd:latest 40 | docker.io/utkuozdemir/pv-migrate-sshd:${{ env.IMAGE_VERSION }} 41 | ghcr.io/${{ github.repository_owner }}/pv-migrate-sshd:latest 42 | ghcr.io/${{ github.repository_owner }}/pv-migrate-sshd:${{ env.IMAGE_VERSION }} 43 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: release 3 | 4 | on: 5 | push: 6 | tags: 7 | - 'v*.*.*' 8 | 9 | jobs: 10 | release: 11 | runs-on: ubuntu-24.04 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4.2.2 15 | with: 16 | # to be able to generate the full changelog: 17 | # https://github.com/goreleaser/goreleaser-action/issues/56#issuecomment-568718162 18 | fetch-depth: 0 19 | - name: Setup Go 20 | uses: actions/setup-go@v5.4.0 21 | with: 22 | go-version-file: go.mod 23 | - name: Login to DockerHub 24 | uses: docker/login-action@v3.4.0 
25 | with: 26 | username: utkuozdemir 27 | password: ${{ secrets.DOCKERHUB_TOKEN }} 28 | - name: Login to GitHub Container Registry 29 | uses: docker/login-action@v3.4.0 30 | with: 31 | registry: ghcr.io 32 | username: ${{ github.repository_owner }} 33 | password: ${{ secrets.GITHUB_TOKEN }} 34 | - name: GoReleaser 35 | uses: goreleaser/goreleaser-action@v6.3.0 36 | with: 37 | # renovate: depName=goreleaser/goreleaser datasource=github-releases 38 | version: v2.8.2 39 | args: release --clean 40 | env: 41 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 42 | PRIVATE_ACCESS_TOKEN: ${{ secrets.PRIVATE_ACCESS_TOKEN }} 43 | - name: Update new version in krew-index 44 | uses: rajatjindal/krew-release-bot@v0.0.47 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.toptal.com/developers/gitignore/api/go,macos,terraform,jetbrains+all,visualstudiocode,helm 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=go,macos,terraform,jetbrains+all,visualstudiocode,helm 3 | 4 | ### Go ### 5 | # If you prefer the allow list template instead of the deny list, see community template: 6 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 7 | # 8 | # Binaries for programs and plugins 9 | *.exe 10 | *.exe~ 11 | *.dll 12 | *.so 13 | *.dylib 14 | 15 | # Test binary, built with `go test -c` 16 | *.test 17 | 18 | # Output of the go coverage tool, specifically when used with LiteIDE 19 | *.out 20 | 21 | # Dependency directories (remove the comment below to include it) 22 | # vendor/ 23 | 24 | # Go workspace file 25 | go.work 26 | 27 | ### Go Patch ### 28 | /vendor/ 29 | /Godeps/ 30 | 31 | ### Helm ### 32 | # Chart dependencies 33 | **/charts/*.tgz 34 | 35 | ### JetBrains+all ### 36 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and 
Rider 37 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 38 | 39 | # User-specific stuff 40 | .idea/**/workspace.xml 41 | .idea/**/tasks.xml 42 | .idea/**/usage.statistics.xml 43 | .idea/**/dictionaries 44 | .idea/**/shelf 45 | 46 | # AWS User-specific 47 | .idea/**/aws.xml 48 | 49 | # Generated files 50 | .idea/**/contentModel.xml 51 | 52 | # Sensitive or high-churn files 53 | .idea/**/dataSources/ 54 | .idea/**/dataSources.ids 55 | .idea/**/dataSources.local.xml 56 | .idea/**/sqlDataSources.xml 57 | .idea/**/dynamic.xml 58 | .idea/**/uiDesigner.xml 59 | .idea/**/dbnavigator.xml 60 | 61 | # Gradle 62 | .idea/**/gradle.xml 63 | .idea/**/libraries 64 | 65 | # Gradle and Maven with auto-import 66 | # When using Gradle or Maven with auto-import, you should exclude module files, 67 | # since they will be recreated, and may cause churn. Uncomment if using 68 | # auto-import. 69 | # .idea/artifacts 70 | # .idea/compiler.xml 71 | # .idea/jarRepositories.xml 72 | # .idea/modules.xml 73 | # .idea/*.iml 74 | # .idea/modules 75 | # *.iml 76 | # *.ipr 77 | 78 | # CMake 79 | cmake-build-*/ 80 | 81 | # Mongo Explorer plugin 82 | .idea/**/mongoSettings.xml 83 | 84 | # File-based project format 85 | *.iws 86 | 87 | # IntelliJ 88 | out/ 89 | 90 | # mpeltonen/sbt-idea plugin 91 | .idea_modules/ 92 | 93 | # JIRA plugin 94 | atlassian-ide-plugin.xml 95 | 96 | # Cursive Clojure plugin 97 | .idea/replstate.xml 98 | 99 | # SonarLint plugin 100 | .idea/sonarlint/ 101 | 102 | # Crashlytics plugin (for Android Studio and IntelliJ) 103 | com_crashlytics_export_strings.xml 104 | crashlytics.properties 105 | crashlytics-build.properties 106 | fabric.properties 107 | 108 | # Editor-based Rest Client 109 | .idea/httpRequests 110 | 111 | # Android studio 3.1+ serialized cache file 112 | .idea/caches/build_file_checksums.ser 113 | 114 | ### JetBrains+all Patch ### 115 | # Ignores the whole .idea folder and all .iml files 116 | # See 
https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 117 | 118 | .idea/* 119 | 120 | # Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 121 | 122 | *.iml 123 | modules.xml 124 | .idea/misc.xml 125 | *.ipr 126 | 127 | # Sonarlint plugin 128 | .idea/sonarlint 129 | 130 | ### macOS ### 131 | # General 132 | .DS_Store 133 | .AppleDouble 134 | .LSOverride 135 | 136 | # Icon must end with two \r 137 | Icon 138 | 139 | 140 | # Thumbnails 141 | ._* 142 | 143 | # Files that might appear in the root of a volume 144 | .DocumentRevisions-V100 145 | .fseventsd 146 | .Spotlight-V100 147 | .TemporaryItems 148 | .Trashes 149 | .VolumeIcon.icns 150 | .com.apple.timemachine.donotpresent 151 | 152 | # Directories potentially created on remote AFP share 153 | .AppleDB 154 | .AppleDesktop 155 | Network Trash Folder 156 | Temporary Items 157 | .apdisk 158 | 159 | ### Terraform ### 160 | # Local .terraform directories 161 | **/.terraform/* 162 | 163 | # .tfstate files 164 | *.tfstate 165 | *.tfstate.* 166 | 167 | # Crash log files 168 | crash.log 169 | crash.*.log 170 | 171 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 172 | # password, private keys, and other secrets. These should not be part of version 173 | # control as they are data points which are potentially sensitive and subject 174 | # to change depending on the environment. 
175 | *.tfvars 176 | 177 | # Ignore override files as they are usually used to override resources locally and so 178 | # are not checked in 179 | override.tf 180 | override.tf.json 181 | *_override.tf 182 | *_override.tf.json 183 | 184 | # Include override files you do wish to add to version control using negated pattern 185 | # !example_override.tf 186 | 187 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 188 | # example: *tfplan* 189 | 190 | # Ignore CLI configuration files 191 | .terraformrc 192 | terraform.rc 193 | 194 | ### VisualStudioCode ### 195 | .vscode/* 196 | !.vscode/settings.json 197 | !.vscode/tasks.json 198 | !.vscode/launch.json 199 | !.vscode/extensions.json 200 | !.vscode/*.code-snippets 201 | 202 | # Local History for Visual Studio Code 203 | .history/ 204 | 205 | # Built Visual Studio Code Extensions 206 | *.vsix 207 | 208 | ### VisualStudioCode Patch ### 209 | # Ignore all local history of files 210 | .history 211 | .ionide 212 | 213 | # Support for Project snippet scope 214 | 215 | # End of https://www.toptal.com/developers/gitignore/api/go,macos,terraform,jetbrains+all,visualstudiocode,helm 216 | 217 | /dist 218 | /profile.cov 219 | /.task 220 | /completions 221 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | default: all 4 | disable: 5 | - depguard # breaking things: https://github.com/atc0005/go-ci/issues/1024 6 | - err113 # no need to wrap all errors, too much work 7 | - exhaustruct # pointless 8 | - gochecknoglobals # globals are sometimes ok 9 | - testpackage # todo: consider enabling 10 | settings: 11 | goconst: 12 | min-occurrences: 5 13 | exclusions: 14 | generated: lax 15 | presets: 16 | - comments 17 | - std-error-handling 18 | - common-false-positives 19 | - legacy 20 | rules: 21 | - linters: 22 | - goconst 23 | path: 
(.+)_test\.go 24 | formatters: 25 | enable: 26 | - gci 27 | - gofmt 28 | - gofumpt 29 | - goimports 30 | - golines 31 | settings: 32 | gci: 33 | sections: 34 | - standard 35 | - default 36 | - localmodule 37 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | before: 4 | hooks: 5 | - go mod download 6 | - ./scripts/completions.sh 7 | 8 | builds: 9 | - id: pv-migrate 10 | main: ./cmd/pv-migrate 11 | binary: pv-migrate 12 | env: 13 | - CGO_ENABLED=0 14 | goos: 15 | - linux 16 | - darwin 17 | - windows 18 | goarch: 19 | - amd64 20 | - arm 21 | - arm64 22 | goarm: 23 | - "7" 24 | ignore: 25 | - goos: windows 26 | goarch: arm 27 | - goos: windows 28 | goarch: arm64 29 | - goos: darwin 30 | goarch: arm 31 | 32 | archives: 33 | - id: pv-migrate-archive 34 | # default name template except we use .Tag instead of .Version to keep the "v" prefix 35 | name_template: >- 36 | {{ .ProjectName }}_ 37 | {{- .Tag }}_ 38 | {{- .Os }}_ 39 | {{- if eq .Arch "amd64" }}x86_64 40 | {{- else if eq .Arch "386" }}i386 41 | {{- else }}{{ .Arch }}{{ end }} 42 | {{- with .Arm }}v{{ . }}{{ end }} 43 | {{- with .Mips }}_{{ . 
}}{{ end }} 44 | {{- if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }} 45 | builds: 46 | - pv-migrate 47 | format_overrides: 48 | - goos: windows 49 | formats: 50 | - zip 51 | files: 52 | - LICENSE 53 | - completions/* 54 | 55 | checksum: 56 | name_template: checksums.txt 57 | algorithm: sha256 58 | 59 | changelog: 60 | sort: asc 61 | filters: 62 | exclude: 63 | - '^build(\(.+\))?:' 64 | - '^chore(\(.+\))?:' 65 | - '^cd(\(.+\))?:' 66 | - '^ci(\(.+\))?:' 67 | - '^cicd(\(.+\))?:' 68 | - '^docker(\(.+\))?:' 69 | - '^docs(\(.+\))?:' 70 | - '^refactor(\(.+\))?:' 71 | - '^test(\(.+\))?:' 72 | - Merge pull request 73 | - Merge branch 74 | - go mod tidy 75 | 76 | dockers: 77 | - image_templates: 78 | - docker.io/utkuozdemir/pv-migrate:{{ .Tag }}-amd64 79 | - ghcr.io/utkuozdemir/pv-migrate:{{ .Tag }}-amd64 80 | use: buildx 81 | dockerfile: Dockerfile 82 | build_flag_templates: 83 | - --platform=linux/amd64 84 | - image_templates: 85 | - docker.io/utkuozdemir/pv-migrate:{{ .Tag }}-arm64 86 | - ghcr.io/utkuozdemir/pv-migrate:{{ .Tag }}-arm64 87 | use: buildx 88 | goarch: arm64 89 | dockerfile: Dockerfile 90 | build_flag_templates: 91 | - --platform=linux/arm64 92 | - image_templates: 93 | - docker.io/utkuozdemir/pv-migrate:{{ .Tag }}-armv7 94 | - ghcr.io/utkuozdemir/pv-migrate:{{ .Tag }}-armv7 95 | use: buildx 96 | goarch: arm 97 | goarm: "7" 98 | dockerfile: Dockerfile 99 | build_flag_templates: 100 | - --platform=linux/arm/v7 101 | 102 | docker_manifests: 103 | - name_template: docker.io/utkuozdemir/pv-migrate:{{ .Tag }} 104 | image_templates: 105 | - docker.io/utkuozdemir/pv-migrate:{{ .Tag }}-amd64 106 | - docker.io/utkuozdemir/pv-migrate:{{ .Tag }}-arm64 107 | - docker.io/utkuozdemir/pv-migrate:{{ .Tag }}-armv7 108 | - name_template: ghcr.io/utkuozdemir/pv-migrate:{{ .Tag }} 109 | image_templates: 110 | - ghcr.io/utkuozdemir/pv-migrate:{{ .Tag }}-amd64 111 | - ghcr.io/utkuozdemir/pv-migrate:{{ .Tag }}-arm64 112 | - ghcr.io/utkuozdemir/pv-migrate:{{ .Tag }}-armv7 113 | 114 
| # To test docker image push, uncomment the following and run 115 | # goreleaser release --skip-validate --rm-dist --debug 116 | #release: 117 | # disable: true 118 | 119 | brews: 120 | - repository: 121 | owner: utkuozdemir 122 | name: homebrew-pv-migrate 123 | token: "{{ .Env.PRIVATE_ACCESS_TOKEN }}" 124 | commit_author: 125 | name: Utku Ozdemir 126 | email: utkuozdemir@gmail.com 127 | directory: Formula 128 | goarm: "7" 129 | homepage: https://github.com/utkuozdemir/pv-migrate 130 | description: Persistent volume migration plugin for Kubernetes 131 | license: Apache-2.0 132 | test: | 133 | system "#{bin}/pv-migrate -v" 134 | install: |- 135 | bin.install "pv-migrate" 136 | bash_completion.install "completions/pv-migrate.bash" => "pv-migrate" 137 | zsh_completion.install "completions/pv-migrate.zsh" => "_pv-migrate" 138 | fish_completion.install "completions/pv-migrate.fish" 139 | 140 | scoops: 141 | - repository: 142 | owner: utkuozdemir 143 | name: scoop-pv-migrate 144 | token: "{{ .Env.PRIVATE_ACCESS_TOKEN }}" 145 | commit_author: 146 | name: Utku Ozdemir 147 | email: utkuozdemir@gmail.com 148 | commit_msg_template: "Scoop update for {{ .ProjectName }} version {{ .Tag }}" 149 | homepage: https://github.com/utkuozdemir/pv-migrate 150 | description: Persistent volume migration plugin for Kubernetes 151 | license: Apache-2.0 152 | -------------------------------------------------------------------------------- /.krew.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: krew.googlecontainertools.github.com/v1alpha2 2 | kind: Plugin 3 | metadata: 4 | name: pv-migrate 5 | spec: 6 | version: {{ .TagName }} 7 | homepage: https://github.com/utkuozdemir/pv-migrate 8 | platforms: 9 | - selector: 10 | matchLabels: 11 | os: linux 12 | arch: amd64 13 | {{addURIAndSha "https://github.com/utkuozdemir/pv-migrate/releases/download/{{ .TagName }}/pv-migrate_{{ .TagName }}_linux_x86_64.tar.gz" .TagName | indent 6 }} 14 | bin: 
pv-migrate 15 | - selector: 16 | matchLabels: 17 | os: linux 18 | arch: arm64 19 | {{addURIAndSha "https://github.com/utkuozdemir/pv-migrate/releases/download/{{ .TagName }}/pv-migrate_{{ .TagName }}_linux_arm64.tar.gz" .TagName | indent 6 }} 20 | bin: pv-migrate 21 | - selector: 22 | matchLabels: 23 | os: linux 24 | arch: arm 25 | {{addURIAndSha "https://github.com/utkuozdemir/pv-migrate/releases/download/{{ .TagName }}/pv-migrate_{{ .TagName }}_linux_armv7.tar.gz" .TagName | indent 6 }} 26 | bin: pv-migrate 27 | - selector: 28 | matchLabels: 29 | os: darwin 30 | arch: amd64 31 | {{addURIAndSha "https://github.com/utkuozdemir/pv-migrate/releases/download/{{ .TagName }}/pv-migrate_{{ .TagName }}_darwin_x86_64.tar.gz" .TagName | indent 6 }} 32 | bin: pv-migrate 33 | - selector: 34 | matchLabels: 35 | os: darwin 36 | arch: arm64 37 | {{addURIAndSha "https://github.com/utkuozdemir/pv-migrate/releases/download/{{ .TagName }}/pv-migrate_{{ .TagName }}_darwin_arm64.tar.gz" .TagName | indent 6 }} 38 | bin: pv-migrate 39 | - selector: 40 | matchLabels: 41 | os: windows 42 | arch: amd64 43 | {{addURIAndSha "https://github.com/utkuozdemir/pv-migrate/releases/download/{{ .TagName }}/pv-migrate_{{ .TagName }}_windows_x86_64.zip" .TagName | indent 6 }} 44 | bin: pv-migrate.exe 45 | shortDescription: Migrate data across persistent volumes 46 | description: | 47 | pv-migrate uses ssh and rsync to copy data across persistent volumes 48 | -------------------------------------------------------------------------------- /.renovaterc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:recommended", 4 | ":automergeBranch", 5 | ":automergeMinor", 6 | ":automergeDigest", 7 | ":semanticCommitTypeAll(chore)" 8 | ], 9 | "postUpdateOptions": [ 10 | "gomodTidy" 11 | ], 12 | "packageRules": [ 13 | { 14 | "matchDatasources": [ 15 | "go" 16 | ], 17 | "groupName": "go packages" 18 | }, 19 | { 20 | "matchDatasources": [ 21 | 
"docker" 22 | ], 23 | "groupName": "docker images" 24 | }, 25 | { 26 | "extends": [ 27 | ":automergeMajor" 28 | ], 29 | "matchFileNames": [ 30 | ".github/workflows/**" 31 | ], 32 | "groupName": "github actions" 33 | } 34 | ], 35 | "customManagers": [ 36 | { 37 | "customType": "regex", 38 | "fileMatch": [ 39 | "\\.github\\/workflows\\/.*" 40 | ], 41 | "matchStrings": [ 42 | "# renovate: depName=(?[^\\s]+)( datasource=(?[^\\s]+))?( registryUrl=(?\\S+))?\\n[^\\n]*?(?v?\\d+\\.\\d+\\.\\d+(-[\\S]+)?)" 43 | ], 44 | "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-tags{{/if}}", 45 | "versioningTemplate": "semver" 46 | }, 47 | { 48 | "customType": "regex", 49 | "fileMatch": [ 50 | "go.mod", 51 | "\\.github\\/workflows\\/.*" 52 | ], 53 | "matchStrings": [ 54 | "(#|\\/\\/) renovate: go\\n[^\\n]*?(?v?\\d+\\.\\d+(\\.\\d+(-[\\S]+)?)?)" 55 | ], 56 | "depNameTemplate": "golang/go", 57 | "datasourceTemplate": "github-tags", 58 | "extractVersionTemplate": "^go(?\\d+\\.\\d+\\.\\d+)$" 59 | } 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | utkuozdemir@gmail.com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Contributing to the project is simple. Just do the following: 4 | 5 | 1. Be nice :) 6 | 7 | 2. If you are not sure about something (e.g. if something is a bug, on how to solve it, if a feature makes sense etc.), 8 | before starting to work on it, create an issue for it, so that we can discuss beforehand - maybe saving your time. 9 | 10 | 3. 
Fork the repo, do your changes, create a PR. 11 | 12 | 4. Make sure the build succeeds. Do the changes after the review if needed. 13 | 14 | That's it. 15 | 16 | ## Creating Releases 17 | 18 | - To make a release of `pv-migrate` itself, run the following command: 19 | ```bash 20 | task release 21 | ``` 22 | 23 | - To make a release of docker images, use their special tag prefixes, followed by the version. Example: 24 | ```bash 25 | git tag -a "docker-sshd-1.0.0" -m "Docker Sshd Image Release 1.0.0" 26 | git tag -a "docker-rsync-1.0.0" -m "Docker Rsync Image Release 1.0.0" 27 | git push 28 | git push --tags 29 | ``` 30 | These will result in the Docker images with the following tags being pushed: 31 | ``` 32 | docker.io/utkuozdemir/pv-migrate-sshd:1.0.0 33 | docker.io/utkuozdemir/pv-migrate-rsync:1.0.0 34 | ``` 35 | 36 | ## Editing the helm chart 37 | 38 | The `pv-migrate` helm chart is located at `helm/pv-migrate`. It is inserted into the go code during build. 39 | The source is a helm package located in `migrator/helm-chart.tgz`. 40 | 41 | If you want to tweak the helm chart, you must run the following command before recompiling the code in order 42 | to update the chart (you need [helm](https://helm.sh/docs/intro/install/) and [helm-docs](https://github.com/norwoodj/helm-docs) installed): 43 | 44 | ```bash 45 | task update-chart 46 | ``` 47 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.22.0 2 | COPY pv-migrate /usr/local/bin/pv-migrate 3 | -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | There are various installation methods for different use cases.
4 | 5 | ## Using Homebrew (macOS/Linux) 6 | If you have homebrew, the installation is as simple as: 7 | ```bash 8 | brew tap utkuozdemir/pv-migrate 9 | brew install pv-migrate 10 | ``` 11 | 12 | ## Using Scoop (Windows) 13 | If you use [Scoop package manager](https://scoop.sh) on Windows, 14 | run the following commands in a command prompt (CMD/Powershell): 15 | ```powershell 16 | scoop bucket add pv-migrate https://github.com/utkuozdemir/scoop-pv-migrate.git 17 | scoop install pv-migrate/pv-migrate 18 | ``` 19 | 20 | ## Using krew 21 | 22 | 1. Install [krew](https://krew.sigs.k8s.io/). 23 | 2. Install pv-migrate plugin: 24 | ```bash 25 | $ kubectl krew update 26 | $ kubectl krew install pv-migrate 27 | ``` 28 | 29 | ## By downloading the binaries (macOS/Linux/Windows) 30 | 31 | 1. Go to the [releases](https://github.com/utkuozdemir/pv-migrate/releases) and download 32 | the latest release archive for your platform. 33 | 2. Extract the archive. 34 | 3. Move the binary to somewhere in your `PATH`. 35 | 36 | Sample steps for macOS: 37 | ```bash 38 | $ VERSION=<version> 39 | $ wget https://github.com/utkuozdemir/pv-migrate/releases/download/${VERSION}/pv-migrate_${VERSION}_darwin_x86_64.tar.gz 40 | $ tar -xvzf pv-migrate_${VERSION}_darwin_x86_64.tar.gz 41 | $ mv pv-migrate /usr/local/bin 42 | $ pv-migrate --help 43 | ``` 44 | 45 | ## Running directly in Docker container 46 | 47 | Alternatively, you can use the 48 | [official Docker images](https://hub.docker.com/repository/docker/utkuozdemir/pv-migrate) 49 | that come with the `pv-migrate` binary pre-installed: 50 | ```bash 51 | docker run --rm -it utkuozdemir/pv-migrate:<version> pv-migrate --source <source-pvc> --dest <dest-pvc> ... 52 | ``` 53 | 54 | ## Installing Shell Completion 55 | 56 | If you install `pv-migrate` using Homebrew, completions for bash, 57 | zsh and fish will be installed for you - you don't need to do anything further.
58 | 59 | Completions are not supported when `pv-migrate` is installed using krew - see [here](https://github.com/kubernetes-sigs/krew/issues/543). 60 | 61 | If you have installed `pv-migrate` by directly downloading the binaries, 62 | run `pv-migrate completion --help` and follow the instructions. 63 | Here's the sample output of instructions: 64 | 65 | ``` 66 | To load completions: 67 | 68 | Bash: 69 | 70 | $ source <(pv-migrate completion bash) 71 | 72 | # To load completions for each session, execute once: 73 | # Linux: 74 | $ pv-migrate completion bash > /etc/bash_completion.d/pv-migrate 75 | # macOS: 76 | $ pv-migrate completion bash > /usr/local/etc/bash_completion.d/pv-migrate 77 | 78 | Zsh: 79 | 80 | # If shell completion is not already enabled in your environment, 81 | # you will need to enable it. You can execute the following once: 82 | 83 | $ echo "autoload -U compinit; compinit" >> ~/.zshrc 84 | 85 | # To load completions for each session, execute once: 86 | $ pv-migrate completion zsh > "${fpath[1]}/_pv-migrate" 87 | 88 | # You will need to start a new shell for this setup to take effect. 89 | 90 | fish: 91 | 92 | $ pv-migrate completion fish | source 93 | 94 | # To load completions for each session, execute once: 95 | $ pv-migrate completion fish > ~/.config/fish/completions/pv-migrate.fish 96 | 97 | PowerShell: 98 | 99 | PS> pv-migrate completion powershell | Out-String | Invoke-Expression 100 | 101 | # To load completions for every new session, run: 102 | PS> pv-migrate completion powershell > pv-migrate.ps1 103 | # and source this file from your PowerShell profile. 
104 | 105 | Usage: 106 | pv-migrate completion [bash|zsh|fish|powershell] 107 | 108 | Flags: 109 | -h, --help help for completion 110 | ``` 111 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pv-migrate 2 | 3 | [![build](https://github.com/utkuozdemir/pv-migrate/actions/workflows/build.yml/badge.svg)](https://github.com/utkuozdemir/pv-migrate/actions/workflows/build.yml) 4 | [![codecov](https://codecov.io/gh/utkuozdemir/pv-migrate/branch/master/graph/badge.svg?token=41ULBTVG7X)](https://codecov.io/gh/utkuozdemir/pv-migrate) 5 | [![Go Report Card](https://goreportcard.com/badge/github.com/utkuozdemir/pv-migrate)](https://goreportcard.com/report/github.com/utkuozdemir/pv-migrate) 6 | ![Latest GitHub release](https://img.shields.io/github/release/utkuozdemir/pv-migrate.svg) 7 | [![GitHub license](https://img.shields.io/github/license/utkuozdemir/pv-migrate)](https://github.com/utkuozdemir/pv-migrate/blob/master/LICENSE) 8 | ![GitHub stars](https://img.shields.io/github/stars/utkuozdemir/pv-migrate.svg?label=github%20stars) 9 | [![GitHub forks](https://img.shields.io/github/forks/utkuozdemir/pv-migrate)](https://github.com/utkuozdemir/pv-migrate/network) 10 | [![GitHub issues](https://img.shields.io/github/issues/utkuozdemir/pv-migrate)](https://github.com/utkuozdemir/pv-migrate/issues) 11 | ![GitHub all releases](https://img.shields.io/github/downloads/utkuozdemir/pv-migrate/total) 12 | ![Docker Pulls](https://img.shields.io/docker/pulls/utkuozdemir/pv-migrate) 13 | ![SSHD Docker Pulls](https://img.shields.io/docker/pulls/utkuozdemir/pv-migrate-sshd?label=sshd%20-%20docker%20pulls) 14 | ![Rsync Docker Pulls](https://img.shields.io/docker/pulls/utkuozdemir/pv-migrate-rsync?label=rsync%20-%20docker%20pulls) 15 | 16 | `pv-migrate` is a CLI tool/kubectl plugin to easily migrate 17 | the contents of one Kubernetes `PersistentVolumeClaim` to 
another. 18 | 19 | --- 20 | 21 | > [!WARNING] 22 | > I get that it can be frustrating not to hear back about the stuff you've brought up or the changes you've suggested. But honestly, for over a year now, I've hardly had any time to keep up with my personal open-source projects, including this one. I am still committed to keep this tool working and slowly move it forward, but please bear with me if I can't tackle your fixes or check out your code for a while. Thanks for your understanding. 23 | 24 | --- 25 | 26 | ## Demo 27 | 28 | ![pv-migrate demo GIF](img/demo.gif) 29 | 30 | ## Introduction 31 | 32 | On Kubernetes, if you need to rename a resource (like a `Deployment`) or to move it to a different namespace, 33 | you can simply create a copy of its manifest with the new namespace and/or name and apply it. 34 | 35 | However, it is not as simple with `PersistentVolumeClaim` resources: They are not only metadata, 36 | but they also store data in the underlying storage backend. 37 | 38 | In these cases, moving the data stored in the PVC can become a problem, making migrations more difficult. 39 | 40 | ## Use Cases 41 | 42 | :arrow_right: You have a database that has a PersistentVolumeClaim `db-data` of size `50Gi`. 43 | Your DB grew over time, and you need more space for it. 44 | You cannot resize the PVC because it doesn't support [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/). 45 | Simply create a new, bigger PVC `db-data-v2` and use `pv-migrate` to copy data from `db-data` to `db-data-v2`. 46 | 47 | 48 | :arrow_right: You need to move PersistentVolumeClaim `my-pvc` from namespace `ns-a` to namespace `ns-b`. 49 | Simply create the PVC with the same name and manifest in `ns-b` and use `pv-migrate` to copy its content. 50 | 51 | 52 | :arrow_right: You are moving from one cloud provider to another, 53 | and you need to move the data from one Kubernetes cluster to the other. 
54 | Just use `pv-migrate` to copy the data **securely over the internet**. 55 | 56 | :arrow_right: You need to change the `StorageClass` of a volume, for instance, 57 | from a `ReadWriteOnce` one (like `local-path`) to a `ReadWriteMany` one like NFS. 58 | As the `storageClass` is not editable, you can use `pv-migrate` to transfer 59 | the data from the old PVC to the new one with the desired StorageClass. 60 | 61 | ## Highlights 62 | 63 | - Supports in-namespace, in-cluster as well as cross-cluster migrations 64 | - Uses rsync over SSH with freshly generated [Ed25519](https://en.wikipedia.org/wiki/EdDSA) 65 | or RSA keys each time to securely migrate the files 66 | - Allows full customization of the manifests (e.g. specifying your own docker images for rsync and sshd, configuring affinity etc.) 67 | - Supports multiple migration strategies to do the migration efficiently and fall back to other strategies when needed 68 | - Customizable strategy order 69 | - Supports arm32v7 (Raspberry Pi etc.) and arm64 architectures as well as amd64 70 | - Supports completion for popular shells: bash, zsh, fish, powershell 71 | 72 | ## Installation 73 | 74 | See [INSTALL.md](INSTALL.md) for various installation methods and shell completion configuration. 75 | 76 | ## Usage 77 | 78 | See [USAGE.md](USAGE.md) for the CLI reference and examples. 79 | 80 | 81 | # Star History 82 | 83 | 84 | 85 | 86 | 87 | Star History Chart 88 | 89 | 90 | 91 | # Contributing 92 | 93 | See [CONTRIBUTING](CONTRIBUTING.md) for details. 94 | -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | tasks: 4 | fmt: 5 | desc: format code 6 | vars: 7 | MODULE: 8 | sh: head -n 1 go.mod | cut -d' ' -f2 9 | cmds: 10 | - go mod tidy 11 | - golangci-lint run --fix ./... 12 | - shfmt -l -i 2 -ci -sr -w .
13 | 14 | lint: 15 | desc: lint code 16 | cmds: 17 | - go mod tidy --diff 18 | - golangci-lint run ./... 19 | - helm lint helm/pv-migrate 20 | - shfmt -l -i 2 -ci -sr . 21 | 22 | clean: 23 | desc: clean 24 | cmds: 25 | - rm -rf {{.ROOT_DIR}}/dist/ 26 | 27 | update-usage: 28 | desc: update usage 29 | env: 30 | USAGE: 31 | sh: go run ./... --help 32 | cmd: >- 33 | docker run -v {{.ROOT_DIR}}:/project -e USAGE 34 | hairyhenderson/gomplate:stable 35 | --file /project/USAGE.md.gotmpl --out /project/USAGE.md 36 | 37 | update-helm-chart-docs: 38 | desc: update helm chart docs 39 | dir: helm/pv-migrate 40 | cmds: 41 | - helm-docs 42 | 43 | build: 44 | desc: build 45 | cmds: 46 | - goreleaser build --snapshot --rm-dist --single-target 47 | 48 | release: 49 | desc: release 50 | vars: 51 | NUM_LAST_TAGS: 3 52 | LAST_TAGS: 53 | sh: git tag --sort=-version:refname | head -n {{.NUM_LAST_TAGS}} | xargs echo 54 | NEXT_TAG: 55 | sh: svu next 56 | cmds: 57 | - "echo Last {{.NUM_LAST_TAGS}} tags: {{.LAST_TAGS}}" 58 | - "echo Next tag: {{.NEXT_TAG}}" 59 | - git tag -a {{.NEXT_TAG}} -m "Release {{.NEXT_TAG}}" 60 | - git push origin {{.NEXT_TAG}} 61 | -------------------------------------------------------------------------------- /USAGE.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | Root command: 4 | 5 | ``` 6 | Migrate data from one Kubernetes PersistentVolumeClaim to another 7 | 8 | Usage: 9 | pv-migrate [--source-namespace=] --source= [--dest-namespace=] --dest= [flags] 10 | pv-migrate [command] 11 | 12 | Available Commands: 13 | completion Generate completion script 14 | help Help about any command 15 | 16 | Flags: 17 | --compress compress data during migration ('-z' flag of rsync) (default true) 18 | --dest string destination PVC name 19 | -C, --dest-context string context in the kubeconfig file of the destination PVC 20 | -d, --dest-delete-extraneous-files delete extraneous files on the destination by using rsync's 
'--delete' flag 21 | -H, --dest-host-override string the override for the rsync host destination when it is run over SSH, in cases when you need to target a different destination IP on rsync for some reason. By default, it is determined by used strategy and differs across strategies. Has no effect for mnt2 and local strategies 22 | -K, --dest-kubeconfig string path of the kubeconfig file of the destination PVC 23 | -N, --dest-namespace string namespace of the destination PVC 24 | -P, --dest-path string the filesystem path to migrate in the destination PVC (default "/") 25 | --helm-set strings set additional Helm values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) 26 | --helm-set-file strings set additional Helm values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) 27 | --helm-set-string strings set additional Helm STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) 28 | -t, --helm-timeout duration install/uninstall timeout for helm releases (default 1m0s) 29 | -f, --helm-values strings set additional Helm values by a YAML file or a URL (can specify multiple) 30 | -h, --help help for pv-migrate 31 | -i, --ignore-mounted do not fail if the source or destination PVC is mounted 32 | --lbsvc-timeout duration timeout for the load balancer service to receive an external IP. 
Only used by the lbsvc strategy (default 2m0s) 33 | --log-format string log format, must be one of: text, json (default "text") 34 | --log-level string log level, must be one of "DEBUG, INFO, WARN, ERROR" or an slog-parseable level: https://pkg.go.dev/log/slog#Level.UnmarshalText (default "INFO") 35 | -o, --no-chown omit chown on rsync 36 | -b, --no-progress-bar do not display a progress bar 37 | -x, --skip-cleanup skip cleanup of the migration 38 | --source string source PVC name 39 | -c, --source-context string context in the kubeconfig file of the source PVC 40 | -k, --source-kubeconfig string path of the kubeconfig file of the source PVC 41 | -R, --source-mount-read-only mount the source PVC in ReadOnly mode (default true) 42 | -n, --source-namespace string namespace of the source PVC 43 | -p, --source-path string the filesystem path to migrate in the source PVC (default "/") 44 | -a, --ssh-key-algorithm string ssh key algorithm to be used. Valid values are rsa,ed25519 (default "ed25519") 45 | -s, --strategies strings the comma-separated list of strategies to be used in the given order (default [mnt2,svc,lbsvc]) 46 | -v, --version version for pv-migrate 47 | 48 | Use "pv-migrate [command] --help" for more information about a command. 49 | ``` 50 | 51 | The Kubernetes resources created by pv-migrate are sourced from a [Helm chart](helm/pv-migrate). 52 | 53 | You can pass raw values to the backing Helm chart 54 | using the `--helm-*` flags for further customization: container images, 55 | resources, serviceacccounts, additional annotations etc. 56 | 57 | ## Strategies 58 | 59 | `pv-migrate` has multiple strategies implemented to carry out the migration operation. 
Those are the following: 60 | 61 | | Name | Description | 62 | |---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 63 | | `mnt2` | **Mount both** - Mounts both PVCs in a single pod and runs a regular rsync, without using SSH or the network. Only applicable if source and destination PVCs are in the same namespace and both can be mounted from a single pod. | 64 | | `svc` | **Service** - Runs rsync+ssh over a Kubernetes Service (`ClusterIP`). Only applicable when source and destination PVCs are in the same Kubernetes cluster. | 65 | | `lbsvc` | **Load Balancer Service** - Runs rsync+ssh over a Kubernetes Service of type `LoadBalancer`. Always applicable (will fail if `LoadBalancer` IP is not assigned for a long period). | 66 | | `local` | **Local Transfer** - Runs sshd on both source and destination, then uses a combination of `kubectl port-forward` logic and an SSH reverse proxy to tunnel all the traffic over the client device (the device which runs pv-migrate, e.g. your laptop). Requires `ssh` command to be available on the client device.

Note that this strategy is **experimental** (and not enabled by default), potentially can put heavy load on both apiservers and is not as resilient as others. It is recommended for small amounts of data and/or when the only access to both clusters seems to be through `kubectl` (e.g. for air-gapped clusters, on jump hosts etc.). | 67 | 68 | ## Examples 69 | 70 | See the various examples below which copy the contents of the `old-pvc` into the `new-pvc`. 71 | 72 | ### Example 1: In a single namespace (minimal example) 73 | 74 | ```bash 75 | $ pv-migrate --source old-pvc --dest new-pvc 76 | ``` 77 | 78 | ### Example 2: Between namespaces 79 | 80 | ```bash 81 | $ pv-migrate \ 82 | --source-namespace source-ns --source old-pvc \ 83 | --dest-namespace dest-ns --dest new-pvc 84 | ``` 85 | 86 | ### Example 3: Between different clusters 87 | 88 | ```bash 89 | pv-migrate \ 90 | --source-kubeconfig /path/to/source/kubeconfig \ 91 | --source-context some-context \ 92 | --source-namespace source-ns \ 93 | --source old-pvc \ 94 | --dest-kubeconfig /path/to/dest/kubeconfig \ 95 | --dest-context some-other-context \ 96 | --dest-namespace dest-ns \ 97 | --dest-delete-extraneous-files \ 98 | --dest new-pvc 99 | ``` 100 | 101 | ### Example 4: Using custom container images from custom repository 102 | 103 | ```bash 104 | $ pv-migrate \ 105 | --helm-set rsync.image.repository=mycustomrepo/rsync \ 106 | --helm-set rsync.image.tag=v1.2.3 \ 107 | --helm-set sshd.image.repository=mycustomrepo/sshd \ 108 | --helm-set sshd.image.tag=v1.2.3 \ 109 | --source old-pvc \ 110 | --dest new-pvc 111 | ``` 112 | 113 | ### Example 5: Enabling network policies (on clusters with deny-all traffic rules) 114 | 115 | ```bash 116 | $ pv-migrate \ 117 | --helm-set sshd.networkPolicy.enabled=true \ 118 | --helm-set rsync.networkPolicy.enabled=true \ 119 | --source-namespace source-ns --source old-pvc \ 120 | --dest-namespace dest-ns --dest new-pvc 121 | ``` 122 | 123 | ### Example 6: Passing additional rsync 
arguments 124 | 125 | ```bash 126 | $ pv-migrate \ 127 | --helm-set rsync.extraArgs="--partial --inplace" \ 128 | --source old-pvc --dest new-pvc 129 | ``` 130 | 131 | **For further customization on the rendered manifests** 132 | (custom labels, annotations, etc.), see the [Helm chart values](helm/pv-migrate). 133 | -------------------------------------------------------------------------------- /USAGE.md.gotmpl: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | Root command: 4 | 5 | ``` 6 | {{ .Env.USAGE }} 7 | ``` 8 | 9 | The Kubernetes resources created by pv-migrate are sourced from a [Helm chart](helm/pv-migrate). 10 | 11 | You can pass raw values to the backing Helm chart 12 | using the `--helm-*` flags for further customization: container images, 13 | resources, serviceacccounts, additional annotations etc. 14 | 15 | ## Strategies 16 | 17 | `pv-migrate` has multiple strategies implemented to carry out the migration operation. Those are the following: 18 | 19 | | Name | Description | 20 | |---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 21 | | `mnt2` | **Mount both** - Mounts both PVCs in a single pod and runs a regular rsync, without using SSH or the network. Only applicable if source and destination PVCs are in the same namespace and both can be mounted from a single pod. 
| 22 | | `svc` | **Service** - Runs rsync+ssh over a Kubernetes Service (`ClusterIP`). Only applicable when source and destination PVCs are in the same Kubernetes cluster. | 23 | | `lbsvc` | **Load Balancer Service** - Runs rsync+ssh over a Kubernetes Service of type `LoadBalancer`. Always applicable (will fail if `LoadBalancer` IP is not assigned for a long period). | 24 | | `local` | **Local Transfer** - Runs sshd on both source and destination, then uses a combination of `kubectl port-forward` logic and an SSH reverse proxy to tunnel all the traffic over the client device (the device which runs pv-migrate, e.g. your laptop). Requires `ssh` command to be available on the client device.

Note that this strategy is **experimental** (and not enabled by default), potentially can put heavy load on both apiservers and is not as resilient as others. It is recommended for small amounts of data and/or when the only access to both clusters seems to be through `kubectl` (e.g. for air-gapped clusters, on jump hosts etc.). | 25 | 26 | ## Examples 27 | 28 | See the various examples below which copy the contents of the `old-pvc` into the `new-pvc`. 29 | 30 | ### Example 1: In a single namespace (minimal example) 31 | 32 | ```bash 33 | $ pv-migrate --source old-pvc --dest new-pvc 34 | ``` 35 | 36 | ### Example 2: Between namespaces 37 | 38 | ```bash 39 | $ pv-migrate \ 40 | --source-namespace source-ns --source old-pvc \ 41 | --dest-namespace dest-ns --dest new-pvc 42 | ``` 43 | 44 | ### Example 3: Between different clusters 45 | 46 | ```bash 47 | pv-migrate \ 48 | --source-kubeconfig /path/to/source/kubeconfig \ 49 | --source-context some-context \ 50 | --source-namespace source-ns \ 51 | --source old-pvc \ 52 | --dest-kubeconfig /path/to/dest/kubeconfig \ 53 | --dest-context some-other-context \ 54 | --dest-namespace dest-ns \ 55 | --dest-delete-extraneous-files \ 56 | --dest new-pvc 57 | ``` 58 | 59 | ### Example 4: Using custom container images from custom repository 60 | 61 | ```bash 62 | $ pv-migrate \ 63 | --helm-set rsync.image.repository=mycustomrepo/rsync \ 64 | --helm-set rsync.image.tag=v1.2.3 \ 65 | --helm-set sshd.image.repository=mycustomrepo/sshd \ 66 | --helm-set sshd.image.tag=v1.2.3 \ 67 | --source old-pvc \ 68 | --dest new-pvc 69 | ``` 70 | 71 | ### Example 5: Enabling network policies (on clusters with deny-all traffic rules) 72 | 73 | ```bash 74 | $ pv-migrate \ 75 | --helm-set sshd.networkPolicy.enabled=true \ 76 | --helm-set rsync.networkPolicy.enabled=true \ 77 | --source-namespace source-ns --source old-pvc \ 78 | --dest-namespace dest-ns --dest new-pvc 79 | ``` 80 | 81 | ### Example 6: Passing additional rsync arguments 82 | 83 | 
```bash 84 | $ pv-migrate \ 85 | --helm-set rsync.extraArgs="--partial --inplace" \ 86 | --source old-pvc --dest new-pvc 87 | ``` 88 | 89 | **For further customization on the rendered manifests** 90 | (custom labels, annotations, etc.), see the [Helm chart values](helm/pv-migrate). 91 | -------------------------------------------------------------------------------- /app/app.go: -------------------------------------------------------------------------------- 1 | package app 2 | 3 | const appName = "pv-migrate" 4 | -------------------------------------------------------------------------------- /app/completion.go: -------------------------------------------------------------------------------- 1 | package app 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "strings" 8 | 9 | "github.com/spf13/cobra" 10 | 11 | "github.com/utkuozdemir/pv-migrate/k8s" 12 | ) 13 | 14 | var completionCmdlongDesc = fmt.Sprintf(`To load completions: 15 | 16 | Bash: 17 | 18 | $ source <(%[1]s completion bash) 19 | 20 | # To load completions for each session, execute once: 21 | # Linux: 22 | $ %[1]s completion bash > /etc/bash_completion.d/%[1]s 23 | # macOS: 24 | $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s 25 | 26 | Zsh: 27 | 28 | # If shell completion is not already enabled in your environment, 29 | # you will need to enable it. You can execute the following once: 30 | 31 | $ echo "autoload -U compinit; compinit" >> ~/.zshrc 32 | 33 | # To load completions for each session, execute once: 34 | $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" 35 | 36 | # You will need to start a new shell for this setup to take effect. 
37 | 38 | fish: 39 | 40 | $ %[1]s completion fish | source 41 | 42 | # To load completions for each session, execute once: 43 | $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish 44 | 45 | PowerShell: 46 | 47 | PS> %[1]s completion powershell | Out-String | Invoke-Expression 48 | 49 | # To load completions for every new session, run: 50 | PS> %[1]s completion powershell > %[1]s.ps1 51 | # and source this file from your PowerShell profile. 52 | `, appName) 53 | 54 | func buildCompletionCmd() *cobra.Command { 55 | return &cobra.Command{ 56 | Use: "completion [bash|zsh|fish|powershell]", 57 | Short: "Generate completion script", 58 | Long: completionCmdlongDesc, 59 | DisableFlagsInUseLine: true, 60 | ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, 61 | Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), 62 | RunE: func(cmd *cobra.Command, args []string) error { 63 | var err error 64 | 65 | switch args[0] { 66 | case "bash": 67 | err = cmd.Root().GenBashCompletion(os.Stdout) 68 | case "zsh": 69 | err = cmd.Root().GenZshCompletion(os.Stdout) 70 | case "fish": 71 | err = cmd.Root().GenFishCompletion(os.Stdout, true) 72 | case "powershell": 73 | err = cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) 74 | } 75 | 76 | if err != nil { 77 | return fmt.Errorf("failed to generate completion script: %w", err) 78 | } 79 | 80 | return nil 81 | }, 82 | } 83 | } 84 | 85 | func buildKubeContextCompletionFunc(kubeconfigFlag string) func(*cobra.Command, 86 | []string, string) ([]string, cobra.ShellCompDirective) { 87 | return func(cmd *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { 88 | logger, _, err := buildLogger(cmd.Flags()) 89 | if err != nil { 90 | return nil, cobra.ShellCompDirectiveError 91 | } 92 | 93 | srcKubeconfig, _ := cmd.Flags().GetString(kubeconfigFlag) 94 | 95 | contexts, err := k8s.GetContexts(srcKubeconfig, logger) 96 | if err != nil { 97 | logger.Debug("failed to get contexts", "error", err) 98 | 99 
| return nil, cobra.ShellCompDirectiveError 100 | } 101 | 102 | return contexts, cobra.ShellCompDirectiveDefault 103 | } 104 | } 105 | 106 | func buildKubeNSCompletionFunc(ctx context.Context, kubeconfigFlag string, 107 | contextFlag string, 108 | ) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { 109 | return func(cmd *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { 110 | logger, _, err := buildLogger(cmd.Flags()) 111 | if err != nil { 112 | return nil, cobra.ShellCompDirectiveError 113 | } 114 | 115 | srcKubeconfig, _ := cmd.Flags().GetString(kubeconfigFlag) 116 | srcContext, _ := cmd.Flags().GetString(contextFlag) 117 | 118 | contexts, err := k8s.GetNamespaces(ctx, srcKubeconfig, srcContext, logger) 119 | if err != nil { 120 | logger.Debug("failed to get namespaces", "error", err) 121 | 122 | return nil, cobra.ShellCompDirectiveError 123 | } 124 | 125 | return contexts, cobra.ShellCompDirectiveDefault 126 | } 127 | } 128 | 129 | func buildStaticSliceCompletionFunc(values []string) func(*cobra.Command, 130 | []string, string) ([]string, cobra.ShellCompDirective) { 131 | return func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { 132 | return values, cobra.ShellCompDirectiveNoFileComp 133 | } 134 | } 135 | 136 | func buildSliceCompletionFunc(values []string) func(*cobra.Command, 137 | []string, string) ([]string, cobra.ShellCompDirective) { 138 | return func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { 139 | remaining := make(map[string]struct{}, len(values)) 140 | for _, value := range values { 141 | remaining[value] = struct{}{} 142 | } 143 | 144 | var provided []string 145 | 146 | split := strings.Split(toComplete, ",") 147 | for _, s := range split { 148 | val := strings.TrimSpace(s) 149 | provided = append(provided, val) 150 | delete(remaining, val) 151 | } 152 | 153 | var suggestions []string 154 | 155 | lastPart := 
provided[len(provided)-1] 156 | for value := range remaining { 157 | if !strings.HasPrefix(value, lastPart) { 158 | continue 159 | } 160 | 161 | suffix := strings.TrimPrefix(value, lastPart) 162 | suggestions = append(suggestions, toComplete+suffix) 163 | } 164 | 165 | directive := cobra.ShellCompDirectiveNoFileComp 166 | if len(suggestions) > 1 { 167 | directive = cobra.ShellCompDirectiveNoFileComp | cobra.ShellCompDirectiveNoSpace 168 | } 169 | 170 | return suggestions, directive 171 | } 172 | } 173 | 174 | func buildLegacyPVCsCompletionFunc(ctx context.Context) func(*cobra.Command, 175 | []string, string) ([]string, cobra.ShellCompDirective) { 176 | return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { 177 | if len(args) >= 2 { //nolint:mnd 178 | return nil, cobra.ShellCompDirectiveNoFileComp 179 | } 180 | 181 | isDestPVC := len(args) == 1 182 | 183 | return buildPVCCompletionFunc(ctx, isDestPVC)(cmd, args, toComplete) 184 | } 185 | } 186 | 187 | func buildPVCCompletionFunc(ctx context.Context, 188 | isDestPVC bool, 189 | ) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { 190 | return func(cmd *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { 191 | logger, _, err := buildLogger(cmd.Flags()) 192 | if err != nil { 193 | return nil, cobra.ShellCompDirectiveError 194 | } 195 | 196 | kubeconfig, _ := cmd.Flags().GetString(FlagSourceKubeconfig) 197 | useContext, _ := cmd.Flags().GetString(FlagSourceContext) 198 | namespace, _ := cmd.Flags().GetString(FlagSourceNamespace) 199 | 200 | if isDestPVC { 201 | kubeconfig, _ = cmd.Flags().GetString(FlagDestKubeconfig) 202 | useContext, _ = cmd.Flags().GetString(FlagDestContext) 203 | namespace, _ = cmd.Flags().GetString(FlagDestNamespace) 204 | } 205 | 206 | pvcs, err := k8s.GetPVCs(ctx, kubeconfig, useContext, namespace, logger) 207 | if err != nil { 208 | logger.Debug("failed to get PVCs", "error", err) 209 | 210 | 
return nil, cobra.ShellCompDirectiveError 211 | } 212 | 213 | return pvcs, cobra.ShellCompDirectiveNoFileComp 214 | } 215 | } 216 | -------------------------------------------------------------------------------- /cmd/pv-migrate/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "os" 7 | 8 | // load all auth plugins - needed for gcp, azure etc. 9 | _ "k8s.io/client-go/plugin/pkg/client/auth" 10 | 11 | "github.com/utkuozdemir/pv-migrate/app" 12 | ) 13 | 14 | var ( 15 | // will be overridden by goreleaser: https://goreleaser.com/cookbooks/using-main.version/ 16 | version = "dev" 17 | commit = "none" 18 | date = "unknown" 19 | ) 20 | 21 | func main() { 22 | if exitCode := run(); exitCode != 0 { 23 | os.Exit(exitCode) 24 | } 25 | } 26 | 27 | func run() int { 28 | ctx, cancel := context.WithCancel(context.Background()) 29 | defer cancel() 30 | 31 | rootCmd := app.BuildMigrateCmd(ctx, version, commit, date, false) 32 | 33 | if err := rootCmd.ExecuteContext(ctx); err != nil { 34 | slog.Default().Error("❌ Failed to run", "error", err.Error()) 35 | 36 | return 1 37 | } 38 | 39 | return 0 40 | } 41 | -------------------------------------------------------------------------------- /docker/rsync/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.22.0 2 | 3 | RUN apk add --no-cache rsync openssh 4 | -------------------------------------------------------------------------------- /docker/sshd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.22.0 2 | 3 | # we unlock the root user for sshd 4 | # https://github.com/alpinelinux/docker-alpine/issues/28#issuecomment-510510532 5 | # https://github.com/alpinelinux/docker-alpine/issues/28#issuecomment-659551571 6 | RUN apk add --no-cache rsync openssh openssh-server-pam tini && \ 7 | ssh-keygen -A && \ 8 | sed -i 
-e 's/^root:!:/root:*:/' /etc/shadow 9 | 10 | COPY sshd_config /etc/ssh/sshd_config 11 | 12 | EXPOSE 22 13 | 14 | ENTRYPOINT ["tini", "--"] 15 | CMD ["/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config"] 16 | -------------------------------------------------------------------------------- /docker/sshd/sshd_config: -------------------------------------------------------------------------------- 1 | Port 22 2 | PasswordAuthentication no 3 | ChallengeResponseAuthentication no 4 | PermitRootLogin yes 5 | ClientAliveInterval 300 6 | ClientAliveCountMax 3 7 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/utkuozdemir/pv-migrate 2 | 3 | // renovate: go 4 | go 1.24.3 5 | 6 | require ( 7 | github.com/hashicorp/go-multierror v1.1.1 8 | github.com/lmittmann/tint v1.1.1 9 | github.com/mattn/go-isatty v0.0.20 10 | github.com/neilotoole/slogt v1.1.0 11 | github.com/schollz/progressbar/v3 v3.18.0 12 | github.com/spf13/cobra v1.9.1 13 | github.com/spf13/pflag v1.0.6 14 | github.com/stretchr/testify v1.10.0 15 | golang.org/x/crypto v0.38.0 16 | golang.org/x/sync v0.14.0 17 | gopkg.in/yaml.v3 v3.0.1 18 | helm.sh/helm/v3 v3.18.2 19 | k8s.io/api v0.33.1 20 | k8s.io/apimachinery v0.33.1 21 | k8s.io/cli-runtime v0.33.1 22 | k8s.io/client-go v0.33.1 23 | k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 24 | ) 25 | 26 | require ( 27 | dario.cat/mergo v1.0.1 // indirect 28 | github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect 29 | github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect 30 | github.com/BurntSushi/toml v1.5.0 // indirect 31 | github.com/MakeNowJust/heredoc v1.0.0 // indirect 32 | github.com/Masterminds/goutils v1.1.1 // indirect 33 | github.com/Masterminds/semver/v3 v3.3.1 // indirect 34 | github.com/Masterminds/sprig/v3 v3.3.0 // indirect 35 | github.com/Masterminds/squirrel 
v1.5.4 // indirect 36 | github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect 37 | github.com/blang/semver/v4 v4.0.0 // indirect 38 | github.com/chai2010/gettext-go v1.0.3 // indirect 39 | github.com/containerd/containerd v1.7.27 // indirect 40 | github.com/containerd/errdefs v1.0.0 // indirect 41 | github.com/containerd/log v0.1.0 // indirect 42 | github.com/containerd/platforms v0.2.1 // indirect 43 | github.com/cyphar/filepath-securejoin v0.4.1 // indirect 44 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 45 | github.com/docker/docker-credential-helpers v0.9.3 // indirect 46 | github.com/emicklei/go-restful/v3 v3.12.2 // indirect 47 | github.com/evanphx/json-patch v5.9.11+incompatible // indirect 48 | github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect 49 | github.com/fatih/color v1.18.0 // indirect 50 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 51 | github.com/go-errors/errors v1.5.1 // indirect 52 | github.com/go-gorp/gorp/v3 v3.1.0 // indirect 53 | github.com/go-logr/logr v1.4.2 // indirect 54 | github.com/go-openapi/jsonpointer v0.21.1 // indirect 55 | github.com/go-openapi/jsonreference v0.21.0 // indirect 56 | github.com/go-openapi/swag v0.23.1 // indirect 57 | github.com/gobwas/glob v0.2.3 // indirect 58 | github.com/gogo/protobuf v1.3.2 // indirect 59 | github.com/google/btree v1.1.3 // indirect 60 | github.com/google/gnostic-models v0.6.9 // indirect 61 | github.com/google/go-cmp v0.7.0 // indirect 62 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect 63 | github.com/google/uuid v1.6.0 // indirect 64 | github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect 65 | github.com/gosuri/uitable v0.0.4 // indirect 66 | github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect 67 | github.com/hashicorp/errwrap v1.1.0 // indirect 68 | github.com/huandu/xstrings v1.5.0 // indirect 69 | 
github.com/inconshreveable/mousetrap v1.1.0 // indirect 70 | github.com/jmoiron/sqlx v1.4.0 // indirect 71 | github.com/josharian/intern v1.0.0 // indirect 72 | github.com/json-iterator/go v1.1.12 // indirect 73 | github.com/klauspost/compress v1.18.0 // indirect 74 | github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect 75 | github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect 76 | github.com/lib/pq v1.10.9 // indirect 77 | github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect 78 | github.com/mailru/easyjson v0.9.0 // indirect 79 | github.com/mattn/go-colorable v0.1.14 // indirect 80 | github.com/mattn/go-runewidth v0.0.16 // indirect 81 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect 82 | github.com/mitchellh/copystructure v1.2.0 // indirect 83 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect 84 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 85 | github.com/moby/spdystream v0.5.0 // indirect 86 | github.com/moby/term v0.5.2 // indirect 87 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 88 | github.com/modern-go/reflect2 v1.0.2 // indirect 89 | github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect 90 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 91 | github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect 92 | github.com/opencontainers/go-digest v1.0.0 // indirect 93 | github.com/opencontainers/image-spec v1.1.1 // indirect 94 | github.com/peterbourgon/diskv v2.0.1+incompatible // indirect 95 | github.com/pkg/errors v0.9.1 // indirect 96 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 97 | github.com/prometheus/common v0.63.0 // indirect 98 | github.com/prometheus/procfs v0.16.0 // indirect 99 | github.com/rivo/uniseg v0.4.7 // indirect 100 | github.com/rubenv/sql-migrate v1.8.0 // indirect 101 | 
github.com/russross/blackfriday/v2 v2.1.0 // indirect 102 | github.com/shopspring/decimal v1.4.0 // indirect 103 | github.com/sirupsen/logrus v1.9.3 // indirect 104 | github.com/spf13/cast v1.7.1 // indirect 105 | github.com/x448/float16 v0.8.4 // indirect 106 | github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect 107 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect 108 | github.com/xeipuuv/gojsonschema v1.2.0 // indirect 109 | github.com/xlab/treeprint v1.2.0 // indirect 110 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect 111 | golang.org/x/net v0.38.0 // indirect 112 | golang.org/x/oauth2 v0.28.0 // indirect 113 | golang.org/x/sys v0.33.0 // indirect 114 | golang.org/x/term v0.32.0 // indirect 115 | golang.org/x/text v0.25.0 // indirect 116 | golang.org/x/time v0.11.0 // indirect 117 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect 118 | google.golang.org/grpc v1.71.0 // indirect 119 | google.golang.org/protobuf v1.36.6 // indirect 120 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 121 | gopkg.in/inf.v0 v0.9.1 // indirect 122 | k8s.io/apiextensions-apiserver v0.33.0 // indirect 123 | k8s.io/apiserver v0.33.0 // indirect 124 | k8s.io/component-base v0.33.0 // indirect 125 | k8s.io/klog/v2 v2.130.1 // indirect 126 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect 127 | k8s.io/kubectl v0.33.0 // indirect 128 | oras.land/oras-go/v2 v2.5.0 // indirect 129 | sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect 130 | sigs.k8s.io/kustomize/api v0.19.0 // indirect 131 | sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect 132 | sigs.k8s.io/randfill v1.0.0 // indirect 133 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect 134 | sigs.k8s.io/yaml v1.4.0 // indirect 135 | ) 136 | -------------------------------------------------------------------------------- /helm/helm.go: 
-------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import ( 4 | "embed" 5 | "fmt" 6 | "io/fs" 7 | "path/filepath" 8 | 9 | "helm.sh/helm/v3/pkg/chart" 10 | "helm.sh/helm/v3/pkg/chart/loader" 11 | ) 12 | 13 | // chartFS is the embedded Helm chart. 14 | // 15 | // Note: The prefix "all:" is important here, as otherwise the files starting with "." or "_" will be ignored. 16 | // 17 | // See: https://github.com/golang/go/issues/44393 18 | // 19 | //go:embed all:pv-migrate 20 | var chartFS embed.FS 21 | 22 | const rootDir = "pv-migrate" 23 | 24 | // LoadChart loads the embedded Helm chart. 25 | func LoadChart() (*chart.Chart, error) { 26 | files, err := chartAsBufferedFiles() 27 | if err != nil { 28 | return nil, fmt.Errorf("failed to get chart files: %w", err) 29 | } 30 | 31 | helmChart, err := loader.LoadFiles(files) 32 | if err != nil { 33 | return nil, fmt.Errorf("failed to load chart: %w", err) 34 | } 35 | 36 | return helmChart, nil 37 | } 38 | 39 | func chartAsBufferedFiles() ([]*loader.BufferedFile, error) { 40 | var files []*loader.BufferedFile 41 | 42 | err := fs.WalkDir(chartFS, rootDir, func(path string, d fs.DirEntry, err error) error { 43 | if err != nil { 44 | return err 45 | } 46 | 47 | if d.IsDir() { 48 | return nil 49 | } 50 | 51 | data, err := chartFS.ReadFile(path) 52 | if err != nil { 53 | return fmt.Errorf("failed to read file %q in chart: %w", path, err) 54 | } 55 | 56 | relativePath, err := filepath.Rel(rootDir, path) 57 | if err != nil { 58 | return fmt.Errorf("failed to relativize path %q: %w", path, err) 59 | } 60 | 61 | // fix for Windows - the Helm client library expects templates to be under "templates/", i.e., with forward-slash 62 | relativePath = filepath.ToSlash(relativePath) 63 | 64 | files = append(files, &loader.BufferedFile{ 65 | Name: relativePath, 66 | Data: data, 67 | }) 68 | 69 | return nil 70 | }) 71 | if err != nil { 72 | return nil, fmt.Errorf("failed to walk chart 
directory: %w", err) 73 | } 74 | 75 | return files, nil 76 | } 77 | -------------------------------------------------------------------------------- /helm/helm_test.go: -------------------------------------------------------------------------------- 1 | package helm_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/require" 8 | 9 | "github.com/utkuozdemir/pv-migrate/helm" 10 | ) 11 | 12 | func TestLoadChart(t *testing.T) { 13 | t.Parallel() 14 | 15 | chart, err := helm.LoadChart() 16 | require.NoError(t, err) 17 | 18 | assert.Equal(t, "pv-migrate", chart.Metadata.Name) 19 | assert.NotEmpty(t, chart.Metadata.Version, "chart version should not be empty") 20 | assert.NotEmpty(t, chart.Values, "chart values should not be empty") 21 | assert.NotEmpty(t, chart.Templates, "chart templates should not be empty") 22 | } 23 | -------------------------------------------------------------------------------- /helm/pv-migrate/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /helm/pv-migrate/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: pv-migrate 3 | description: The helm chart of pv-migrate 4 | type: application 5 | version: 0.5.0 6 | appVersion: 0.5.0 7 | home: https://github.com/utkuozdemir/pv-migrate 8 | keywords: 9 | - pv-migrate 10 | - pvc 11 | - migration 12 | sources: 13 | - https://github.com/utkuozdemir/pv-migrate 14 | # icon: TBA 15 | maintainers: 16 | - name: Utku Özdemir 17 | email: utkuozdemir@gmail.com 18 | url: https://utkuozdemir.org 19 | annotations: 20 | artifacthub.io/license: Apache-2.0 21 | artifacthub.io/images: | 22 | - name: utkuozdemir/pv-migrate-sshd 23 | image: docker.io/utkuozdemir/pv-migrate-sshd:1.1.0 24 | - name: utkuozdemir/pv-migrate-rsync 25 | image: docker.io/utkuozdemir/pv-migrate-rsync:1.1.0 26 | -------------------------------------------------------------------------------- /helm/pv-migrate/README.md: -------------------------------------------------------------------------------- 1 | # pv-migrate 2 | 3 | ![Version: 0.5.0](https://img.shields.io/badge/Version-0.5.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.5.0](https://img.shields.io/badge/AppVersion-0.5.0-informational?style=flat-square) 4 | 5 | The helm chart of pv-migrate 6 | 7 | **Homepage:** 8 | 9 | ## Maintainers 10 | 11 | | Name | Email | Url | 12 | | ---- | ------ | --- | 13 | | Utku Özdemir | | | 14 | 15 | ## Source Code 16 | 17 | * 18 | 19 | ## Values 20 | 21 | | Key | Type | Default | Description | 
22 | |-----|------|---------|-------------| 23 | | fullnameOverride | string | `""` | String to fully override the fullname template with a string | 24 | | nameOverride | string | `""` | String to partially override the fullname template with a string (will prepend the release name) | 25 | | rsync.affinity | object | `{}` | Rsync pod affinity | 26 | | rsync.backoffLimit | int | `0` | | 27 | | rsync.command | string | `""` | Full Rsync command and flags | 28 | | rsync.enabled | bool | `false` | Enable creation of Rsync job | 29 | | rsync.extraArgs | string | `""` | Extra args to be appended to the rsync command. Setting this might cause the tool to not function properly. | 30 | | rsync.image.pullPolicy | string | `"IfNotPresent"` | Rsync image pull policy | 31 | | rsync.image.repository | string | `"docker.io/utkuozdemir/pv-migrate-rsync"` | Rsync image repository | 32 | | rsync.image.tag | string | `"1.0.0"` | Rsync image tag | 33 | | rsync.imagePullSecrets | list | `[]` | Rsync image pull secrets | 34 | | rsync.jobAnnotations | object | `{}` | Rsync job annotations | 35 | | rsync.jobLabels | object | `{}` | Rsync job labels | 36 | | rsync.maxRetries | int | `10` | Number of retries to run rsync command | 37 | | rsync.namespace | string | `""` | Namespace to run Rsync pod in | 38 | | rsync.networkPolicy.enabled | bool | `false` | Enable Rsync network policy | 39 | | rsync.nodeName | string | `""` | The node name to schedule Rsync pod on | 40 | | rsync.nodeSelector | object | `{}` | Rsync node selector | 41 | | rsync.podAnnotations | object | `{}` | Rsync pod annotations | 42 | | rsync.podLabels | object | `{}` | Rsync pod labels | 43 | | rsync.podSecurityContext | object | `{}` | Rsync pod security context | 44 | | rsync.privateKey | string | `""` | The private key content | 45 | | rsync.privateKeyMount | bool | `false` | Mount a private key into the Rsync pod | 46 | | rsync.privateKeyMountPath | string | `"/tmp/id_ed25519"` | The path to mount the private key | 47 
| | rsync.pvcMounts | list | `[]` | PVC mounts into the Rsync pod. For examples, see [values.yaml](values.yaml) | 48 | | rsync.resources | object | `{}` | Rsync pod resources | 49 | | rsync.restartPolicy | string | `"Never"` | | 50 | | rsync.retryPeriodSeconds | int | `5` | Waiting time between retries | 51 | | rsync.securityContext | object | `{}` | Rsync deployment security context | 52 | | rsync.serviceAccount.annotations | object | `{}` | Rsync service account annotations | 53 | | rsync.serviceAccount.create | bool | `true` | Create a service account for Rsync | 54 | | rsync.serviceAccount.name | string | `""` | Rsync service account name to use | 55 | | rsync.tolerations | list | see [values.yaml](values.yaml) | Rsync pod tolerations | 56 | | sshd.affinity | object | `{}` | SSHD pod affinity | 57 | | sshd.deploymentAnnotations | object | `{}` | SSHD deployment annotations | 58 | | sshd.deploymentLabels | object | `{}` | SSHD deployment labels | 59 | | sshd.enabled | bool | `false` | Enable SSHD server deployment | 60 | | sshd.image.pullPolicy | string | `"IfNotPresent"` | SSHD image pull policy | 61 | | sshd.image.repository | string | `"docker.io/utkuozdemir/pv-migrate-sshd"` | SSHD image repository | 62 | | sshd.image.tag | string | `"1.1.0"` | SSHD image tag | 63 | | sshd.imagePullSecrets | list | `[]` | SSHD image pull secrets | 64 | | sshd.namespace | string | `""` | Namespace to run SSHD pod in | 65 | | sshd.networkPolicy.enabled | bool | `false` | Enable SSHD network policy | 66 | | sshd.nodeName | string | `""` | The node name to schedule SSHD pod on | 67 | | sshd.nodeSelector | object | `{}` | SSHD node selector | 68 | | sshd.podAnnotations | object | `{}` | SSHD pod annotations | 69 | | sshd.podLabels | object | `{}` | SSHD pod labels | 70 | | sshd.podSecurityContext | object | `{}` | SSHD pod security context | 71 | | sshd.privateKey | string | `""` | The private key content | 72 | | sshd.privateKeyMount | bool | `false` | Mount a private key into 
the SSHD pod | 73 | | sshd.privateKeyMountPath | string | `"/tmp/id_ed25519"` | The path to mount the private key | 74 | | sshd.publicKey | string | `""` | The public key content | 75 | | sshd.publicKeyMount | bool | `true` | Mount a public key into the SSHD pod | 76 | | sshd.publicKeyMountPath | string | `"/root/.ssh/authorized_keys"` | The path to mount the public key | 77 | | sshd.pvcMounts | list | `[]` | PVC mounts into the SSHD pod. For examples, see see [values.yaml](values.yaml) | 78 | | sshd.resources | object | `{}` | SSHD pod resources | 79 | | sshd.securityContext | object | `{"capabilities":{"add":["SYS_CHROOT"]}}` | SSHD deployment security context | 80 | | sshd.service.annotations | object | `{}` | SSHD service annotations | 81 | | sshd.service.loadBalancerIP | string | `""` | SSHD service load balancer IP | 82 | | sshd.service.port | int | `22` | SSHD service port | 83 | | sshd.service.type | string | `"ClusterIP"` | SSHD service type | 84 | | sshd.serviceAccount.annotations | object | `{}` | SSHD service account annotations | 85 | | sshd.serviceAccount.create | bool | `true` | Create a service account for SSHD | 86 | | sshd.serviceAccount.name | string | `""` | SSHD service account name to use | 87 | | sshd.tolerations | list | see [values.yaml](values.yaml) | SSHD pod tolerations | 88 | 89 | ---------------------------------------------- 90 | Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) 91 | -------------------------------------------------------------------------------- /helm/pv-migrate/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "pv-migrate.name" -}} 2 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 3 | {{- end }} 4 | 5 | {{- define "pv-migrate.fullname" -}} 6 | {{- if .Values.fullnameOverride }} 7 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 8 | {{- else 
}} 9 | {{- $name := default .Chart.Name .Values.nameOverride }} 10 | {{- if contains $name .Release.Name }} 11 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 12 | {{- else }} 13 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 14 | {{- end }} 15 | {{- end }} 16 | {{- end }} 17 | 18 | {{- define "pv-migrate.chart" -}} 19 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 20 | {{- end }} 21 | 22 | {{- define "pv-migrate.labels" -}} 23 | helm.sh/chart: {{ include "pv-migrate.chart" . }} 24 | {{ include "pv-migrate.selectorLabels" . }} 25 | {{- if .Chart.AppVersion }} 26 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 27 | {{- end }} 28 | app.kubernetes.io/managed-by: {{ .Release.Service }} 29 | {{- end }} 30 | 31 | {{- define "pv-migrate.selectorLabels" -}} 32 | app.kubernetes.io/name: {{ include "pv-migrate.name" . }} 33 | app.kubernetes.io/instance: {{ .Release.Name }} 34 | {{- end }} 35 | 36 | {{- define "pv-migrate.sshd.serviceAccountName" -}} 37 | {{- if .Values.sshd.serviceAccount.create }} 38 | {{- default (printf "%s-%s" (include "pv-migrate.fullname" .) "sshd") .Values.sshd.serviceAccount.name }} 39 | {{- else }} 40 | {{- default "default" .Values.sshd.serviceAccount.name }} 41 | {{- end }} 42 | {{- end }} 43 | 44 | {{- define "pv-migrate.rsync.serviceAccountName" -}} 45 | {{- if .Values.rsync.serviceAccount.create }} 46 | {{- default (printf "%s-%s" (include "pv-migrate.fullname" .) 
"rsync") .Values.rsync.serviceAccount.name }} 47 | {{- else }} 48 | {{- default "default" .Values.rsync.serviceAccount.name }} 49 | {{- end }} 50 | {{- end }} 51 | -------------------------------------------------------------------------------- /helm/pv-migrate/templates/rsync/job.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rsync.enabled -}} 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: {{ include "pv-migrate.fullname" . }}-rsync 6 | namespace: {{ .Values.rsync.namespace }} 7 | labels: 8 | app.kubernetes.io/component: rsync 9 | {{- include "pv-migrate.labels" . | nindent 4 }} 10 | {{- with .Values.rsync.jobLabels }} 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | {{- with .Values.rsync.jobAnnotations }} 14 | annotations: 15 | {{- toYaml . | nindent 4 }} 16 | {{- end }} 17 | spec: 18 | backoffLimit: {{ .Values.rsync.backoffLimit }} 19 | template: 20 | metadata: 21 | {{- with .Values.rsync.podAnnotations }} 22 | annotations: 23 | {{- toYaml . | nindent 8 }} 24 | {{- end }} 25 | labels: 26 | app.kubernetes.io/component: rsync 27 | {{- include "pv-migrate.selectorLabels" . | nindent 8 }} 28 | {{- with .Values.rsync.podLabels }} 29 | {{- toYaml . | nindent 8 }} 30 | {{- end }} 31 | spec: 32 | {{- with .Values.rsync.imagePullSecrets }} 33 | imagePullSecrets: 34 | {{- toYaml . | nindent 8 }} 35 | {{- end }} 36 | serviceAccountName: {{ include "pv-migrate.rsync.serviceAccountName" . 
}} 37 | restartPolicy: {{ .Values.rsync.restartPolicy }} 38 | securityContext: 39 | {{- toYaml .Values.rsync.podSecurityContext | nindent 8 }} 40 | containers: 41 | - name: rsync 42 | command: 43 | - sh 44 | - -c 45 | - | 46 | set -x 47 | n=0 48 | rc=1 49 | retries={{ .Values.rsync.maxRetries }} 50 | attempts=$((retries+1)) 51 | period={{ .Values.rsync.retryPeriodSeconds }} 52 | {{ if .Values.rsync.privateKeyMount -}} 53 | privateKeyFilename=$(basename "{{ .Values.rsync.privateKeyMountPath }}") 54 | mkdir -p "$HOME/.ssh" 55 | chmod 700 "$HOME/.ssh" 56 | cp -v "{{ .Values.rsync.privateKeyMountPath }}" "$HOME/.ssh/" 57 | chmod 400 "$HOME/.ssh/$privateKeyFilename" 58 | {{- end }} 59 | while [ "$n" -le "$retries" ] 60 | do 61 | {{ required ".Values.rsync.command is required!" .Values.rsync.command }} {{ .Values.rsync.extraArgs }} && rc=0 && break 62 | n=$((n+1)) 63 | echo "rsync attempt $n/$attempts failed, waiting $period seconds before trying again" 64 | sleep $period 65 | done 66 | 67 | if [ $rc -ne 0 ]; then 68 | echo "rsync job failed after $retries retries" 69 | fi 70 | exit $rc 71 | securityContext: 72 | {{- toYaml .Values.rsync.securityContext | nindent 12 }} 73 | image: "{{ .Values.rsync.image.repository }}:{{ .Values.rsync.image.tag }}" 74 | imagePullPolicy: {{ .Values.rsync.image.pullPolicy }} 75 | resources: 76 | {{- toYaml .Values.rsync.resources | nindent 12 }} 77 | volumeMounts: 78 | {{- range $index, $mount := .Values.rsync.pvcMounts }} 79 | - mountPath: {{ $mount.mountPath }} 80 | name: vol-{{ $index }} 81 | readOnly: {{ default false $mount.readOnly }} 82 | {{- end }} 83 | {{- if .Values.rsync.privateKeyMount }} 84 | - mountPath: {{ .Values.rsync.privateKeyMountPath }} 85 | name: private-key 86 | subPath: privateKey 87 | {{- end }} 88 | nodeName: {{ .Values.rsync.nodeName }} 89 | {{- with .Values.rsync.nodeSelector }} 90 | nodeSelector: 91 | {{- toYaml . 
{{- /*
NetworkPolicy for the rsync job pod: allows all ingress and egress so the
one-shot migration traffic is never blocked by a default-deny policy in the
namespace. Gated on rsync.enabled as well (consistent with the rsync secret
and serviceaccount templates) so a policy is never rendered for a disabled
rsync job, and carries the standard chart labels like every other resource.
*/ -}}
{{- if and .Values.rsync.enabled .Values.rsync.networkPolicy.enabled -}}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "pv-migrate.fullname" . }}-rsync
  namespace: {{ .Values.rsync.namespace }}
  labels:
    app.kubernetes.io/component: rsync
    {{- include "pv-migrate.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/component: rsync
      {{- include "pv-migrate.selectorLabels" . | nindent 6 }}
  ingress:
    - {}
  egress:
    - {}
  policyTypes:
    - Ingress
    - Egress
{{- end }}
| nindent 4 }} 11 | data: 12 | privateKey: {{ (required "rsync.privateKey is required!" .Values.rsync.privateKey) | b64enc | quote }} 13 | type: Opaque 14 | {{- end }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/pv-migrate/templates/rsync/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rsync.enabled -}} 2 | {{- if .Values.rsync.serviceAccount.create -}} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ include "pv-migrate.rsync.serviceAccountName" . }} 7 | namespace: {{ .Values.rsync.namespace }} 8 | labels: 9 | app.kubernetes.io/component: rsync 10 | {{- include "pv-migrate.labels" . | nindent 4 }} 11 | {{- with .Values.rsync.serviceAccount.annotations }} 12 | annotations: 13 | {{- toYaml . | nindent 4 }} 14 | {{- end }} 15 | {{- end }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/pv-migrate/templates/sshd/deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.sshd.enabled -}} 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: {{ include "pv-migrate.fullname" . }}-sshd 6 | namespace: {{ .Values.sshd.namespace }} 7 | labels: 8 | app.kubernetes.io/component: sshd 9 | {{- include "pv-migrate.labels" . | nindent 4 }} 10 | {{- with .Values.sshd.deploymentLabels }} 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | {{- with .Values.sshd.deploymentAnnotations }} 14 | annotations: 15 | {{- toYaml . | nindent 4 }} 16 | {{- end }} 17 | spec: 18 | strategy: 19 | type: Recreate 20 | selector: 21 | matchLabels: 22 | app.kubernetes.io/component: sshd 23 | {{- include "pv-migrate.selectorLabels" . | nindent 6 }} 24 | template: 25 | metadata: 26 | {{- with .Values.sshd.podAnnotations }} 27 | annotations: 28 | {{- toYaml . 
| nindent 8 }} 29 | {{- end }} 30 | labels: 31 | app.kubernetes.io/component: sshd 32 | {{- include "pv-migrate.selectorLabels" . | nindent 8 }} 33 | {{- with .Values.sshd.podLabels }} 34 | {{- toYaml . | nindent 8 }} 35 | {{- end }} 36 | spec: 37 | {{- with .Values.sshd.imagePullSecrets }} 38 | imagePullSecrets: 39 | {{- toYaml . | nindent 8 }} 40 | {{- end }} 41 | serviceAccountName: {{ include "pv-migrate.sshd.serviceAccountName" . }} 42 | securityContext: 43 | {{- toYaml .Values.sshd.podSecurityContext | nindent 8 }} 44 | containers: 45 | - name: sshd 46 | command: 47 | - sh 48 | - -c 49 | - | 50 | set -x 51 | {{ if .Values.sshd.privateKeyMount -}} 52 | privateKeyFilename=$(basename "{{ .Values.sshd.privateKeyMountPath }}") 53 | mkdir -p "$HOME/.ssh" 54 | chmod 700 "$HOME/.ssh" 55 | cp -v "{{ .Values.sshd.privateKeyMountPath }}" "$HOME/.ssh/" 56 | chmod 400 "$HOME/.ssh/$privateKeyFilename" 57 | {{- end }} 58 | /usr/sbin/sshd -D -e -f /etc/ssh/sshd_config 59 | securityContext: 60 | {{- toYaml .Values.sshd.securityContext | nindent 12 }} 61 | image: "{{ .Values.sshd.image.repository }}:{{ .Values.sshd.image.tag }}" 62 | imagePullPolicy: {{ .Values.sshd.image.pullPolicy }} 63 | resources: 64 | {{- toYaml .Values.sshd.resources | nindent 12 }} 65 | volumeMounts: 66 | {{- range $index, $mount := .Values.sshd.pvcMounts }} 67 | - mountPath: {{ $mount.mountPath }} 68 | name: vol-{{ $index }} 69 | readOnly: {{ default false $mount.readOnly }} 70 | {{- end }} 71 | {{- if .Values.sshd.publicKeyMount }} 72 | - mountPath: {{ .Values.sshd.publicKeyMountPath }} 73 | name: keys 74 | subPath: publicKey 75 | {{- end }} 76 | {{- if .Values.sshd.privateKeyMount }} 77 | - mountPath: {{ .Values.sshd.privateKeyMountPath }} 78 | name: keys 79 | subPath: privateKey 80 | {{- end }} 81 | nodeName: {{ .Values.sshd.nodeName }} 82 | {{- with .Values.sshd.nodeSelector }} 83 | nodeSelector: 84 | {{- toYaml . 
{{- /*
NetworkPolicy for the sshd pod: allows all ingress and egress so the rsync
client can always reach the SSH endpoint even under a default-deny policy.
Gated on sshd.enabled as well (consistent with the sshd secret and
serviceaccount templates) so a policy is never rendered for a disabled sshd
deployment, and carries the standard chart labels like every other resource.
*/ -}}
{{- if and .Values.sshd.enabled .Values.sshd.networkPolicy.enabled -}}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "pv-migrate.fullname" . }}-sshd
  namespace: {{ .Values.sshd.namespace }}
  labels:
    app.kubernetes.io/component: sshd
    {{- include "pv-migrate.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/component: sshd
      {{- include "pv-migrate.selectorLabels" . | nindent 6 }}
  ingress:
    - {}
  egress:
    - {}
  policyTypes:
    - Ingress
    - Egress
{{- end }}
}}-sshd 7 | namespace: {{ .Values.sshd.namespace }} 8 | labels: 9 | app.kubernetes.io/component: sshd 10 | {{- include "pv-migrate.labels" . | nindent 4 }} 11 | data: 12 | {{- if .Values.sshd.publicKeyMount }} 13 | publicKey: {{ (required "sshd.publicKey is required!" .Values.sshd.publicKey) | b64enc | quote }} 14 | {{- end }} 15 | {{- if .Values.sshd.privateKeyMount }} 16 | privateKey: {{ (required "sshd.privateKey is required!" .Values.sshd.privateKey) | b64enc | quote }} 17 | {{- end }} 18 | type: Opaque 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/pv-migrate/templates/sshd/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.sshd.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "pv-migrate.fullname" . }}-sshd 6 | namespace: {{ .Values.sshd.namespace }} 7 | labels: 8 | app.kubernetes.io/component: sshd 9 | {{- include "pv-migrate.labels" . | nindent 4 }} 10 | {{- with .Values.sshd.service.annotations }} 11 | annotations: 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | spec: 15 | type: {{ .Values.sshd.service.type }} 16 | {{- with .Values.sshd.service.loadBalancerIP }} 17 | loadBalancerIP: {{ . }} 18 | {{- end }} 19 | ports: 20 | - port: {{ .Values.sshd.service.port }} 21 | targetPort: 22 22 | protocol: TCP 23 | name: ssh 24 | selector: 25 | app.kubernetes.io/component: sshd 26 | {{- include "pv-migrate.selectorLabels" . | nindent 4 }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /helm/pv-migrate/templates/sshd/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.sshd.enabled -}} 2 | {{- if .Values.sshd.serviceAccount.create -}} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ include "pv-migrate.sshd.serviceAccountName" . 
}} 7 | namespace: {{ .Values.sshd.namespace }} 8 | labels: 9 | app.kubernetes.io/component: sshd 10 | {{- include "pv-migrate.labels" . | nindent 4 }} 11 | {{- with .Values.sshd.serviceAccount.annotations }} 12 | annotations: 13 | {{- toYaml . | nindent 4 }} 14 | {{- end }} 15 | {{- end }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/pv-migrate/values.yaml: -------------------------------------------------------------------------------- 1 | # -- String to partially override the fullname template with a string (will prepend the release name) 2 | nameOverride: "" 3 | # -- String to fully override the fullname template with a string 4 | fullnameOverride: "" 5 | 6 | sshd: 7 | # -- Enable SSHD server deployment 8 | enabled: false 9 | image: 10 | # -- SSHD image repository 11 | repository: docker.io/utkuozdemir/pv-migrate-sshd 12 | # -- SSHD image pull policy 13 | pullPolicy: IfNotPresent 14 | # -- SSHD image tag 15 | tag: 1.1.0 16 | # -- SSHD image pull secrets 17 | imagePullSecrets: [] 18 | serviceAccount: 19 | # -- Create a service account for SSHD 20 | create: true 21 | # -- SSHD service account annotations 22 | annotations: {} 23 | # -- SSHD service account name to use 24 | name: "" 25 | # -- SSHD pod annotations 26 | podAnnotations: {} 27 | # -- SSHD pod labels 28 | podLabels: {} 29 | # -- SSHD pod security context 30 | podSecurityContext: {} 31 | # -- SSHD deployment security context 32 | securityContext: 33 | capabilities: 34 | add: 35 | - SYS_CHROOT 36 | service: 37 | # -- SSHD service type 38 | type: ClusterIP 39 | # -- SSHD service port 40 | port: 22 41 | # -- SSHD service annotations 42 | annotations: {} 43 | # -- SSHD service load balancer IP 44 | loadBalancerIP: "" 45 | # -- SSHD pod resources 46 | resources: {} 47 | # -- The node name to schedule SSHD pod on 48 | nodeName: "" 49 | # -- SSHD node selector 50 | nodeSelector: {} 51 | # -- SSHD pod tolerations 52 | # @default -- see 
[values.yaml](values.yaml) 53 | tolerations: 54 | - effect: NoExecute 55 | key: node.kubernetes.io/not-ready 56 | operator: Exists 57 | tolerationSeconds: 300 58 | - effect: NoExecute 59 | key: node.kubernetes.io/unreachable 60 | operator: Exists 61 | tolerationSeconds: 300 62 | # -- SSHD pod affinity 63 | affinity: {} 64 | networkPolicy: 65 | # -- Enable SSHD network policy 66 | enabled: false 67 | # -- SSHD deployment labels 68 | deploymentLabels: {} 69 | # -- SSHD deployment annotations 70 | deploymentAnnotations: {} 71 | 72 | # -- Mount a public key into the SSHD pod 73 | publicKeyMount: true 74 | # -- The path to mount the public key 75 | publicKeyMountPath: /root/.ssh/authorized_keys 76 | # -- The public key content 77 | publicKey: "" 78 | 79 | # -- Mount a private key into the SSHD pod 80 | privateKeyMount: false 81 | # -- The path to mount the private key 82 | privateKeyMountPath: /tmp/id_ed25519 83 | # -- The private key content 84 | privateKey: "" 85 | 86 | # -- Namespace to run SSHD pod in 87 | namespace: "" 88 | # -- PVC mounts into the SSHD pod. 
For examples, see see [values.yaml](values.yaml) 89 | pvcMounts: [] 90 | #- name: pvc-1 91 | # readOnly: false 92 | # mountPath: /source 93 | #- name: pvc-2 94 | # readOnly: true 95 | # mountPath: /dest 96 | 97 | rsync: 98 | # -- Enable creation of Rsync job 99 | enabled: false 100 | image: 101 | # -- Rsync image repository 102 | repository: docker.io/utkuozdemir/pv-migrate-rsync 103 | # -- Rsync image pull policy 104 | pullPolicy: IfNotPresent 105 | # -- Rsync image tag 106 | tag: 1.0.0 107 | # -- Rsync image pull secrets 108 | imagePullSecrets: [] 109 | serviceAccount: 110 | # -- Create a service account for Rsync 111 | create: true 112 | # -- Rsync service account annotations 113 | annotations: {} 114 | # -- Rsync service account name to use 115 | name: "" 116 | # -- Rsync pod annotations 117 | podAnnotations: {} 118 | # -- Rsync pod labels 119 | podLabels: {} 120 | # -- Rsync pod security context 121 | podSecurityContext: {} 122 | # -- Rsync deployment security context 123 | securityContext: {} 124 | # -- Rsync pod resources 125 | resources: {} 126 | # -- The node name to schedule Rsync pod on 127 | nodeName: "" 128 | # -- Rsync node selector 129 | nodeSelector: {} 130 | # -- Rsync pod tolerations 131 | # @default -- see [values.yaml](values.yaml) 132 | tolerations: 133 | - effect: NoExecute 134 | key: node.kubernetes.io/not-ready 135 | operator: Exists 136 | tolerationSeconds: 300 137 | - effect: NoExecute 138 | key: node.kubernetes.io/unreachable 139 | operator: Exists 140 | tolerationSeconds: 300 141 | # -- Rsync pod affinity 142 | affinity: {} 143 | # Rsync job restart policy 144 | restartPolicy: Never 145 | # Rsync job backoff limit 146 | backoffLimit: 0 147 | networkPolicy: 148 | # -- Enable Rsync network policy 149 | enabled: false 150 | # -- Rsync job labels 151 | jobLabels: {} 152 | # -- Rsync job annotations 153 | jobAnnotations: {} 154 | 155 | # -- Mount a private key into the Rsync pod 156 | privateKeyMount: false 157 | # -- The path to mount the 
private key 158 | privateKeyMountPath: /tmp/id_ed25519 159 | # -- The private key content 160 | privateKey: "" 161 | # -- Number of retries to run rsync command 162 | maxRetries: 10 163 | # -- Waiting time between retries 164 | retryPeriodSeconds: 5 165 | # -- Full Rsync command and flags 166 | command: "" 167 | # -- Extra args to be appended to the rsync command. Setting this might cause the tool to not function properly. 168 | extraArgs: "" 169 | 170 | # -- Namespace to run Rsync pod in 171 | namespace: "" 172 | # -- PVC mounts into the Rsync pod. For examples, see [values.yaml](values.yaml) 173 | pvcMounts: [] 174 | #- name: pvc-1 175 | # readOnly: false 176 | # mountPath: /source 177 | #- name: pvc-2 178 | # readOnly: true 179 | # mountPath: /dest 180 | -------------------------------------------------------------------------------- /helm/test-vals-different-cluster.yaml: -------------------------------------------------------------------------------- 1 | rsync: 2 | enabled: true 3 | nodeName: porcupine 4 | mountSource: false 5 | 6 | privateKeyMount: true 7 | privateKeyMountPath: /tmp/id_ed25519 8 | privateKey: asdf 9 | 10 | sshRemoteHost: REMOTE_HOST 11 | 12 | namespace: pv-migrate-test-2 13 | pvcMounts: 14 | - name: pv-migrate-test-dest-2 15 | mountPath: /dest 16 | 17 | sourcePath: /source/ 18 | destPath: /dest/ 19 | 20 | sshd: 21 | enabled: true 22 | publicKey: qwer 23 | namespace: pv-migrate-test-1 24 | pvcMounts: 25 | - name: pv-migrate-test-source-1 26 | readOnly: true 27 | mountPath: /source 28 | -------------------------------------------------------------------------------- /helm/test-vals-different-ns.yaml: -------------------------------------------------------------------------------- 1 | rsync: 2 | enabled: true 3 | deleteExtraneousFiles: false 4 | noChown: false 5 | privateKeyMount: true 6 | privateKeyMountPath: /tmp/id_ed25519 7 | privateKey: | 8 | -----BEGIN OPENSSH PRIVATE KEY----- 9 | 
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtz 10 | c2gtZWQyNTUxOQAAACCw++RvFPODUxObjYsfZe1IFA8FdB8bfwYSdAjo3iEz3QAA 11 | AIiKNGNUijRjVAAAAAtzc2gtZWQyNTUxOQAAACCw++RvFPODUxObjYsfZe1IFA8F 12 | dB8bfwYSdAjo3iEz3QAAAEDEOri9qDz2wm/UupFAY7ipERgoNDNXyhd/cDI5lmxv 13 | ZLD75G8U84NTE5uNix9l7UgUDwV0Hxt/BhJ0COjeITPdAAAAAAECAwQF 14 | -----END OPENSSH PRIVATE KEY----- 15 | 16 | namespace: pv-migrate-test-2 17 | pvcMounts: 18 | - name: pv-migrate-test-dest-2 19 | mountPath: /dest 20 | 21 | sourcePath: /source/ 22 | destPath: /dest/ 23 | 24 | sshd: 25 | enabled: true 26 | publicKey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILD75G8U84NTE5uNix9l7UgUDwV0Hxt/BhJ0COjeITPd 27 | 28 | namespace: pv-migrate-test-1 29 | pvcMounts: 30 | - name: pv-migrate-test-source-1 31 | readOnly: true 32 | mountPath: /source 33 | -------------------------------------------------------------------------------- /helm/test-vals-same-ns.yaml: -------------------------------------------------------------------------------- 1 | rsync: 2 | enabled: true 3 | nodeName: porcupine 4 | mountSource: true 5 | 6 | namespace: pv-migrate-test-1 7 | pvcMounts: 8 | - name: pv-migrate-test-source-1 9 | readOnly: true 10 | mountPath: /source 11 | - name: pv-migrate-test-dest-2 12 | mountPath: /dest 13 | 14 | sourcePath: /source/ 15 | destPath: /dest/ 16 | -------------------------------------------------------------------------------- /img/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkuozdemir/pv-migrate/001ed5202b31e8db03f44d9fc7b86035482db86c/img/demo.gif -------------------------------------------------------------------------------- /k8s/client.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | 7 | "k8s.io/cli-runtime/pkg/genericclioptions" 8 | "k8s.io/client-go/kubernetes" 9 | "k8s.io/client-go/rest" 10 | 
"k8s.io/client-go/tools/clientcmd" 11 | ) 12 | 13 | type ClusterClient struct { 14 | RestConfig *rest.Config 15 | KubeClient kubernetes.Interface 16 | RESTClientGetter genericclioptions.RESTClientGetter 17 | NsInContext string 18 | } 19 | 20 | func GetClusterClient( 21 | kubeconfigPath string, 22 | context string, 23 | logger *slog.Logger, 24 | ) (*ClusterClient, error) { 25 | config, rcGetter, namespace, err := buildK8sConfig(kubeconfigPath, context, logger) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | kubeClient, err := kubernetes.NewForConfig(config) 31 | if err != nil { 32 | return nil, fmt.Errorf("failed to create kubernetes client: %w", err) 33 | } 34 | 35 | return &ClusterClient{ 36 | RestConfig: config, 37 | KubeClient: kubeClient, 38 | RESTClientGetter: rcGetter, 39 | NsInContext: namespace, 40 | }, nil 41 | } 42 | 43 | //nolint:ireturn,nolintlint 44 | func buildK8sConfig(kubeconfigPath string, context string, logger *slog.Logger) (*rest.Config, 45 | genericclioptions.RESTClientGetter, string, error, 46 | ) { 47 | clientConfigLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules() 48 | if kubeconfigPath != "" { 49 | clientConfigLoadingRules.ExplicitPath = kubeconfigPath 50 | } 51 | 52 | config := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( 53 | clientConfigLoadingRules, 54 | &clientcmd.ConfigOverrides{ 55 | CurrentContext: context, 56 | }) 57 | 58 | namespace, _, err := config.Namespace() 59 | if err != nil { 60 | return nil, nil, "", fmt.Errorf("failed to get namespace from kubeconfig: %w", err) 61 | } 62 | 63 | clientConfig, err := config.ClientConfig() 64 | if err != nil { 65 | return nil, nil, "", fmt.Errorf("failed to create kubernetes client config: %w", err) 66 | } 67 | 68 | rcGetter := NewRESTClientGetter(clientConfig, config, logger) 69 | 70 | return clientConfig, rcGetter, namespace, nil 71 | } 72 | -------------------------------------------------------------------------------- /k8s/client_test.go: 
-------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | _ "embed" 5 | "os" 6 | "testing" 7 | 8 | "github.com/neilotoole/slogt" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | //go:embed testdata/_kubeconfig_test.yaml 14 | var kubeconfigContent string 15 | 16 | func TestGetClusterClient(t *testing.T) { 17 | t.Parallel() 18 | 19 | kubeconfig := prepareKubeconfig() 20 | defer func() { 21 | os.Remove(kubeconfig) 22 | }() 23 | 24 | logger := slogt.New(t) 25 | 26 | clusterClient, err := GetClusterClient(kubeconfig, "context-1", logger) 27 | 28 | require.NoError(t, err) 29 | 30 | rcGetter := clusterClient.RESTClientGetter 31 | 32 | ns, _, err := rcGetter.ToRawKubeConfigLoader().Namespace() 33 | require.NoError(t, err) 34 | assert.Equal(t, "namespace1", ns) 35 | 36 | discoveryClient, err := rcGetter.ToDiscoveryClient() 37 | require.NoError(t, err) 38 | assert.NotNil(t, discoveryClient) 39 | 40 | restConfig, err := rcGetter.ToRESTConfig() 41 | require.NoError(t, err) 42 | assert.NotNil(t, restConfig) 43 | 44 | restMapper, err := rcGetter.ToRESTMapper() 45 | require.NoError(t, err) 46 | assert.NotNil(t, restMapper) 47 | } 48 | 49 | func TestBuildK8sConfig(t *testing.T) { 50 | t.Parallel() 51 | 52 | conf := prepareKubeconfig() 53 | defer func() { 54 | _ = os.Remove(conf) 55 | }() 56 | 57 | logger := slogt.New(t) 58 | 59 | config, _, namespace, err := buildK8sConfig(conf, "", logger) 60 | assert.NotNil(t, config) 61 | assert.Equal(t, "namespace1", namespace) 62 | require.NoError(t, err) 63 | config, _, namespace, err = buildK8sConfig(conf, "context-2", logger) 64 | require.NoError(t, err) 65 | assert.Equal(t, "namespace2", namespace) 66 | assert.NotNil(t, config) 67 | config, _, namespace, err = buildK8sConfig(conf, "context-nonexistent", logger) 68 | assert.Nil(t, config) 69 | assert.Empty(t, namespace) 70 | require.Error(t, err) 71 | } 72 | 73 | func 
prepareKubeconfig() string { 74 | testConfig, _ := os.CreateTemp("", "pv-migrate-testconfig-*.yaml") 75 | 76 | testConfig.WriteString(kubeconfigContent) //nolint:errcheck 77 | 78 | return testConfig.Name() 79 | } 80 | -------------------------------------------------------------------------------- /k8s/completion.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | func GetContexts(kubeconfigPath string, logger *slog.Logger) ([]string, error) { 12 | client, err := GetClusterClient(kubeconfigPath, "", logger) 13 | if err != nil { 14 | return nil, err 15 | } 16 | 17 | rawConfig, err := client.RESTClientGetter.ToRawKubeConfigLoader().RawConfig() 18 | if err != nil { 19 | return nil, fmt.Errorf("failed to load kubeconfig: %w", err) 20 | } 21 | 22 | ctxs := rawConfig.Contexts 23 | 24 | contextNames := make([]string, len(ctxs)) 25 | 26 | index := 0 27 | 28 | for name := range ctxs { 29 | contextNames[index] = name 30 | index++ 31 | } 32 | 33 | return contextNames, nil 34 | } 35 | 36 | func GetNamespaces( 37 | ctx context.Context, 38 | kubeconfigPath, kubectx string, 39 | logger *slog.Logger, 40 | ) ([]string, error) { 41 | client, err := GetClusterClient(kubeconfigPath, kubectx, logger) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | nss, err := client.KubeClient.CoreV1(). 
47 | Namespaces().List(ctx, metav1.ListOptions{}) 48 | if err != nil { 49 | return nil, fmt.Errorf("failed to list namespaces: %w", err) 50 | } 51 | 52 | nsNames := make([]string, len(nss.Items)) 53 | for i, ns := range nss.Items { 54 | nsNames[i] = ns.Name 55 | } 56 | 57 | return nsNames, nil 58 | } 59 | 60 | func GetPVCs( 61 | ctx context.Context, 62 | kubeconfigPath, kubectx, namespace string, 63 | logger *slog.Logger, 64 | ) ([]string, error) { 65 | client, err := GetClusterClient(kubeconfigPath, kubectx, logger) 66 | if err != nil { 67 | return nil, err 68 | } 69 | 70 | pvcs, err := client.KubeClient.CoreV1(). 71 | PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{}) 72 | if err != nil { 73 | return nil, fmt.Errorf("failed to list PVCs: %w", err) 74 | } 75 | 76 | pvcNames := make([]string, len(pvcs.Items)) 77 | for i, pvc := range pvcs.Items { 78 | pvcNames[i] = pvc.Name 79 | } 80 | 81 | return pvcNames, nil 82 | } 83 | -------------------------------------------------------------------------------- /k8s/helm.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "log/slog" 5 | 6 | "k8s.io/apimachinery/pkg/api/meta" 7 | "k8s.io/client-go/discovery" 8 | "k8s.io/client-go/discovery/cached/memory" 9 | "k8s.io/client-go/rest" 10 | "k8s.io/client-go/restmapper" 11 | "k8s.io/client-go/tools/clientcmd" 12 | ) 13 | 14 | type HelmRESTClientGetter struct { 15 | restConfig *rest.Config 16 | clientConfig clientcmd.ClientConfig 17 | logger *slog.Logger 18 | } 19 | 20 | func NewRESTClientGetter(restConfig *rest.Config, 21 | clientConfig clientcmd.ClientConfig, logger *slog.Logger, 22 | ) *HelmRESTClientGetter { 23 | return &HelmRESTClientGetter{ 24 | restConfig: restConfig, 25 | clientConfig: clientConfig, 26 | logger: logger, 27 | } 28 | } 29 | 30 | func (c *HelmRESTClientGetter) ToRESTConfig() (*rest.Config, error) { 31 | return c.restConfig, nil 32 | } 33 | 34 | //nolint:ireturn,nolintlint 35 
| func (c *HelmRESTClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { 36 | config, err := c.ToRESTConfig() 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | config.Burst = 100 42 | discoveryClient, _ := discovery.NewDiscoveryClientForConfig(config) 43 | 44 | return memory.NewMemCacheClient(discoveryClient), nil 45 | } 46 | 47 | //nolint:ireturn,nolintlint 48 | func (c *HelmRESTClientGetter) ToRESTMapper() (meta.RESTMapper, error) { 49 | discoveryClient, err := c.ToDiscoveryClient() 50 | if err != nil { 51 | return nil, err 52 | } 53 | 54 | mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) 55 | 56 | //nolint:godox 57 | expander := restmapper.NewShortcutExpander(mapper, discoveryClient, func(warning string) { 58 | c.logger.Debug("warning from shortcut expander", "warning", warning) 59 | }) 60 | 61 | return expander, nil 62 | } 63 | 64 | //nolint:ireturn,nolintlint 65 | func (c *HelmRESTClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig { 66 | return c.clientConfig 67 | } 68 | -------------------------------------------------------------------------------- /k8s/job.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "log/slog" 9 | 10 | "golang.org/x/sync/errgroup" 11 | corev1 "k8s.io/api/core/v1" 12 | "k8s.io/client-go/kubernetes" 13 | 14 | "github.com/utkuozdemir/pv-migrate/rsync/progress" 15 | ) 16 | 17 | // WaitForJobCompletion waits for the Kubernetes job to complete. 
18 | // 19 | 20 | func WaitForJobCompletion(ctx context.Context, cli kubernetes.Interface, 21 | namespace string, name string, progressBarRequested bool, logger *slog.Logger, 22 | ) (retErr error) { 23 | canDisplayProgressBar := ctx.Value(progress.CanDisplayProgressBarContextKey{}) != nil 24 | showProgressBar := progressBarRequested && canDisplayProgressBar 25 | labelSelector := "job-name=" + name 26 | 27 | pod, err := WaitForPod(ctx, cli, namespace, labelSelector) 28 | if err != nil { 29 | return err 30 | } 31 | 32 | var eg errgroup.Group //nolint:varnamelen 33 | 34 | defer func() { 35 | retErr = errors.Join(retErr, eg.Wait()) 36 | }() 37 | 38 | tailCtx, tailCancel := context.WithCancel(ctx) 39 | defer tailCancel() 40 | 41 | progressLogger := progress.NewLogger(progress.LoggerOptions{ 42 | ShowProgressBar: showProgressBar, 43 | LogStreamFunc: func(ctx context.Context) (io.ReadCloser, error) { 44 | return cli.CoreV1().Pods(namespace).GetLogs(pod.Name, 45 | &corev1.PodLogOptions{Follow: true}).Stream(ctx) 46 | }, 47 | }) 48 | 49 | eg.Go(func() error { 50 | return progressLogger.Start(tailCtx, logger) 51 | }) 52 | 53 | phase, err := waitForPodTermination(ctx, cli, pod.Namespace, pod.Name) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | if *phase != corev1.PodSucceeded { 59 | return fmt.Errorf("job %s/%s failed", pod.Namespace, pod.Name) 60 | } 61 | 62 | if err = progressLogger.MarkAsComplete(ctx); err != nil { 63 | return fmt.Errorf("failed to mark progress logger as complete: %w", err) 64 | } 65 | 66 | return nil 67 | } 68 | -------------------------------------------------------------------------------- /k8s/pod.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/fields" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/watch" 
13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/tools/cache" 15 | watchtools "k8s.io/client-go/tools/watch" 16 | ) 17 | 18 | const ( 19 | podWatchTimeout = 2 * time.Minute 20 | ) 21 | 22 | func WaitForPod( 23 | ctx context.Context, 24 | cli kubernetes.Interface, 25 | namespace, labelSelector string, 26 | ) (*corev1.Pod, error) { 27 | var result *corev1.Pod 28 | 29 | resCli := cli.CoreV1().Pods(namespace) 30 | 31 | ctx, cancel := context.WithTimeout(ctx, podWatchTimeout) 32 | defer cancel() 33 | 34 | listWatch := &cache.ListWatch{ 35 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { 36 | options.LabelSelector = labelSelector 37 | 38 | list, err := resCli.List(ctx, options) 39 | if err != nil { 40 | return nil, fmt.Errorf("failed to list pods: %w", err) 41 | } 42 | 43 | return list, nil 44 | }, 45 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 46 | options.LabelSelector = labelSelector 47 | 48 | resWatch, err := resCli.Watch(ctx, options) 49 | if err != nil { 50 | return nil, fmt.Errorf("failed to watch pods: %w", err) 51 | } 52 | 53 | return resWatch, nil 54 | }, 55 | } 56 | 57 | if _, err := watchtools.UntilWithSync(ctx, listWatch, &corev1.Pod{}, nil, 58 | func(event watch.Event) (bool, error) { 59 | res, ok := event.Object.(*corev1.Pod) 60 | if !ok { 61 | return false, fmt.Errorf("unexpected type while watching pods: ns: %s, labelSelector: %s", namespace, labelSelector) 62 | } 63 | 64 | phase := res.Status.Phase 65 | if phase != corev1.PodPending { 66 | result = res 67 | 68 | return true, nil 69 | } 70 | 71 | return false, nil 72 | }); err != nil { 73 | return nil, fmt.Errorf("failed to wait for pod: %w", err) 74 | } 75 | 76 | return result, nil 77 | } 78 | 79 | func waitForPodTermination(ctx context.Context, cli kubernetes.Interface, 80 | namespace string, name string, 81 | ) (*corev1.PodPhase, error) { 82 | var result *corev1.PodPhase 83 | 84 | resCli := cli.CoreV1().Pods(namespace) 85 | fieldSelector := 
fields.OneTermEqualSelector(metav1.ObjectNameField, name).String() 86 | listWatch := &cache.ListWatch{ 87 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { 88 | options.FieldSelector = fieldSelector 89 | 90 | list, err := resCli.List(ctx, options) 91 | if err != nil { 92 | return nil, fmt.Errorf("failed to list pods: %w", err) 93 | } 94 | 95 | return list, nil 96 | }, 97 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 98 | options.FieldSelector = fieldSelector 99 | 100 | resWatch, err := resCli.Watch(ctx, options) 101 | if err != nil { 102 | return nil, fmt.Errorf("failed to watch pods: %w", err) 103 | } 104 | 105 | return resWatch, nil 106 | }, 107 | } 108 | 109 | if _, err := watchtools.UntilWithSync(ctx, listWatch, &corev1.Pod{}, nil, 110 | func(event watch.Event) (bool, error) { 111 | res, ok := event.Object.(*corev1.Pod) 112 | if !ok { 113 | return false, fmt.Errorf("unexpected type while watching pods: %s/%s", namespace, name) 114 | } 115 | 116 | phase := res.Status.Phase 117 | if phase != corev1.PodRunning { 118 | result = &phase 119 | 120 | return true, nil 121 | } 122 | 123 | return false, nil 124 | }); err != nil { 125 | return nil, fmt.Errorf("failed to wait for pod termination: %w", err) 126 | } 127 | 128 | return result, nil 129 | } 130 | -------------------------------------------------------------------------------- /k8s/portforward.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "net/http" 7 | "net/url" 8 | "path" 9 | 10 | "k8s.io/client-go/rest" 11 | "k8s.io/client-go/tools/portforward" 12 | "k8s.io/client-go/transport/spdy" 13 | ) 14 | 15 | type PortForwardRequest struct { 16 | // RestConfig is the kubernetes config 17 | RestConfig *rest.Config 18 | PodNs string 19 | PodName string 20 | LocalPort int 21 | PodPort int 22 | StopCh <-chan struct{} 23 | ReadyCh chan struct{} 24 | } 25 | 26 | func 
PortForward(req *PortForwardRequest, logger *slog.Logger) error { 27 | targetURL, err := url.Parse(req.RestConfig.Host) 28 | if err != nil { 29 | return fmt.Errorf("failed to parse target url: %w", err) 30 | } 31 | 32 | targetURL.Path = path.Join( 33 | "api", "v1", "namespaces", req.PodNs, "pods", req.PodName, "portforward", 34 | ) 35 | 36 | transport, upgrader, err := spdy.RoundTripperFor(req.RestConfig) 37 | if err != nil { 38 | return fmt.Errorf("failed to initialize roundtripper: %w", err) 39 | } 40 | 41 | outWriter := &slogDebugWriter{logger: logger} 42 | 43 | dialer := spdy.NewDialer( 44 | upgrader, 45 | &http.Client{Transport: transport}, 46 | http.MethodPost, 47 | targetURL, 48 | ) 49 | 50 | ports := []string{fmt.Sprintf("%d:%d", req.LocalPort, req.PodPort)} 51 | 52 | forwarder, err := portforward.New(dialer, ports, req.StopCh, req.ReadyCh, outWriter, outWriter) 53 | if err != nil { 54 | return fmt.Errorf("failed to initialize portforward: %w", err) 55 | } 56 | 57 | if err = forwarder.ForwardPorts(); err != nil { 58 | return fmt.Errorf("failed to forward ports: %w", err) 59 | } 60 | 61 | return nil 62 | } 63 | 64 | type slogDebugWriter struct { 65 | logger *slog.Logger 66 | } 67 | 68 | func (w *slogDebugWriter) Write(p []byte) (int, error) { 69 | w.logger.Debug(string(p)) 70 | 71 | return len(p), nil 72 | } 73 | -------------------------------------------------------------------------------- /k8s/service.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/fields" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/watch" 13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/tools/cache" 15 | watchtools "k8s.io/client-go/tools/watch" 16 | ) 17 | 18 | //nolint:funlen 19 | func GetServiceAddress( 20 | ctx context.Context, 21 | 
// GetServiceAddress waits for the service namespace/name to become reachable
// and returns an address for it: for a ClusterIP service, the in-cluster DNS
// name "<name>.<namespace>"; for a service with load-balancer ingress, the
// ingress hostname if set, otherwise the ingress IP. The wait is bounded by
// lbTimeout (or an earlier cancellation of ctx); on timeout or watch failure
// a wrapped error is returned.
//
//nolint:funlen
func GetServiceAddress(
	ctx context.Context,
	cli kubernetes.Interface,
	namespace string,
	name string,
	lbTimeout time.Duration,
) (string, error) {
	var result string

	resCli := cli.CoreV1().Services(namespace)
	// Watch only the single service we care about, by metadata.name.
	fieldSelector := fields.OneTermEqualSelector(metav1.ObjectNameField, name).String()

	// Bound the total wait; the list/watch calls below inherit this deadline.
	ctx, cancel := context.WithTimeout(ctx, lbTimeout)
	defer cancel()

	listWatch := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector

			list, err := resCli.List(ctx, options)
			if err != nil {
				return nil, fmt.Errorf("failed to list services %s/%s: %w", namespace, name, err)
			}

			return list, nil
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector

			resWatch, err := resCli.Watch(ctx, options)
			if err != nil {
				return nil, fmt.Errorf("failed to watch services %s/%s: %w", namespace, name, err)
			}

			return resWatch, nil
		},
	}

	if _, err := watchtools.UntilWithSync(ctx, listWatch, &corev1.Service{}, nil,
		func(event watch.Event) (bool, error) {
			res, ok := event.Object.(*corev1.Service)
			if !ok {
				return false, fmt.Errorf("unexpected type while watching service: %s/%s", namespace, name)
			}

			// ClusterIP services are immediately addressable via cluster DNS;
			// no need to wait for any status to be populated.
			if res.Spec.Type == corev1.ServiceTypeClusterIP {
				result = res.Name + "." + res.Namespace

				return true, nil
			}

			// Otherwise wait until the load balancer reports an ingress,
			// preferring a hostname over a bare IP when both are present.
			if len(res.Status.LoadBalancer.Ingress) > 0 {
				if len(res.Status.LoadBalancer.Ingress[0].Hostname) > 0 {
					result = res.Status.LoadBalancer.Ingress[0].Hostname
				} else {
					result = res.Status.LoadBalancer.Ingress[0].IP
				}

				return true, nil
			}

			return false, nil
		}); err != nil {
		return "", fmt.Errorf("failed to get service %s/%s address: %w", namespace, name, err)
	}

	return result, nil
}
WkxZV0lPWCsrTWhET0tLUXNDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FnRUFJWE1acnVIOApIeUxEdVN1M3dzTTExT29CZUlWb1N1Q3hJZnVDUjBDNHoyWlhJMk84ZmJIMUlOcHl5NmhNN0VkQVc1eXV4QVZBClUyUEVPYmRVTHE5YVI5UFl3dmhWczhLRWh5ZG9NakFGeW83dk5oSWxha3ZST1ZOK2FuS2Jld2plZHBZekhsK3QKK01nZVFPVE9adjZWNC9iZTRjc3Q5OEVFYTN2M2huU0pMN0dKeUdReXh5dVZ6VTZzampxVTFjY3JXMWxTd2VaZgpiS2w4dGszeER2RnJ2QW9XZFR1M0h6a1kvcjE3SmlnQmN2TzZPYkdJcStEekx0YkN1R1hzaS9Ob0VnVlFuZm9PCnpnS2ZKNWkzUjJOK2dBLzVpUUFaSnVNa3c1SWJFdjNsUGozQjRoWW03RlZyamxxcjA1Q2hab1crSFNoWGlROWwKSElWcVNqSnBvMkhCYitZbzV3RDU4b1dFZ2wyMXd1a0FEalc4VkE4S0pNc1krTW5Zb2dRTHhkdzZ0dSsrSzdwVQpmM3FGQjlGNmk5M0srMEE4b2piUmJhTE55dVhNeGtMdE1kaFd4R0ZKekxXWW50Z1JiaWxUV2VvZDNPMzVMeWd6CnJZaURyRWVLOEY0RW9ncHZXL04xbFlXaURrYkY5RU1SVzFuQUJCNUdldDlhQXV0N08vVTQvTTdrcWE4dGlHSUkKVFlXZStBMjBvaGRwejlHekxUTG9ST1h2WTlHRFlGZGpWWms5NUFpR1VzYlVmanZwMmVMMlI3QUdrK1dXTVQyaQpnZUlVNHNaRFR0dGNua21tazBJRFJXRENDOWRuQjg0WEZyK01nTElYcmgxK2tIN2R5LzBIcGFlYVQ5azBRMGlhCk03ZlRyczFiMXhpWDBWaGpWdFFhZDJ5cVBiV3oyV0ozK0xJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== 7 | server: https://example.internal:6443 8 | name: cluster-1 9 | contexts: 10 | - context: 11 | cluster: cluster-1 12 | namespace: namespace1 13 | user: user-1 14 | name: context-1 15 | - context: 16 | cluster: cluster-1 17 | namespace: namespace2 18 | user: user-1 19 | name: context-2 20 | current-context: context-1 21 | users: 22 | - name: user-1 23 | user: 24 | client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURtakNDQVlJQ0NRQy94VFJiWHR5cUFEQU5CZ2txaGtpRzl3MEJBUXNGQURBTk1Rc3dDUVlEVlFRRERBSmoKWVRBZUZ3MHlNVEEwTURJeE1qRXlOVE5hRncweU1qQTRNVFV4TWpFeU5UTmFNQkV4RHpBTkJnTlZCQU1NQm1OcwphV1Z1ZERDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS05oc1lEbWRwWFJITDhhCjRFTUUxdTRGUXVnNW5hZ3k1UXJZWEFNTWtrTWNpVk5iaHNidXhQZk04NmpUSGQrSVNNMmIvQkQxdVZrWE55N2EKTnRQeDNMRUFRSEE0aEhTTVYrd2FvdzRXanYzZm50c2QyeCtYZUxlbHIzVGo2LzlpOElWdDE0V0FqNkxNbVJaWgovTHp5V2VrK2ZjZUNoWUNhVkc0ek9yZEx4b0F2MHFYMG1tM1laYkkxYlFIS0NQNUZJeWN4MjgvbVZGdWlCRWxQClBQckxXeS9ONHhmUkhMekJPTEFiTCtFNG5XRTdPc211U3ZWclVnTFpnMzF4ZGtURlp1MlNUREoxWGxXdVo4b0cKWFRKeVhIeW92YTFjTUwyS2c2Z0RQb1FLRmNMZVJseHMvUVNmWno5QVF4K1JudFhiQXhiWGRmdUg5d3BNVVpaVgpmN2VyYi84Q0F3RUFBVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBZ0VBRFdGL2ZkM2Q5VnltOXBGTVVWWGVIRUh6Cklick9sVXVHdEoyOVRhOGw0Yk0vSi9FNmxucEFQeFJ3R3M0Tm13c3JpQm9tT3VIR0hQcUsyeVVnZ3FXTFNsTVAKV3FIa01yaUpnb05yZlZRMFlZQjlTZ3prRzRLTUc3Q09KdjhpT095WlFudldwZEpTazgrY3FDbnNLR01aUElJbgpBUjRpSDZ2b2lZQysvbG1aL1FVOHdlNVUvSXZhclhtZDZVZEN2K3BtZy9qc2I2eGh2dkdMSlVZQ0dKMzdFMUhZCjBDZlV5bzU4OHMvMCtQdmZDR0x5TkNPejViQ2RueFVDbGZ5YW1QR1RIUXkyOXo1dXVzNGk5MjF3Mm9ORTNNdmcKbmNKNUZBMWlwT3NvOHFqOGQ2V0RFU3RWbExmQzBDaFlqNDJwb0J3cWMwakJ4OVhQcDltREo4V2lnWU80NzJvZApDSHZmdjM0YW9sUzZIRGNKUmFpTS9EaTJsczZyTzhldmRXdVFJWk1GNDdLNnZKZGgzTGE1S29NQ0dWN0xtU0hWClpvRjV3Q2lVNHpyRzBwV3JZcFNySFpHQ25aZVF1OHlzVWNRby9yemVsMlBQUDVsQm1va01XcERoc1lmUmF2ajUKUWJmSk84SHlteU1HTFdvOEVYcjJLSkVaZjl1R3Mzems4QlBkbFRaZjBWVlF0NHgvc0RFZ0ZBVzNNR0RZb2NybAowNkdBM1U4Um1paWE0RC9WVGR3VnozdVR3RGVnVkFOb0dyTFU4R2hQKzAxSGNMb0p6Y3krRkR1R2dEUWk2RUJNCjM5VVY4aUs2djdtcnZ5TWoycU03cjIwYjFqQWZORlk2KzBJRms4VFNINTlvaHEwUnl6azZidnRVUGx1UDE4WXoKdStZOUJQTGM3cmRSbTdoSXdpYz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= 25 | client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBbzJHeGdPWjJsZEVjdnhyZ1F3VFc3Z1ZDNkRtZHFETGxDdGhjQXd5U1F4eUpVMXVHCnh1N0U5OHp6cU5NZDM0aEl6WnY4RVBXNVdSYzNMdG8yMC9IY3NRQkFjRGlFZEl4WDdCcWpEaGFPL2QrZTJ4M2IKSDVkNHQ2V3ZkT1ByLzJMd2hXM1hoWUNQb3N5WkZsbjh2UEpaNlQ1OXg0S0ZnSnBVYmpNNnQwdkdnQy9TcGZTYQpiZGhsc2pWdEFjb0kva1VqSnpIYnorWlVXNklFU1U4OCtzdGJMODNqRjlFY3ZNRTRzQnN2NFRpZFlUczZ5YTVLCjlXdFNBdG1EZlhGMlJNVm03WkpNTW5WZVZhNW55Z1pkTW5KY2ZLaTlyVnd3dllxRHFBTStoQW9Wd3Q1R1hHejkKQko5blAwQkRINUdlMWRzREZ0ZDErNGYzQ2t4UmxsVi90NnR2L3dJREFRQUJBb0lCQUhXVXNFZEJGT1RiR3FJYQpPSDFpOVQ5VlFBS2F2SEM1T0FjbkVJa05PaEhjYnFRMEFYb1hpbDkvKzNkcmdGdEgzSVZFbGhLcWxScWR6NVFECmJXUDQ4MGZiRTRUdjE3dDg2U1c5UWMyNHhHM0d5cStNL0E3dlZ1eHI3Q3JtZ1FoSjd5bEhsTXk2YWlUU3MvWFgKQzBINzlHTWRHeTVESzhzU3htS01zZHdLUjdBbUZEb0UwQ0Q3VjRocEplcGorQXdzTTFjNnhPNGoxcXNuMWpPMQpsczE2K05mVzU3VWJUbjhDM3lISVNQS0xnNjVxak5VZG5WS29ldDUwd2lKeEx5K1hldnZjRTVodXBnT0JtWVcxCjE1cWgvUFVTK3UvenFYTFJMZ0NXdG5XYjlqTVRXRDhxUEFSdmNIL1FMWlBSSDlwM0VOK1l1dzFyby8vODBkSisKTGFXaTljRUNnWUVBMEg3djJ5YWJONXNnY1NrUjhPeXhSU1kvNFFNRUNZRTRiVzRBQ3Jia29LV3RwRjlJaUNTcwpZcjRURXJsQ1N2TDJtcGlkOGgvbkhZRy9PTEpUcFpzOXhiOVdmc0h3elN0SDIvSVZ4NGlLdWJTQjhLb3RjSmR4CklVTUVjWitqK1Q1dHY4eTY5MVB0RUVtOSt0Ylc0QytRUVo2WGg0Z3dvcjlBK2E4K0Ria01LWjhDZ1lFQXlKdFoKODRXQ1VPT0tzREp3eFYxK2h5VjlYUkNiZDNnbXJlNjV2dXpUTnRIaDJab0tVbnQ0UG9kK1ZQTHJjQ2l1MmRqWgpmZU5KcHk2bTF6UHc0WGUvamRiMGVyWmR4RjFpdEFqTEF4TGluZzh5SGgxQ2ZUaldlTlFrRnJsakZ0UXZ3KzRlCmtpVmY1SGhVbEJ3ZTBXajl6SmswS24zVnF6Y1h4aUljNjgrSjNhRUNnWUFkWGZQTTYzZ3JjZWMwNlRNRXUySVEKdWwxL0JSeTVkZ0VMc05YQ3JKWXhab1M1eFU0REZ4OU0zVGIwaVRHa091cXZSZFhoZDhybTVoS1lHczRiWW0xbQpBQmhmekZOdG9xNEJkZXpKYVJzWkl5cTJTbElTTE5LYmU4UVJlUW53bm5GNDlZNVVMZmNUTnovU0tCYW1CeTBMCmdSb1kxZjc3dlhLYWVLdUY1VTJvUXdLQmdRQ052Y2FtM0RTaHRzU3RqbFpEL2JXREJDZzU4NXdGbysyZ2c4YlcKY2pzaXh0VlBqUm15QWUrS0x1NUNVNG1ySy9PdmxSckhhaXNHWjE0WFBHbjh4VUdobExvVllubUtYM2w0UDZaQgp1N3hIZGZFeGVmY2F4Mmd5NDZIRU5LVWc0amdGdGwvL1gwMEwvMEd6ak5MeG43K0VLdFBER2pvNHlhcVFvbFhSCkFFa25RUUtCZ0Z4S0I0MUFYbXphekdMRnhGZ3NJcDRNVXAxNndK
// PVCInfo identifies one PVC taking part in a migration, together with the
// kubeconfig/context needed to reach the cluster that holds it.
type PVCInfo struct {
	KubeconfigPath string
	Context        string
	// Namespace of the PVC; when empty, the namespace of the kubeconfig
	// context is used instead (see migrator.buildMigration).
	Namespace string
	Name      string
	// Path within the volume to migrate.
	Path string
}

// Request holds every user-configurable option of a migration run.
type Request struct {
	Source *PVCInfo
	Dest   *PVCInfo
	// DeleteExtraneousFiles removes files on the destination that no longer
	// exist on the source (rsync --delete; see rsync.Cmd.Delete).
	DeleteExtraneousFiles bool
	// IgnoreMounted proceeds even when a PVC is currently mounted by a pod
	// (see migrator.handleMounted).
	IgnoreMounted bool
	// NoChown skips ownership changes on copied files (rsync --no-o --no-g).
	NoChown bool
	// SkipCleanup leaves the migration resources in place after the run —
	// presumably for debugging; confirm against the strategy implementations.
	SkipCleanup   bool
	NoProgressBar bool
	// SourceMountReadOnly mounts the source PVC read-only in the helper pod.
	SourceMountReadOnly bool
	// KeyAlgorithm selects the SSH key algorithm for the transfer tunnel
	// (see the ssh package).
	KeyAlgorithm string
	// HelmTimeout bounds helm install/uninstall operations.
	HelmTimeout time.Duration
	// The four Helm*Values fields are passed through to helm, mirroring its
	// -f / --set / --set-file / --set-string flags respectively.
	HelmValuesFiles  []string
	HelmValues       []string
	HelmFileValues   []string
	HelmStringValues []string
	// Strategies is the ordered list of strategy names to attempt
	// (see Migrator.Run).
	Strategies []string
	// DestHostOverride overrides the destination host used for the transfer.
	DestHostOverride string
	// LBSvcTimeout bounds waiting for a LoadBalancer service address
	// (see k8s.GetServiceAddress).
	LBSvcTimeout time.Duration
	// Compress enables rsync compression (-z; see rsync.Cmd.Compress).
	Compress bool
}

// Migration is a fully resolved Request: the helm chart is loaded and both
// PVCs have been inspected (access modes, mounting node).
type Migration struct {
	Chart      *chart.Chart
	Request    *Request
	SourceInfo *pvc.Info
	DestInfo   *pvc.Info
}

// Attempt is a single try of a Migration with one particular strategy. ID is
// a short random string that also seeds HelmReleaseNamePrefix, keeping the
// resources of concurrent or repeated attempts from clashing.
type Attempt struct {
	ID                    string
	HelmReleaseNamePrefix string
	Migration             *Migration
}
15 | "github.com/utkuozdemir/pv-migrate/util" 16 | ) 17 | 18 | const ( 19 | attemptIDLength = 5 20 | ) 21 | 22 | type ( 23 | strategyMapGetter func(names []string) (map[string]strategy.Strategy, error) 24 | clusterClientGetter func(kubeconfigPath, context string, logger *slog.Logger) (*k8s.ClusterClient, error) 25 | ) 26 | 27 | type Migrator struct { 28 | getKubeClient clusterClientGetter 29 | getStrategyMap strategyMapGetter 30 | } 31 | 32 | // New creates a new migrator. 33 | func New() *Migrator { 34 | return &Migrator{ 35 | getKubeClient: k8s.GetClusterClient, 36 | getStrategyMap: strategy.GetStrategiesMapForNames, 37 | } 38 | } 39 | 40 | func (m *Migrator) Run(ctx context.Context, request *migration.Request, logger *slog.Logger) error { 41 | nameToStrategyMap, err := m.getStrategyMap(request.Strategies) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | logger = logger.With("source", request.Source.Namespace+"/"+request.Source.Name, 47 | "dest", request.Dest.Namespace+"/"+request.Dest.Name) 48 | 49 | mig, err := m.buildMigration(ctx, request, logger) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | logger.Info("💭 Attempting migration", "strategies", strings.Join(request.Strategies, ",")) 55 | 56 | for _, name := range request.Strategies { 57 | attemptID := util.RandomHexadecimalString(attemptIDLength) 58 | 59 | attemptLogger := logger.With("attempt_id", attemptID, "strategy", name) 60 | 61 | attemptLogger.Info("🚁 Attempt using strategy") 62 | 63 | attempt := migration.Attempt{ 64 | ID: attemptID, 65 | HelmReleaseNamePrefix: "pv-migrate-" + attemptID, 66 | Migration: mig, 67 | } 68 | 69 | s := nameToStrategyMap[name] 70 | 71 | if runErr := s.Run(ctx, &attempt, attemptLogger); runErr != nil { 72 | if errors.Is(runErr, strategy.ErrUnaccepted) { 73 | attemptLogger.Info( 74 | "🦊 This strategy cannot handle this migration, will try the next one", 75 | ) 76 | 77 | continue 78 | } 79 | 80 | attemptLogger.Warn("🔶 Migration failed with this strategy, "+ 81 | 
"will try with the remaining strategies", "error", runErr) 82 | 83 | continue 84 | } 85 | 86 | attemptLogger.Info("✅ Migration succeeded") 87 | 88 | return nil 89 | } 90 | 91 | return errors.New("all strategies failed for this migration") 92 | } 93 | 94 | func (m *Migrator) buildMigration(ctx context.Context, request *migration.Request, 95 | logger *slog.Logger, 96 | ) (*migration.Migration, error) { 97 | chart, err := helm.LoadChart() 98 | if err != nil { 99 | return nil, fmt.Errorf("failed to load helm chart: %w", err) 100 | } 101 | 102 | source := request.Source 103 | dest := request.Dest 104 | 105 | sourceClient, destClient, err := m.getClusterClients(request, logger) 106 | if err != nil { 107 | return nil, err 108 | } 109 | 110 | sourceNs := source.Namespace 111 | if sourceNs == "" { 112 | sourceNs = sourceClient.NsInContext 113 | } 114 | 115 | destNs := dest.Namespace 116 | if destNs == "" { 117 | destNs = destClient.NsInContext 118 | } 119 | 120 | sourcePvcInfo, err := pvc.New(ctx, sourceClient, sourceNs, source.Name) 121 | if err != nil { 122 | return nil, fmt.Errorf("failed to get PVC info for source PVC: %w", err) 123 | } 124 | 125 | destPvcInfo, err := pvc.New(ctx, destClient, destNs, dest.Name) 126 | if err != nil { 127 | return nil, fmt.Errorf("failed to get PVC info for destination PVC: %w", err) 128 | } 129 | 130 | err = handleMountedPVCs(request, sourcePvcInfo, destPvcInfo, logger) 131 | if err != nil { 132 | return nil, err 133 | } 134 | 135 | if !destPvcInfo.SupportsRWO && !destPvcInfo.SupportsRWX { 136 | return nil, errors.New("destination PVC is not writable") 137 | } 138 | 139 | mig := migration.Migration{ 140 | Chart: chart, 141 | Request: request, 142 | SourceInfo: sourcePvcInfo, 143 | DestInfo: destPvcInfo, 144 | } 145 | 146 | return &mig, nil 147 | } 148 | 149 | func (m *Migrator) getClusterClients(r *migration.Request, 150 | logger *slog.Logger, 151 | ) (*k8s.ClusterClient, *k8s.ClusterClient, error) { 152 | source := r.Source 153 | dest 
:= r.Dest 154 | 155 | sourceClient, err := m.getKubeClient(source.KubeconfigPath, source.Context, logger) 156 | if err != nil { 157 | return nil, nil, err 158 | } 159 | 160 | destClient := sourceClient 161 | if source.KubeconfigPath != dest.KubeconfigPath || source.Context != dest.Context { 162 | destClient, err = m.getKubeClient(dest.KubeconfigPath, dest.Context, logger) 163 | if err != nil { 164 | return nil, nil, err 165 | } 166 | } 167 | 168 | return sourceClient, destClient, nil 169 | } 170 | 171 | func handleMountedPVCs( 172 | r *migration.Request, 173 | sourcePvcInfo, destPvcInfo *pvc.Info, 174 | logger *slog.Logger, 175 | ) error { 176 | ignoreMounted := r.IgnoreMounted 177 | 178 | err := handleMounted(sourcePvcInfo, ignoreMounted, logger) 179 | if err != nil { 180 | return err 181 | } 182 | 183 | err = handleMounted(destPvcInfo, ignoreMounted, logger) 184 | if err != nil { 185 | return err 186 | } 187 | 188 | return nil 189 | } 190 | 191 | func handleMounted(info *pvc.Info, ignoreMounted bool, logger *slog.Logger) error { 192 | if info.MountedNode == "" { 193 | return nil 194 | } 195 | 196 | if ignoreMounted { 197 | logger.Info("💡 PVC is mounted to a node, but --ignore-mounted is requested, ignoring...", 198 | "pvc", info.Claim.Namespace+"/"+info.Claim.Name, "mounted_node", info.MountedNode) 199 | 200 | return nil 201 | } 202 | 203 | return fmt.Errorf("PVC is mounted to a node and --ignore-mounted is not requested: "+ 204 | "node: %s claim %s", info.MountedNode, info.Claim.Name) 205 | } 206 | -------------------------------------------------------------------------------- /migrator/migrator_test.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "testing" 7 | 8 | "github.com/neilotoole/slogt" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | corev1 "k8s.io/api/core/v1" 12 | "k8s.io/apimachinery/pkg/api/resource" 13 
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/client-go/kubernetes/fake" 15 | 16 | "github.com/utkuozdemir/pv-migrate/k8s" 17 | "github.com/utkuozdemir/pv-migrate/migration" 18 | "github.com/utkuozdemir/pv-migrate/strategy" 19 | ) 20 | 21 | const ( 22 | sourceNS = "namespace1" 23 | destNS = "namespace2" 24 | sourcePVC = "pvc1" 25 | destPVC = "pvc2" 26 | sourcePod = "pod1" 27 | destPod = "pod2" 28 | sourceNode = "node1" 29 | destNode = "node2" 30 | ) 31 | 32 | func TestBuildTask(t *testing.T) { 33 | t.Parallel() 34 | 35 | ctx := t.Context() 36 | 37 | logger := slogt.New(t) 38 | 39 | m := Migrator{getKubeClient: fakeClusterClientGetter()} 40 | mig := buildMigration(true) 41 | tsk, err := m.buildMigration(ctx, mig, logger) 42 | require.NoError(t, err) 43 | 44 | sourceInfo := tsk.SourceInfo 45 | destInfo := tsk.DestInfo 46 | 47 | assert.Equal(t, "namespace1", sourceInfo.Claim.Namespace) 48 | assert.Equal(t, "pvc1", sourceInfo.Claim.Name) 49 | assert.Equal(t, "node1", sourceInfo.MountedNode) 50 | assert.False(t, sourceInfo.SupportsRWO) 51 | assert.True(t, sourceInfo.SupportsROX) 52 | assert.False(t, sourceInfo.SupportsRWX) 53 | assert.Equal(t, "namespace2", destInfo.Claim.Namespace) 54 | assert.Equal(t, "pvc2", destInfo.Claim.Name) 55 | assert.Equal(t, "node2", destInfo.MountedNode) 56 | assert.True(t, destInfo.SupportsRWO) 57 | assert.False(t, destInfo.SupportsROX) 58 | assert.True(t, destInfo.SupportsRWX) 59 | } 60 | 61 | func TestBuildTaskMounted(t *testing.T) { 62 | t.Parallel() 63 | 64 | ctx := t.Context() 65 | logger := slogt.New(t) 66 | 67 | m := Migrator{getKubeClient: fakeClusterClientGetter()} 68 | mig := buildMigration(false) 69 | tsk, err := m.buildMigration(ctx, mig, logger) 70 | assert.Nil(t, tsk) 71 | require.Error(t, err) 72 | } 73 | 74 | func TestRunStrategiesInOrder(t *testing.T) { 75 | t.Parallel() 76 | 77 | ctx := t.Context() 78 | 79 | logger := slogt.New(t) 80 | 81 | var result []int 82 | 83 | str1 := mockStrategy{ 84 | runFunc: 
func(_ context.Context, _ *migration.Attempt) error { 85 | result = append(result, 1) 86 | 87 | return strategy.ErrUnaccepted 88 | }, 89 | } 90 | 91 | str2 := mockStrategy{ 92 | runFunc: func(_ context.Context, _ *migration.Attempt) error { 93 | result = append(result, 2) 94 | 95 | return nil 96 | }, 97 | } 98 | 99 | str3 := mockStrategy{ 100 | runFunc: func(_ context.Context, _ *migration.Attempt) error { 101 | result = append(result, 3) 102 | 103 | return strategy.ErrUnaccepted 104 | }, 105 | } 106 | 107 | migrator := Migrator{ 108 | getKubeClient: fakeClusterClientGetter(), 109 | getStrategyMap: func([]string) (map[string]strategy.Strategy, error) { 110 | return map[string]strategy.Strategy{ 111 | "str1": &str1, 112 | "str2": &str2, 113 | "str3": &str3, 114 | }, nil 115 | }, 116 | } 117 | 118 | strs := []string{"str3", "str1", "str2"} 119 | mig := buildMigrationRequestWithStrategies(strs, true) 120 | 121 | err := migrator.Run(ctx, mig, logger) 122 | require.NoError(t, err) 123 | assert.Equal(t, []int{3, 1, 2}, result) 124 | } 125 | 126 | func buildMigration(ignoreMounted bool) *migration.Request { 127 | return buildMigrationRequestWithStrategies(strategy.DefaultStrategies, ignoreMounted) 128 | } 129 | 130 | func buildMigrationRequestWithStrategies( 131 | strategies []string, 132 | ignoreMounted bool, 133 | ) *migration.Request { 134 | return &migration.Request{ 135 | Source: &migration.PVCInfo{ 136 | Namespace: sourceNS, 137 | Name: sourcePVC, 138 | }, 139 | Dest: &migration.PVCInfo{ 140 | Namespace: destNS, 141 | Name: destPVC, 142 | }, 143 | IgnoreMounted: ignoreMounted, 144 | Strategies: strategies, 145 | } 146 | } 147 | 148 | func fakeClusterClientGetter() clusterClientGetter { 149 | pvcA := buildTestPVC(sourceNS, sourcePVC, corev1.ReadOnlyMany) 150 | pvcB := buildTestPVC(destNS, destPVC, corev1.ReadWriteOnce, corev1.ReadWriteMany) 151 | podA := buildTestPod(sourceNS, sourcePod, sourceNode, sourcePVC) 152 | podB := buildTestPod(destNS, destPod, destNode, 
destPVC) 153 | 154 | return func(string, string, *slog.Logger) (*k8s.ClusterClient, error) { 155 | return &k8s.ClusterClient{ 156 | KubeClient: fake.NewSimpleClientset(pvcA, pvcB, podA, podB), 157 | }, nil 158 | } 159 | } 160 | 161 | func buildTestPod(namespace string, name string, node string, pvc string) *corev1.Pod { 162 | return &corev1.Pod{ 163 | ObjectMeta: metav1.ObjectMeta{ 164 | Namespace: namespace, 165 | Name: name, 166 | }, 167 | Spec: corev1.PodSpec{ 168 | NodeName: node, 169 | Volumes: []corev1.Volume{ 170 | {Name: "a", VolumeSource: corev1.VolumeSource{ 171 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 172 | ClaimName: pvc, 173 | }, 174 | }}, 175 | }, 176 | }, 177 | } 178 | } 179 | 180 | func buildTestPVC(namespace string, name string, 181 | accessModes ...corev1.PersistentVolumeAccessMode, 182 | ) *corev1.PersistentVolumeClaim { 183 | return &corev1.PersistentVolumeClaim{ 184 | ObjectMeta: metav1.ObjectMeta{ 185 | Namespace: namespace, 186 | Name: name, 187 | }, 188 | Spec: corev1.PersistentVolumeClaimSpec{ 189 | AccessModes: accessModes, 190 | Resources: corev1.VolumeResourceRequirements{ 191 | Requests: map[corev1.ResourceName]resource.Quantity{ 192 | "storage": resource.MustParse("512Mi"), 193 | }, 194 | }, 195 | }, 196 | } 197 | } 198 | 199 | type mockStrategy struct { 200 | runFunc func(context.Context, *migration.Attempt) error 201 | } 202 | 203 | func (m *mockStrategy) Run(ctx context.Context, attempt *migration.Attempt, _ *slog.Logger) error { 204 | return m.runFunc(ctx, attempt) 205 | } 206 | -------------------------------------------------------------------------------- /pvc/info.go: -------------------------------------------------------------------------------- 1 | package pvc 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/client-go/kubernetes" 10 | 11 | "github.com/utkuozdemir/pv-migrate/k8s" 12 | ) 13 | 14 | type Info 
// New fetches the PVC namespace/name through the given cluster client and
// builds an Info for it: which access modes the claim declares, which node
// (if any) currently mounts it, and the helm affinity values needed to
// schedule the migration pod accordingly. It fails if the PVC cannot be
// fetched, or if it uses ReadWriteOncePod while already mounted (the
// migration pod could never attach it).
//
//nolint:cyclop
func New(
	ctx context.Context,
	client *k8s.ClusterClient,
	namespace string,
	name string,
) (*Info, error) {
	kubeClient := client.KubeClient

	claim, err := kubeClient.CoreV1().PersistentVolumeClaims(namespace).
		Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get pvc %s/%s: %w", namespace, name, err)
	}

	// Aggregate the declared access modes into capability flags.
	supportsRWO := false
	supportsROX := false
	supportsRWX := false

	readWriteOncePod := false

	for _, accessMode := range claim.Spec.AccessModes {
		switch accessMode {
		case corev1.ReadWriteOncePod:
			// RWOP implies single-node read-write, but additionally restricts
			// the volume to a single pod — tracked separately for the check below.
			supportsRWO = true
			readWriteOncePod = true
		case corev1.ReadWriteOnce:
			supportsRWO = true
		case corev1.ReadOnlyMany:
			supportsROX = true
		case corev1.ReadWriteMany:
			supportsRWX = true
		}
	}

	mountedNode, err := findMountedNode(ctx, kubeClient, claim)
	if err != nil {
		return nil, err
	}

	// An RWOP volume that is already attached to a pod cannot be attached to
	// the migration pod as well, so fail fast with a descriptive error.
	if readWriteOncePod && mountedNode != "" {
		return nil, fmt.Errorf("pvc %s/%s is mounted to a pod and has ReadWriteOncePod "+
			"access mode, it cannot be mounted to the migration pod", namespace, name)
	}

	// Without any multi-node mode (RWX/ROX) the migration pod must land on
	// the node that already mounts the volume — make the affinity mandatory.
	required := !supportsRWX && !supportsROX

	affinityHelmValues := buildAffinityHelmValues(mountedNode, required)

	return &Info{
		ClusterClient:      client,
		Claim:              claim,
		MountedNode:        mountedNode,
		AffinityHelmValues: affinityHelmValues,
		SupportsRWO:        supportsRWO,
		SupportsROX:        supportsROX,
		SupportsRWX:        supportsRWX,
	}, nil
}
// buildAffinityHelmValues renders the "affinity" helm values used to
// co-locate the migration pod with the node that currently mounts the PVC.
// It returns nil when no node is mounting the volume. When required is true
// the affinity is a hard scheduling constraint; otherwise it is a weighted
// preference.
func buildAffinityHelmValues(nodeName string, required bool) map[string]any {
	if nodeName == "" {
		return nil
	}

	// A node selector term matching exactly the mounting node by name.
	matchThisNode := map[string]any{
		"matchFields": []map[string]any{
			{
				"key":      "metadata.name",
				"operator": "In",
				"values":   []string{nodeName},
			},
		},
	}

	var nodeAffinity map[string]any

	if required {
		nodeAffinity = map[string]any{
			"requiredDuringSchedulingIgnoredDuringExecution": map[string]any{
				"nodeSelectorTerms": []map[string]any{matchThisNode},
			},
		}
	} else {
		nodeAffinity = map[string]any{
			"preferredDuringSchedulingIgnoredDuringExecution": []map[string]any{
				{
					"weight":     100, //nolint:mnd
					"preference": matchThisNode,
				},
			},
		}
	}

	return map[string]any{"nodeAffinity": nodeAffinity}
}
4 | 5 | //nolint:funlen 6 | package pvc_test 7 | 8 | import ( 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | corev1 "k8s.io/api/core/v1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/runtime" 16 | "k8s.io/client-go/kubernetes/fake" 17 | 18 | "github.com/utkuozdemir/pv-migrate/k8s" 19 | "github.com/utkuozdemir/pv-migrate/pvc" 20 | ) 21 | 22 | func TestNew(t *testing.T) { 23 | t.Parallel() 24 | 25 | t.Run("should have required affinity when only RWO is supported", func(t *testing.T) { 26 | t.Parallel() 27 | 28 | ctx := t.Context() 29 | clusterClient := buildClusterClient("node-2", corev1.ReadWriteOnce) 30 | 31 | pvcInfo, err := pvc.New(ctx, clusterClient, "testns", "test") 32 | require.NoError(t, err) 33 | 34 | assert.Equal(t, clusterClient, pvcInfo.ClusterClient) 35 | assert.Equal(t, "test", pvcInfo.Claim.Name) 36 | assert.Equal(t, "testns", pvcInfo.Claim.Namespace) 37 | assert.Equal(t, "node-2", pvcInfo.MountedNode) 38 | assert.True(t, pvcInfo.SupportsRWO) 39 | assert.False(t, pvcInfo.SupportsROX) 40 | assert.False(t, pvcInfo.SupportsRWX) 41 | assert.Equal(t, map[string]any{ 42 | "nodeAffinity": map[string]any{ 43 | "requiredDuringSchedulingIgnoredDuringExecution": map[string]any{ 44 | "nodeSelectorTerms": []map[string]any{ 45 | { 46 | "matchFields": []map[string]any{ 47 | { 48 | "key": "metadata.name", 49 | "operator": "In", 50 | "values": []string{"node-2"}, 51 | }, 52 | }, 53 | }, 54 | }, 55 | }, 56 | }, 57 | }, pvcInfo.AffinityHelmValues) 58 | }) 59 | 60 | t.Run("should have preferred affinity if it supports ROX", func(t *testing.T) { 61 | t.Parallel() 62 | 63 | ctx := t.Context() 64 | clusterClient := buildClusterClient("node-2", corev1.ReadWriteOnce, corev1.ReadOnlyMany) 65 | 66 | pvcInfo, err := pvc.New(ctx, clusterClient, "testns", "test") 67 | require.NoError(t, err) 68 | 69 | assert.Equal(t, clusterClient, pvcInfo.ClusterClient) 70 | assert.Equal(t, "test", 
pvcInfo.Claim.Name) 71 | assert.Equal(t, "testns", pvcInfo.Claim.Namespace) 72 | assert.Equal(t, "node-2", pvcInfo.MountedNode) 73 | assert.True(t, pvcInfo.SupportsRWO) 74 | assert.True(t, pvcInfo.SupportsROX) 75 | assert.False(t, pvcInfo.SupportsRWX) 76 | assert.Equal(t, map[string]any{ 77 | "nodeAffinity": map[string]any{ 78 | "preferredDuringSchedulingIgnoredDuringExecution": []map[string]any{ 79 | { 80 | "weight": 100, 81 | "preference": map[string]any{ 82 | "matchFields": []map[string]any{ 83 | { 84 | "key": "metadata.name", 85 | "operator": "In", 86 | "values": []string{"node-2"}, 87 | }, 88 | }, 89 | }, 90 | }, 91 | }, 92 | }, 93 | }, pvcInfo.AffinityHelmValues) 94 | }) 95 | 96 | t.Run("ReadWriteOncePod with mounting pod is not supported", func(t *testing.T) { 97 | t.Parallel() 98 | 99 | ctx := t.Context() 100 | clusterClient := buildClusterClient("node-2", corev1.ReadWriteOncePod) 101 | 102 | _, err := pvc.New(ctx, clusterClient, "testns", "test") 103 | require.ErrorContains(t, err, "ReadWriteOncePod") 104 | }) 105 | 106 | t.Run("ReadWriteOncePod with no mounting pod is supported", func(t *testing.T) { 107 | t.Parallel() 108 | 109 | ctx := t.Context() 110 | clusterClient := buildClusterClient("", corev1.ReadWriteOncePod) 111 | 112 | pvcInfo, err := pvc.New(ctx, clusterClient, "testns", "test") 113 | require.NoError(t, err) 114 | 115 | assert.Empty(t, pvcInfo.MountedNode) 116 | }) 117 | } 118 | 119 | func buildClusterClient( 120 | mountingNode string, 121 | pvcAccessModes ...corev1.PersistentVolumeAccessMode, 122 | ) *k8s.ClusterClient { 123 | testPVC := &corev1.PersistentVolumeClaim{ 124 | ObjectMeta: metav1.ObjectMeta{ 125 | Name: "test", 126 | Namespace: "testns", 127 | }, 128 | Spec: corev1.PersistentVolumeClaimSpec{ 129 | AccessModes: pvcAccessModes, 130 | }, 131 | } 132 | 133 | pod1 := &corev1.Pod{ 134 | ObjectMeta: metav1.ObjectMeta{ 135 | Name: "pod1", 136 | Namespace: "testns", 137 | }, 138 | Spec: corev1.PodSpec{ 139 | NodeName: "node-1", 140 | 
Volumes: []corev1.Volume{ 141 | { 142 | Name: "something", 143 | VolumeSource: corev1.VolumeSource{ 144 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 145 | ClaimName: "something", 146 | }, 147 | }, 148 | }, 149 | }, 150 | }, 151 | } 152 | 153 | pod2 := &corev1.Pod{ 154 | ObjectMeta: metav1.ObjectMeta{ 155 | Name: "pod2", 156 | Namespace: "testns", 157 | }, 158 | Spec: corev1.PodSpec{ 159 | NodeName: mountingNode, 160 | Volumes: []corev1.Volume{ 161 | { 162 | Name: "something-else", 163 | VolumeSource: corev1.VolumeSource{ 164 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 165 | ClaimName: "something-else", 166 | }, 167 | }, 168 | }, 169 | }, 170 | }, 171 | } 172 | 173 | if mountingNode != "" { 174 | pod2.Spec.Volumes = append(pod2.Spec.Volumes, corev1.Volume{ 175 | Name: "test", 176 | VolumeSource: corev1.VolumeSource{ 177 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 178 | ClaimName: "test", 179 | }, 180 | }, 181 | }) 182 | } 183 | 184 | objects := []runtime.Object{ 185 | testPVC, 186 | pod1, 187 | pod2, 188 | } 189 | kubeClient := fake.NewSimpleClientset(objects...) 
// Cmd describes a single rsync invocation and can render it as a shell
// command string via Build. At most one side (source or destination) may be
// reached over SSH.
type Cmd struct {
	Port        int
	NoChown     bool
	Delete      bool
	SrcUseSSH   bool
	DestUseSSH  bool
	Command     string
	SrcSSHUser  string
	SrcSSHHost  string
	SrcPath     string
	DestSSHUser string
	DestSSHHost string
	DestPath    string
	Compress    bool
}

// Build renders the full rsync command line. It returns an error when both
// SrcUseSSH and DestUseSSH are set, since rsync cannot have two remote ends.
func (c *Cmd) Build() (string, error) {
	if c.SrcUseSSH && c.DestUseSSH {
		return "", errors.New("cannot use ssh on both source and destination")
	}

	// Allow the rsync binary itself to be overridden.
	binary := "rsync"
	if c.Command != "" {
		binary = c.Command
	}

	// Transport: non-interactive ssh with host-key checks disabled (the sshd
	// peer is ephemeral, its key is generated per migration).
	transport := []string{
		"ssh", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null",
		"-o", "ConnectTimeout=5",
	}
	if c.Port != 0 {
		transport = append(transport, "-p", strconv.Itoa(c.Port))
	}

	// Base flags: archive mode plus machine-readable overall progress
	// (progress2) with per-file chatter suppressed.
	flags := []string{
		"-av", "--info=progress2,misc0,flist0",
		"--no-inc-recursive", "-e", `"` + strings.Join(transport, " ") + `"`,
	}

	// Optional flags, appended in a fixed order.
	for _, opt := range []struct {
		enabled bool
		args    []string
	}{
		{c.Compress, []string{"-z"}},
		{c.NoChown, []string{"--no-o", "--no-g"}},
		{c.Delete, []string{"--delete"}},
	} {
		if opt.enabled {
			flags = append(flags, opt.args...)
		}
	}

	parts := []string{binary, strings.Join(flags, " "), c.buildSrc(), c.buildDest()}

	return strings.Join(parts, " "), nil
}

// buildSrc renders the source endpoint (user@host:path when reached via SSH).
func (c *Cmd) buildSrc() string {
	if !c.SrcUseSSH {
		return c.SrcPath
	}

	return sshEndpoint(c.SrcSSHUser, c.SrcSSHHost, c.SrcPath)
}

// buildDest renders the destination endpoint (user@host:path when reached via SSH).
func (c *Cmd) buildDest() string {
	if !c.DestUseSSH {
		return c.DestPath
	}

	return sshEndpoint(c.DestSSHUser, c.DestSSHHost, c.DestPath)
}

// sshEndpoint formats an rsync-over-ssh endpoint, defaulting the user to root.
func sshEndpoint(user, host, path string) string {
	if user == "" {
		user = "root"
	}

	return fmt.Sprintf("%s@%s:%s", user, host, path)
}
eg errgroup.Group //nolint:varnamelen 60 | 61 | ctx, cancel := context.WithCancel(ctx) 62 | defer cancel() 63 | 64 | logStream, err := l.options.LogStreamFunc(ctx) 65 | if err != nil { 66 | return fmt.Errorf("failed to get log stream: %w", err) 67 | } 68 | 69 | defer func() { 70 | if closeErr := logStream.Close(); closeErr != nil { 71 | logger.Error("failed to close log stream", "error", closeErr) 72 | } 73 | }() 74 | 75 | eg.Go(func() error { 76 | defer cancel() 77 | 78 | return tailLogs(ctx, logStream, logCh) 79 | }) 80 | 81 | eg.Go(func() error { 82 | defer cancel() 83 | 84 | return handleLogs(ctx, logCh, l.successCh, l.options.ShowProgressBar, logger) 85 | }) 86 | 87 | if err = eg.Wait(); err != nil { 88 | return fmt.Errorf("failed to wait for log tailing: %w", err) 89 | } 90 | 91 | return nil 92 | } 93 | 94 | func tailLogs(ctx context.Context, stream io.Reader, logCh chan<- string) error { 95 | scanner := bufio.NewScanner(stream) 96 | 97 | for { 98 | select { 99 | case <-ctx.Done(): 100 | return ctx.Err() //nolint:wrapcheck 101 | default: 102 | if scanner.Scan() { 103 | select { 104 | case <-ctx.Done(): 105 | return ctx.Err() //nolint:wrapcheck 106 | case logCh <- scanner.Text(): 107 | } 108 | } 109 | } 110 | } 111 | } 112 | 113 | //nolint:cyclop,funlen 114 | func handleLogs(ctx context.Context, logCh <-chan string, successCh <-chan struct{}, 115 | showProgressBar bool, logger *slog.Logger, 116 | ) error { 117 | var progressBar *progressbar.ProgressBar 118 | 119 | if showProgressBar { 120 | progressBar = progressbar.NewOptions64( 121 | 1, 122 | progressbar.OptionSetWriter(os.Stderr), 123 | progressbar.OptionEnableColorCodes(true), 124 | progressbar.OptionShowBytes(true), 125 | progressbar.OptionSetRenderBlankState(true), 126 | progressbar.OptionFullWidth(), 127 | progressbar.OptionOnCompletion(func() { 128 | fmt.Fprintln(os.Stderr) 129 | }), 130 | progressbar.OptionSetDescription("📂 Copying data..."), 131 | ) 132 | } 133 | 134 | for { 135 | select { 136 | case 
<-ctx.Done(): 137 | return ctx.Err() //nolint:wrapcheck 138 | case <-successCh: 139 | if showProgressBar { 140 | if err := progressBar.Finish(); err != nil { 141 | logger.Debug("failed to finish progress bar", "error", err) 142 | } 143 | } 144 | 145 | return nil 146 | case logLine := <-logCh: 147 | progress, err := ParseLine(logLine) 148 | if err != nil { 149 | logger.Log(ctx, slog.LevelDebug-1, "failed to parse progress line", "error", err) 150 | 151 | continue 152 | } 153 | 154 | if !showProgressBar { 155 | logger.Debug( 156 | logLine, 157 | slog.String("source", "rsync"), 158 | slog.Group( 159 | "progress", 160 | "transferred", 161 | progress.Transferred, 162 | "total", 163 | progress.Total, 164 | "percentage", 165 | progress.Percentage, 166 | ), 167 | ) 168 | } else { 169 | if err = updateProgressBar(progressBar, progress.Transferred, progress.Total); err != nil { 170 | logger.Warn("failed to update progress bar", "error", err, "progress", progress) 171 | } 172 | } 173 | 174 | if progress.Percentage >= 100 { //nolint:mnd 175 | return nil 176 | } 177 | } 178 | } 179 | } 180 | 181 | func updateProgressBar(progressBar *progressbar.ProgressBar, transferred, total int64) error { 182 | progressBar.ChangeMax64(total) 183 | 184 | if total == 0 { // cannot update progress bar when its max is 0 185 | return nil 186 | } 187 | 188 | if err := progressBar.Set64(transferred); err != nil { 189 | return fmt.Errorf("failed to set progress bar value: %w", err) 190 | } 191 | 192 | return nil 193 | } 194 | -------------------------------------------------------------------------------- /rsync/progress/progress.go: -------------------------------------------------------------------------------- 1 | package progress 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "regexp" 7 | "strconv" 8 | "strings" 9 | ) 10 | 11 | var ( 12 | progressRegex = regexp.MustCompile( 13 | `\s*(?P[0-9]+(,[0-9]+)*)\s+(?P[0-9]{1,3})%`, 14 | ) 15 | rsyncEndRegex = regexp.MustCompile(`\s*total size is 
var (
	// progressRegex matches an rsync "--info=progress2" progress line, e.g.
	// "  1,234,567  42%  10.23MB/s 0:00:01", capturing the transferred byte
	// count (with thousands separators) and the integer percentage.
	//
	// The named groups MUST be called "bytes" and "percentage": ParseLine
	// looks them up by these exact names via findNamedMatches. (The names
	// were stripped to `(?P…` in the previous revision, which is not even
	// valid regex syntax and would panic in MustCompile at init.)
	progressRegex = regexp.MustCompile(
		`\s*(?P<bytes>[0-9]+(,[0-9]+)*)\s+(?P<percentage>[0-9]{1,3})%`,
	)
	// rsyncEndRegex matches rsync's final summary line, e.g.
	// "total size is 1,879,048,192 speedup is 31,548.30".
	rsyncEndRegex = regexp.MustCompile(`\s*total size is (?P<bytes>[0-9]+(,[0-9]+)*)`)
)

const (
	percentHundred = 100

	// strconv.ParseInt parameters for the comma-stripped byte counts.
	bytesTransferredIntBase   = 10
	bytesTransferredInt64Bits = 64
)

// CanDisplayProgressBarContextKey is a context key for whether a progress bar can be displayed.
type CanDisplayProgressBarContextKey struct{}

// Progress is one parsed snapshot of rsync transfer progress.
type Progress struct {
	Line        string // raw log line the snapshot was parsed from
	Percentage  int    // 0..100
	Transferred int64  // bytes transferred so far
	Total       int64  // total bytes; estimated from Transferred/Percentage mid-transfer
}

// ParseLine parses a single rsync log line into a Progress.
//
// The terminal "total size is N" summary line yields a 100% snapshot. A
// progress2 line yields the transferred byte count and percentage, with the
// total estimated from their ratio (rsync does not print the total directly).
// Lines matching neither pattern return a "no match" error.
func ParseLine(line string) (Progress, error) {
	endMatches := findNamedMatches(rsyncEndRegex, line)
	if len(endMatches) > 0 {
		total, err := parseNumBytes(endMatches["bytes"])
		if err != nil {
			return Progress{}, err
		}

		return Progress{
			Line:        line,
			Percentage:  percentHundred,
			Transferred: total,
			Total:       total,
		}, nil
	}

	prMatches := findNamedMatches(progressRegex, line)
	if len(prMatches) == 0 {
		return Progress{}, errors.New("no match")
	}

	percentage, err := strconv.Atoi(prMatches["percentage"])
	if err != nil {
		return Progress{}, fmt.Errorf("cannot parse percentage: %w", err)
	}

	// At 0% the total cannot be estimated (division by zero) — report zeros.
	if percentage == 0 {
		return Progress{
			Line:        line,
			Percentage:  0,
			Transferred: 0,
			Total:       0,
		}, nil
	}

	transferred, err := parseNumBytes(prMatches["bytes"])
	if err != nil {
		return Progress{}, err
	}

	total := int64((float64(transferred) / float64(percentage)) * percentHundred)

	if transferred > total {
		// in case of a rounding error, update total, since transferred is more accurate
		total = transferred
	}

	return Progress{
		Line:        line,
		Percentage:  percentage,
		Transferred: transferred,
		Total:       total,
	}, nil
}

// parseNumBytes parses a byte count that may contain comma thousands
// separators (rsync's default human formatting), e.g. "1,879,048,192".
func parseNumBytes(numBytes string) (int64, error) {
	parsed, err := strconv.ParseInt(strings.ReplaceAll(numBytes, ",", ""),
		bytesTransferredIntBase, bytesTransferredInt64Bits)
	if err != nil {
		return 0, fmt.Errorf("cannot parse number of bytes: %w", err)
	}

	return parsed, nil
}

// findNamedMatches returns the named capture groups of r matched against str,
// keyed by group name. It returns an empty map when there is no match.
// Unnamed groups (including the implicit whole-match group 0) are skipped,
// so the map never contains a spurious "" key.
func findNamedMatches(r *regexp.Regexp, str string) map[string]string {
	results := map[string]string{}

	match := r.FindStringSubmatch(str)
	names := r.SubexpNames()

	for i, value := range match {
		if names[i] != "" {
			results[names[i]] = value
		}
	}

	return results
}
// KeyAlgorithm identifies a supported SSH key algorithm.
type KeyAlgorithm string

const (
	RSAKeyAlgorithm     = "rsa"
	Ed25519KeyAlgorithm = "ed25519"

	// RSAKeyLengthBits is the modulus size for generated RSA keys.
	RSAKeyLengthBits = 2048
)

// KeyAlgorithms lists the algorithm names accepted by CreateSSHKeyPair.
var KeyAlgorithms = []string{RSAKeyAlgorithm, Ed25519KeyAlgorithm}

// CreateSSHKeyPair generates a fresh SSH key pair for the given algorithm and
// returns (publicKey, privateKey, error): the public key in authorized_keys
// format and the private key as a PEM block.
func CreateSSHKeyPair(keyAlgorithm string) (string, string, error) {
	switch keyAlgorithm {
	case RSAKeyAlgorithm:
		return createSSHRSAKeyPair()
	case Ed25519KeyAlgorithm:
		return createSSHEd25519KeyPair()
	default:
		return "", "", fmt.Errorf("unsupported key algorithm: %s", keyAlgorithm)
	}
}

// createSSHRSAKeyPair generates a 2048-bit RSA key pair; the private key is
// PEM-encoded in the legacy PKCS#1 "RSA PRIVATE KEY" format.
func createSSHRSAKeyPair() (string, string, error) {
	privateKey, err := rsa.GenerateKey(rand.Reader, RSAKeyLengthBits)
	if err != nil {
		return "", "", fmt.Errorf("failed to generate rsa key pair: %w", err)
	}

	// generate and write private key as PEM
	var privKeyBuf strings.Builder

	privateKeyPEM := &pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
	}
	if err := pem.Encode(&privKeyBuf, privateKeyPEM); err != nil {
		return "", "", fmt.Errorf("failed to encode private key: %w", err)
	}

	// generate and write public key (authorized_keys line)
	pub, err := ssh.NewPublicKey(&privateKey.PublicKey)
	if err != nil {
		return "", "", fmt.Errorf("failed to generate public key: %w", err)
	}

	var pubKeyBuf strings.Builder

	pubKeyBuf.Write(ssh.MarshalAuthorizedKey(pub))

	return pubKeyBuf.String(), privKeyBuf.String(), nil
}

// createSSHEd25519KeyPair generates an ed25519 key pair; the private key is
// wrapped in the OpenSSH-proprietary "OPENSSH PRIVATE KEY" container built by
// marshalED25519PrivateKey (the stdlib has no marshaler for that format).
// NOTE(review): golang.org/x/crypto/ed25519 has been an alias of the stdlib
// crypto/ed25519 since Go 1.13 — consider migrating the import.
func createSSHEd25519KeyPair() (string, string, error) {
	pubKey, privateKey, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return "", "", fmt.Errorf("failed to generate ed25519 key pair: %w", err)
	}

	// generate and write private key as PEM
	var privKeyBuf strings.Builder

	ed25519PrivateKey, err := marshalED25519PrivateKey(privateKey)
	if err != nil {
		return "", "", err
	}

	privateKeyPEM := &pem.Block{Type: "OPENSSH PRIVATE KEY", Bytes: ed25519PrivateKey}
	if err := pem.Encode(&privKeyBuf, privateKeyPEM); err != nil {
		return "", "", fmt.Errorf("failed to encode private key: %w", err)
	}

	// error deliberately ignored: the key was just generated by this process,
	// so converting it to an ssh public key cannot fail in practice
	pub, _ := ssh.NewPublicKey(pubKey)

	var pubKeyBuf strings.Builder

	pubKeyBuf.Write(ssh.MarshalAuthorizedKey(pub))

	return pubKeyBuf.String(), privKeyBuf.String(), nil
}

// marshalED25519PrivateKey serializes an ed25519 private key into the
// unencrypted openssh-key-v1 binary container (cipher/kdf "none").
// marshalED25519PrivateKey is taken from https://github.com/mikesmitty/edkey
func marshalED25519PrivateKey(key ed25519.PrivateKey) ([]byte, error) {
	// "openssh-key-v1" magic, NUL-terminated, heads the container.
	magic := append([]byte("openssh-key-v1"), 0)

	message := ed25519message{}
	pk1 := ed25519pk1{}

	// One random value is drawn and reused for both check ints below; the
	// format requires Check1 == Check2 (parsers verify they match).
	rnd, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
	if err != nil {
		return nil, fmt.Errorf("failed to generate random number: %w", err)
	}

	//nolint:gosec // it won't overflow, as the max value is set to math.MaxUint32
	{
		// rnd is fixed here, so both calls return the same value — the two
		// check ints are intentionally equal.
		pk1.Check1 = uint32(rnd.Uint64())
		pk1.Check2 = uint32(rnd.Uint64())
	}

	pk1.Keytype = ssh.KeyAlgoED25519

	publicKey, ok := key.Public().(ed25519.PublicKey)
	if !ok {
		return nil, errors.New("failed to convert public key")
	}

	pubKey := []byte(publicKey)

	pk1.Pub = pubKey
	pk1.Priv = key
	pk1.Comment = ""

	// Pad the private-key block to the 8-byte cipher block size with the
	// deterministic 1,2,3,... padding the format prescribes.
	bs := 8
	blockLen := len(ssh.Marshal(pk1))
	padLen := (bs - (blockLen % bs)) % bs
	pk1.Pad = make([]byte, padLen)

	for i := range padLen {
		pk1.Pad[i] = byte(i + 1)
	}

	// Public-key blob: length-prefixed algorithm name ("ssh-ed25519" is 0x0b
	// bytes) followed by the length-prefixed 32-byte (0x20) public key.
	pubkeyFull := []byte{0x0, 0x0, 0x0, 0x0b}
	pubkeyFull = append(pubkeyFull, []byte(ssh.KeyAlgoED25519)...)
	pubkeyFull = append(pubkeyFull, []byte{0x0, 0x0, 0x0, 0x20}...)
	pubkeyFull = append(pubkeyFull, pubKey...)

	// Unencrypted container: no cipher, no KDF, a single key.
	message.CipherName = "none"
	message.KdfName = "none"
	message.KdfOpts = ""
	message.NumKeys = 1
	message.PubKey = pubkeyFull
	message.PrivKeyBlock = ssh.Marshal(pk1)

	magic = append(magic, ssh.Marshal(message)...)

	return magic, nil
}
142 | 143 | message.CipherName = "none" 144 | message.KdfName = "none" 145 | message.KdfOpts = "" 146 | message.NumKeys = 1 147 | message.PubKey = pubkeyFull 148 | message.PrivKeyBlock = ssh.Marshal(pk1) 149 | 150 | magic = append(magic, ssh.Marshal(message)...) 151 | 152 | return magic, nil 153 | } 154 | -------------------------------------------------------------------------------- /ssh/types.go: -------------------------------------------------------------------------------- 1 | package ssh 2 | 3 | type ed25519message struct { 4 | CipherName string 5 | KdfName string 6 | KdfOpts string 7 | NumKeys uint32 8 | PubKey []byte 9 | PrivKeyBlock []byte 10 | } 11 | 12 | type ed25519pk1 struct { 13 | Check1 uint32 14 | Check2 uint32 15 | Keytype string 16 | Pub []byte 17 | Priv []byte 18 | Comment string 19 | Pad []byte `ssh:"rest"` 20 | } 21 | -------------------------------------------------------------------------------- /strategy/lbsvc.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | 8 | "github.com/utkuozdemir/pv-migrate/k8s" 9 | "github.com/utkuozdemir/pv-migrate/migration" 10 | "github.com/utkuozdemir/pv-migrate/rsync" 11 | "github.com/utkuozdemir/pv-migrate/ssh" 12 | "github.com/utkuozdemir/pv-migrate/util" 13 | ) 14 | 15 | type LbSvc struct{} 16 | 17 | //nolint:funlen 18 | func (r *LbSvc) Run(ctx context.Context, attempt *migration.Attempt, logger *slog.Logger) error { 19 | mig := attempt.Migration 20 | 21 | sourceInfo := mig.SourceInfo 22 | destInfo := mig.DestInfo 23 | sourceNs := sourceInfo.Claim.Namespace 24 | destNs := destInfo.Claim.Namespace 25 | keyAlgorithm := mig.Request.KeyAlgorithm 26 | 27 | logger.Info("🔑 Generating SSH key pair", "algorithm", keyAlgorithm) 28 | 29 | publicKey, privateKey, err := ssh.CreateSSHKeyPair(keyAlgorithm) 30 | if err != nil { 31 | return fmt.Errorf("failed to create ssh key pair: %w", err) 32 | } 33 | 34 | 
privateKeyMountPath := "/tmp/id_" + keyAlgorithm 35 | 36 | srcReleaseName := attempt.HelmReleaseNamePrefix + "-src" 37 | destReleaseName := attempt.HelmReleaseNamePrefix + "-dest" 38 | releaseNames := []string{srcReleaseName, destReleaseName} 39 | 40 | doneCh := registerCleanupHook(attempt, releaseNames, logger) 41 | defer cleanupAndReleaseHook(ctx, attempt, releaseNames, doneCh, logger) 42 | 43 | err = installOnSource(attempt, srcReleaseName, publicKey, srcMountPath, logger) 44 | if err != nil { 45 | return fmt.Errorf("failed to install on source: %w", err) 46 | } 47 | 48 | sourceKubeClient := attempt.Migration.SourceInfo.ClusterClient.KubeClient 49 | svcName := srcReleaseName + "-sshd" 50 | 51 | lbSvcAddress, err := k8s.GetServiceAddress( 52 | ctx, 53 | sourceKubeClient, 54 | sourceNs, 55 | svcName, 56 | mig.Request.LBSvcTimeout, 57 | ) 58 | if err != nil { 59 | return fmt.Errorf("failed to get service address: %w", err) 60 | } 61 | 62 | sshTargetHost := formatSSHTargetHost(lbSvcAddress) 63 | if mig.Request.DestHostOverride != "" { 64 | sshTargetHost = mig.Request.DestHostOverride 65 | } 66 | 67 | err = installOnDest(attempt, destReleaseName, privateKey, privateKeyMountPath, 68 | sshTargetHost, srcMountPath, destMountPath, logger) 69 | if err != nil { 70 | return fmt.Errorf("failed to install on dest: %w", err) 71 | } 72 | 73 | showProgressBar := !attempt.Migration.Request.NoProgressBar 74 | kubeClient := destInfo.ClusterClient.KubeClient 75 | jobName := destReleaseName + "-rsync" 76 | 77 | if err = k8s.WaitForJobCompletion(ctx, kubeClient, destNs, jobName, showProgressBar, logger); err != nil { 78 | return fmt.Errorf("failed to wait for job completion: %w", err) 79 | } 80 | 81 | return nil 82 | } 83 | 84 | func installOnSource(attempt *migration.Attempt, releaseName, 85 | publicKey, srcMountPath string, logger *slog.Logger, 86 | ) error { 87 | mig := attempt.Migration 88 | sourceInfo := mig.SourceInfo 89 | namespace := sourceInfo.Claim.Namespace 90 | 91 | vals 
:= map[string]any{ 92 | "sshd": map[string]any{ 93 | "enabled": true, 94 | "namespace": namespace, 95 | "publicKey": publicKey, 96 | "service": map[string]any{ 97 | "type": "LoadBalancer", 98 | }, 99 | "pvcMounts": []map[string]any{ 100 | { 101 | "name": sourceInfo.Claim.Name, 102 | "readOnly": mig.Request.SourceMountReadOnly, 103 | "mountPath": srcMountPath, 104 | }, 105 | }, 106 | "affinity": sourceInfo.AffinityHelmValues, 107 | }, 108 | } 109 | 110 | return installHelmChart(attempt, sourceInfo, releaseName, vals, logger) 111 | } 112 | 113 | func installOnDest(attempt *migration.Attempt, releaseName, privateKey, 114 | privateKeyMountPath, sshHost, srcMountPath, destMountPath string, logger *slog.Logger, 115 | ) error { 116 | mig := attempt.Migration 117 | destInfo := mig.DestInfo 118 | namespace := destInfo.Claim.Namespace 119 | 120 | srcPath := srcMountPath + "/" + mig.Request.Source.Path 121 | destPath := destMountPath + "/" + mig.Request.Dest.Path 122 | rsyncCmd := rsync.Cmd{ 123 | NoChown: mig.Request.NoChown, 124 | Delete: mig.Request.DeleteExtraneousFiles, 125 | SrcPath: srcPath, 126 | DestPath: destPath, 127 | SrcUseSSH: true, 128 | SrcSSHHost: sshHost, 129 | Compress: mig.Request.Compress, 130 | } 131 | 132 | rsyncCmdStr, err := rsyncCmd.Build() 133 | if err != nil { 134 | return fmt.Errorf("failed to build rsync command: %w", err) 135 | } 136 | 137 | vals := map[string]any{ 138 | "rsync": map[string]any{ 139 | "enabled": true, 140 | "namespace": namespace, 141 | "privateKeyMount": true, 142 | "privateKey": privateKey, 143 | "privateKeyMountPath": privateKeyMountPath, 144 | "sshRemoteHost": sshHost, 145 | "pvcMounts": []map[string]any{ 146 | { 147 | "name": destInfo.Claim.Name, 148 | "mountPath": destMountPath, 149 | }, 150 | }, 151 | "command": rsyncCmdStr, 152 | "affinity": destInfo.AffinityHelmValues, 153 | }, 154 | } 155 | 156 | return installHelmChart(attempt, destInfo, releaseName, vals, logger) 157 | } 158 | 159 | func formatSSHTargetHost(host 
// Mnt2 is the migration strategy that mounts both PVCs into a single rsync
// job, avoiding any network transfer between pods.
type Mnt2 struct{}

// canDo reports whether the mnt2 strategy is applicable to the migration:
// both claims must be in the same cluster and the same namespace, and it must
// be possible to schedule one pod that mounts both volumes — either they are
// already on the same node, or at least one of them is currently unmounted,
// or the access modes (source ROX/RWX, dest RWX) permit an extra mounter.
func (r *Mnt2) canDo(t *migration.Migration) bool {
	sourceInfo := t.SourceInfo
	destInfo := t.DestInfo

	// "Same cluster" is approximated by comparing the API server host of the
	// two REST configs; NOTE(review): distinct kubeconfig contexts pointing at
	// the same server therefore count as the same cluster.
	sameCluster := sourceInfo.ClusterClient.RestConfig.Host == destInfo.ClusterClient.RestConfig.Host
	if !sameCluster {
		return false
	}

	sameNamespace := sourceInfo.Claim.Namespace == destInfo.Claim.Namespace
	if !sameNamespace {
		return false
	}

	sameNode := sourceInfo.MountedNode == destInfo.MountedNode
	oneUnmounted := sourceInfo.MountedNode == "" || destInfo.MountedNode == ""

	return sameNode || oneUnmounted || sourceInfo.SupportsROX || sourceInfo.SupportsRWX ||
		destInfo.SupportsRWX
}
context.Context, attempt *migration.Attempt, logger *slog.Logger) error { 37 | mig := attempt.Migration 38 | if !r.canDo(mig) { 39 | return ErrUnaccepted 40 | } 41 | 42 | sourceInfo := attempt.Migration.SourceInfo 43 | destInfo := attempt.Migration.DestInfo 44 | namespace := sourceInfo.Claim.Namespace 45 | 46 | node := determineTargetNode(mig) 47 | 48 | rsyncCmd, err := buildRsyncCmdMnt2(mig) 49 | if err != nil { 50 | return fmt.Errorf("failed to build rsync command: %w", err) 51 | } 52 | 53 | vals := map[string]any{ 54 | "rsync": map[string]any{ 55 | "enabled": true, 56 | "namespace": namespace, 57 | "nodeName": node, 58 | "pvcMounts": []map[string]any{ 59 | { 60 | "name": sourceInfo.Claim.Name, 61 | "mountPath": srcMountPath, 62 | "readOnly": mig.Request.SourceMountReadOnly, 63 | }, 64 | { 65 | "name": destInfo.Claim.Name, 66 | "mountPath": destMountPath, 67 | }, 68 | }, 69 | "command": rsyncCmd, 70 | "affinity": sourceInfo.AffinityHelmValues, 71 | }, 72 | } 73 | 74 | releaseName := attempt.HelmReleaseNamePrefix 75 | releaseNames := []string{releaseName} 76 | 77 | doneCh := registerCleanupHook(attempt, releaseNames, logger) 78 | defer cleanupAndReleaseHook(ctx, attempt, releaseNames, doneCh, logger) 79 | 80 | err = installHelmChart(attempt, sourceInfo, releaseName, vals, logger) 81 | if err != nil { 82 | return fmt.Errorf("failed to install helm chart: %w", err) 83 | } 84 | 85 | showProgressBar := !mig.Request.NoProgressBar 86 | kubeClient := mig.SourceInfo.ClusterClient.KubeClient 87 | jobName := attempt.HelmReleaseNamePrefix + "-rsync" 88 | 89 | if err = k8s.WaitForJobCompletion(ctx, kubeClient, namespace, jobName, showProgressBar, logger); err != nil { 90 | return fmt.Errorf("failed to wait for job completion: %w", err) 91 | } 92 | 93 | return nil 94 | } 95 | 96 | func buildRsyncCmdMnt2(mig *migration.Migration) (string, error) { 97 | srcPath := srcMountPath + "/" + mig.Request.Source.Path 98 | destPath := destMountPath + "/" + mig.Request.Dest.Path 99 | 100 | 
rsyncCmd := rsync.Cmd{ 101 | NoChown: mig.Request.NoChown, 102 | Delete: mig.Request.DeleteExtraneousFiles, 103 | SrcPath: srcPath, 104 | DestPath: destPath, 105 | Compress: mig.Request.Compress, 106 | } 107 | 108 | cmd, err := rsyncCmd.Build() 109 | if err != nil { 110 | return "", fmt.Errorf("failed to build rsync command: %w", err) 111 | } 112 | 113 | return cmd, nil 114 | } 115 | 116 | func determineTargetNode(t *migration.Migration) string { 117 | sourceInfo := t.SourceInfo 118 | destInfo := t.DestInfo 119 | 120 | if sourceInfo.MountedNode != "" && !sourceInfo.SupportsROX && !sourceInfo.SupportsRWX { 121 | return sourceInfo.MountedNode 122 | } 123 | 124 | if destInfo.MountedNode != "" && !destInfo.SupportsROX && !destInfo.SupportsRWX { 125 | return destInfo.MountedNode 126 | } 127 | 128 | if sourceInfo.MountedNode != "" { 129 | return sourceInfo.MountedNode 130 | } 131 | 132 | return destInfo.MountedNode 133 | } 134 | -------------------------------------------------------------------------------- /strategy/strategy.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log/slog" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | "time" 12 | 13 | "github.com/hashicorp/go-multierror" 14 | "gopkg.in/yaml.v3" 15 | "helm.sh/helm/v3/pkg/action" 16 | "helm.sh/helm/v3/pkg/cli" 17 | "helm.sh/helm/v3/pkg/cli/values" 18 | "helm.sh/helm/v3/pkg/getter" 19 | "helm.sh/helm/v3/pkg/storage/driver" 20 | apierrors "k8s.io/apimachinery/pkg/api/errors" 21 | 22 | "github.com/utkuozdemir/pv-migrate/migration" 23 | "github.com/utkuozdemir/pv-migrate/pvc" 24 | ) 25 | 26 | const ( 27 | Mnt2Strategy = "mnt2" 28 | SvcStrategy = "svc" 29 | LbSvcStrategy = "lbsvc" 30 | LocalStrategy = "local" 31 | 32 | helmValuesYAMLIndent = 2 33 | 34 | srcMountPath = "/source" 35 | destMountPath = "/dest" 36 | ) 37 | 38 | var ( 39 | DefaultStrategies = []string{Mnt2Strategy, SvcStrategy, LbSvcStrategy} 
40 | AllStrategies = []string{Mnt2Strategy, SvcStrategy, LbSvcStrategy, LocalStrategy} 41 | 42 | nameToStrategy = map[string]Strategy{ 43 | Mnt2Strategy: &Mnt2{}, 44 | SvcStrategy: &Svc{}, 45 | LbSvcStrategy: &LbSvc{}, 46 | LocalStrategy: &Local{}, 47 | } 48 | 49 | helmProviders = getter.All(cli.New()) 50 | 51 | ErrUnaccepted = errors.New("unaccepted") 52 | ) 53 | 54 | type Strategy interface { 55 | // Run runs the migration for the given task execution. 56 | // 57 | // This is the actual implementation of the migration. 58 | Run(ctx context.Context, a *migration.Attempt, logger *slog.Logger) error 59 | } 60 | 61 | func GetStrategiesMapForNames(names []string) (map[string]Strategy, error) { 62 | sts := make(map[string]Strategy) 63 | 64 | for _, name := range names { 65 | s, ok := nameToStrategy[name] 66 | if !ok { 67 | return nil, fmt.Errorf("strategy not found: %s", name) 68 | } 69 | 70 | sts[name] = s 71 | } 72 | 73 | return sts, nil 74 | } 75 | 76 | func registerCleanupHook( 77 | attempt *migration.Attempt, 78 | releaseNames []string, 79 | logger *slog.Logger, 80 | ) chan<- bool { 81 | doneCh := make(chan bool) 82 | signalCh := make(chan os.Signal, 1) 83 | 84 | signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) 85 | 86 | go func() { 87 | select { 88 | case <-signalCh: 89 | logger.Warn("🔶 Received termination signal") 90 | 91 | cleanup(attempt, releaseNames, logger) 92 | 93 | os.Exit(1) 94 | case <-doneCh: 95 | return 96 | } 97 | }() 98 | 99 | return doneCh 100 | } 101 | 102 | func cleanupAndReleaseHook(ctx context.Context, a *migration.Attempt, 103 | releaseNames []string, doneCh chan<- bool, logger *slog.Logger, 104 | ) { 105 | cleanup(a, releaseNames, logger) 106 | 107 | select { 108 | case <-ctx.Done(): 109 | logger.Warn("🔶 Context cancelled") 110 | case doneCh <- true: 111 | } 112 | } 113 | 114 | func cleanup(attempt *migration.Attempt, releaseNames []string, logger *slog.Logger) { 115 | if attempt.Migration.Request.SkipCleanup { 116 | logger.Info("🧹 
Cleanup skipped") 117 | 118 | return 119 | } 120 | 121 | mig := attempt.Migration 122 | req := mig.Request 123 | 124 | logger.Info("🧹 Cleaning up") 125 | 126 | var errs error 127 | 128 | for _, info := range []*pvc.Info{mig.SourceInfo, mig.DestInfo} { 129 | for _, name := range releaseNames { 130 | err := cleanupForPVC(name, req.HelmTimeout, info, logger) 131 | if err != nil { 132 | errs = multierror.Append(errs, err) 133 | } 134 | } 135 | } 136 | 137 | if errs != nil { 138 | logger.Warn("🔶 Cleanup failed, you might want to clean up manually", "error", errs) 139 | 140 | return 141 | } 142 | 143 | logger.Info("✨ Cleanup done") 144 | } 145 | 146 | func cleanupForPVC(helmReleaseName string, helmUninstallTimeout time.Duration, 147 | pvcInfo *pvc.Info, logger *slog.Logger, 148 | ) error { 149 | ac, err := initHelmActionConfig(pvcInfo, logger) 150 | if err != nil { 151 | return err 152 | } 153 | 154 | uninstall := action.NewUninstall(ac) 155 | uninstall.Wait = true 156 | uninstall.Timeout = helmUninstallTimeout 157 | _, err = uninstall.Run(helmReleaseName) 158 | 159 | if err != nil && !errors.Is(err, driver.ErrReleaseNotFound) && !apierrors.IsNotFound(err) { 160 | return fmt.Errorf("failed to uninstall helm release %s: %w", helmReleaseName, err) 161 | } 162 | 163 | return nil 164 | } 165 | 166 | func initHelmActionConfig(pvcInfo *pvc.Info, logger *slog.Logger) (*action.Configuration, error) { 167 | actionConfig := new(action.Configuration) 168 | 169 | err := actionConfig.Init(pvcInfo.ClusterClient.RESTClientGetter, 170 | pvcInfo.Claim.Namespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) { 171 | logger.Debug(fmt.Sprintf(format, v...)) 172 | }) 173 | if err != nil { 174 | return nil, fmt.Errorf("failed to initialize helm action config: %w", err) 175 | } 176 | 177 | return actionConfig, nil 178 | } 179 | 180 | func getMergedHelmValues( 181 | helmValuesFile string, 182 | request *migration.Request, 183 | ) (map[string]any, error) { 184 | allValuesFiles := 
append([]string{helmValuesFile}, request.HelmValuesFiles...) 185 | valsOptions := values.Options{ 186 | Values: request.HelmValues, 187 | ValueFiles: allValuesFiles, 188 | StringValues: request.HelmStringValues, 189 | FileValues: request.HelmFileValues, 190 | } 191 | 192 | mergedValues, err := valsOptions.MergeValues(helmProviders) 193 | if err != nil { 194 | return nil, fmt.Errorf("failed to merge helm values: %w", err) 195 | } 196 | 197 | return mergedValues, nil 198 | } 199 | 200 | func installHelmChart(attempt *migration.Attempt, pvcInfo *pvc.Info, name string, 201 | values map[string]any, logger *slog.Logger, 202 | ) error { 203 | helmValuesFile, err := writeHelmValuesToTempFile(attempt.ID, values) 204 | if err != nil { 205 | return fmt.Errorf("failed to write helm values to temp file: %w", err) 206 | } 207 | 208 | defer func() { 209 | os.Remove(helmValuesFile) 210 | }() 211 | 212 | helmActionConfig, err := initHelmActionConfig(pvcInfo, logger) 213 | if err != nil { 214 | return fmt.Errorf("failed to init helm action config: %w", err) 215 | } 216 | 217 | mig := attempt.Migration 218 | 219 | install := action.NewInstall(helmActionConfig) 220 | install.Namespace = pvcInfo.Claim.Namespace 221 | install.ReleaseName = name 222 | install.Wait = true 223 | 224 | if req := mig.Request; req.HelmTimeout < req.LBSvcTimeout { 225 | install.Timeout = req.LBSvcTimeout 226 | } else { 227 | install.Timeout = req.HelmTimeout 228 | } 229 | 230 | vals, err := getMergedHelmValues(helmValuesFile, mig.Request) 231 | if err != nil { 232 | return fmt.Errorf("failed to get merged helm values: %w", err) 233 | } 234 | 235 | if _, err = install.Run(mig.Chart, vals); err != nil { 236 | return fmt.Errorf("failed to install helm chart: %w", err) 237 | } 238 | 239 | return nil 240 | } 241 | 242 | func writeHelmValuesToTempFile(id string, vals map[string]any) (string, error) { 243 | file, err := os.CreateTemp("", fmt.Sprintf("pv-migrate-vals-%s-*.yaml", id)) 244 | if err != nil { 245 | return 
"", fmt.Errorf("failed to create temp file for helm values: %w", err) 246 | } 247 | 248 | defer func() { _ = file.Close() }() 249 | 250 | encoder := yaml.NewEncoder(file) 251 | encoder.SetIndent(helmValuesYAMLIndent) 252 | 253 | err = encoder.Encode(vals) 254 | if err != nil { 255 | return "", fmt.Errorf("failed to encode helm values: %w", err) 256 | } 257 | 258 | return file.Name(), nil 259 | } 260 | -------------------------------------------------------------------------------- /strategy/strategy_test.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | "k8s.io/apimachinery/pkg/api/resource" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | func buildTestPod(namespace string, name string, node string, pvc string) *corev1.Pod { 10 | return &corev1.Pod{ 11 | ObjectMeta: metav1.ObjectMeta{ 12 | Namespace: namespace, 13 | Name: name, 14 | }, 15 | Spec: corev1.PodSpec{ 16 | NodeName: node, 17 | Volumes: []corev1.Volume{ 18 | {Name: "a", VolumeSource: corev1.VolumeSource{ 19 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 20 | ClaimName: pvc, 21 | }, 22 | }}, 23 | }, 24 | }, 25 | } 26 | } 27 | 28 | func buildTestPVC(namespace string, name string, 29 | accessModes ...corev1.PersistentVolumeAccessMode, 30 | ) *corev1.PersistentVolumeClaim { 31 | return &corev1.PersistentVolumeClaim{ 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Namespace: namespace, 34 | Name: name, 35 | }, 36 | Spec: corev1.PersistentVolumeClaimSpec{ 37 | AccessModes: accessModes, 38 | Resources: corev1.VolumeResourceRequirements{ 39 | Requests: map[corev1.ResourceName]resource.Quantity{ 40 | "storage": resource.MustParse("512Mi"), 41 | }, 42 | }, 43 | }, 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /strategy/svc.go: -------------------------------------------------------------------------------- 1 | package 
strategy 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | 8 | "github.com/utkuozdemir/pv-migrate/k8s" 9 | "github.com/utkuozdemir/pv-migrate/migration" 10 | "github.com/utkuozdemir/pv-migrate/rsync" 11 | "github.com/utkuozdemir/pv-migrate/ssh" 12 | ) 13 | 14 | type Svc struct{} 15 | 16 | func (r *Svc) canDo(t *migration.Migration) bool { 17 | s := t.SourceInfo 18 | d := t.DestInfo 19 | sameCluster := s.ClusterClient.RestConfig.Host == d.ClusterClient.RestConfig.Host 20 | 21 | return sameCluster 22 | } 23 | 24 | func (r *Svc) Run(ctx context.Context, attempt *migration.Attempt, logger *slog.Logger) error { 25 | mig := attempt.Migration 26 | if !r.canDo(mig) { 27 | return ErrUnaccepted 28 | } 29 | 30 | releaseName := attempt.HelmReleaseNamePrefix 31 | releaseNames := []string{releaseName} 32 | 33 | helmVals, err := buildHelmVals(mig, releaseName, logger) 34 | if err != nil { 35 | return fmt.Errorf("failed to build helm values: %w", err) 36 | } 37 | 38 | doneCh := registerCleanupHook(attempt, releaseNames, logger) 39 | defer cleanupAndReleaseHook(ctx, attempt, releaseNames, doneCh, logger) 40 | 41 | err = installHelmChart(attempt, mig.DestInfo, releaseName, helmVals, logger) 42 | if err != nil { 43 | return fmt.Errorf("failed to install helm chart: %w", err) 44 | } 45 | 46 | showProgressBar := !mig.Request.NoProgressBar 47 | kubeClient := mig.SourceInfo.ClusterClient.KubeClient 48 | jobName := releaseName + "-rsync" 49 | 50 | if err = k8s.WaitForJobCompletion(ctx, kubeClient, 51 | mig.DestInfo.Claim.Namespace, jobName, showProgressBar, logger); err != nil { 52 | return fmt.Errorf("failed to wait for job completion: %w", err) 53 | } 54 | 55 | return nil 56 | } 57 | 58 | // 59 | //nolint:funlen 60 | func buildHelmVals( 61 | mig *migration.Migration, 62 | helmReleaseName string, 63 | logger *slog.Logger, 64 | ) (map[string]any, error) { 65 | sourceInfo := mig.SourceInfo 66 | destInfo := mig.DestInfo 67 | sourceNs := sourceInfo.Claim.Namespace 68 | destNs := 
destInfo.Claim.Namespace 69 | keyAlgorithm := mig.Request.KeyAlgorithm 70 | 71 | logger.Info("🔑 Generating SSH key pair", "algorithm", keyAlgorithm) 72 | 73 | publicKey, privateKey, err := ssh.CreateSSHKeyPair(keyAlgorithm) 74 | if err != nil { 75 | return nil, fmt.Errorf("failed to create ssh key pair: %w", err) 76 | } 77 | 78 | privateKeyMountPath := "/tmp/id_" + keyAlgorithm 79 | 80 | sshTargetHost := helmReleaseName + "-sshd." + sourceNs 81 | if mig.Request.DestHostOverride != "" { 82 | sshTargetHost = mig.Request.DestHostOverride 83 | } 84 | 85 | srcPath := srcMountPath + "/" + mig.Request.Source.Path 86 | destPath := destMountPath + "/" + mig.Request.Dest.Path 87 | rsyncCmd := rsync.Cmd{ 88 | NoChown: mig.Request.NoChown, 89 | Delete: mig.Request.DeleteExtraneousFiles, 90 | SrcPath: srcPath, 91 | DestPath: destPath, 92 | SrcUseSSH: true, 93 | SrcSSHHost: sshTargetHost, 94 | Compress: mig.Request.Compress, 95 | } 96 | 97 | rsyncCmdStr, err := rsyncCmd.Build() 98 | if err != nil { 99 | return nil, fmt.Errorf("failed to build rsync command: %w", err) 100 | } 101 | 102 | return map[string]any{ 103 | "rsync": map[string]any{ 104 | "enabled": true, 105 | "namespace": destNs, 106 | "privateKeyMount": true, 107 | "privateKey": privateKey, 108 | "privateKeyMountPath": privateKeyMountPath, 109 | "pvcMounts": []map[string]any{ 110 | { 111 | "name": destInfo.Claim.Name, 112 | "mountPath": destMountPath, 113 | }, 114 | }, 115 | "command": rsyncCmdStr, 116 | "affinity": destInfo.AffinityHelmValues, 117 | }, 118 | "sshd": map[string]any{ 119 | "enabled": true, 120 | "namespace": sourceNs, 121 | "publicKey": publicKey, 122 | "pvcMounts": []map[string]any{ 123 | { 124 | "name": sourceInfo.Claim.Name, 125 | "mountPath": srcMountPath, 126 | "readOnly": mig.Request.SourceMountReadOnly, 127 | }, 128 | }, 129 | "affinity": sourceInfo.AffinityHelmValues, 130 | }, 131 | }, nil 132 | } 133 | -------------------------------------------------------------------------------- 
/strategy/svc_test.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | v1 "k8s.io/api/core/v1" 9 | 10 | "github.com/utkuozdemir/pv-migrate/migration" 11 | "github.com/utkuozdemir/pv-migrate/pvc" 12 | ) 13 | 14 | func TestSvcCanDoSameCluster(t *testing.T) { 15 | t.Parallel() 16 | 17 | ctx, cancel := context.WithCancel(t.Context()) 18 | t.Cleanup(cancel) 19 | 20 | sourceNS := "namespace1" 21 | sourcePVC := "pvc1" 22 | sourcePod := "pod1" 23 | sourceNode := "node1" 24 | sourceModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} 25 | 26 | destNS := "namespace2" 27 | destPvc := "pvc2" 28 | destPod := "pod2" 29 | destNode := "node2" 30 | destModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} 31 | 32 | pvcA := buildTestPVC(sourceNS, sourcePVC, sourceModes...) 33 | pvcB := buildTestPVC(destNS, destPvc, destModes...) 34 | podA := buildTestPod(sourceNS, sourcePod, sourceNode, sourcePVC) 35 | podB := buildTestPod(destNS, destPod, destNode, destPvc) 36 | c := buildTestClient(pvcA, pvcB, podA, podB) 37 | src, _ := pvc.New(ctx, c, sourceNS, sourcePVC) 38 | dst, _ := pvc.New(ctx, c, destNS, destPvc) 39 | 40 | mig := migration.Migration{ 41 | SourceInfo: src, 42 | DestInfo: dst, 43 | } 44 | 45 | s := Svc{} 46 | canDo := s.canDo(&mig) 47 | assert.True(t, canDo) 48 | } 49 | -------------------------------------------------------------------------------- /test/.gitignore: -------------------------------------------------------------------------------- 1 | .kubeconfig-*.yaml 2 | -------------------------------------------------------------------------------- /test/k8s/_ns-1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: pv-migrate-test-1 5 | -------------------------------------------------------------------------------- 
/test/k8s/_ns-2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: pv-migrate-test-2 5 | 6 | -------------------------------------------------------------------------------- /test/k8s/dest-1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: pv-migrate-test-dest-1 5 | namespace: pv-migrate-test-1 6 | spec: 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: 11 | app: pv-migrate-test-dest-1 12 | template: 13 | metadata: 14 | labels: 15 | app: pv-migrate-test-dest-1 16 | spec: 17 | terminationGracePeriodSeconds: 0 18 | containers: 19 | - name: pv-migrate-test-dest-1 20 | image: docker.io/busybox:stable 21 | command: 22 | - /bin/sh 23 | - -c 24 | args: 25 | - echo "generating some files and directories"; 26 | mkdir -p /pv-migrate-test-dest-1/dir1/subdir1; 27 | mkdir -p /pv-migrate-test-dest-1/dir2; 28 | dd if=/dev/urandom bs=1M count=50 of=/pv-migrate-test-dest-1/d50mb.dat; 29 | dd if=/dev/urandom bs=1M count=10 of=/pv-migrate-test-dest-1/dir1/d10mb.dat; 30 | dd if=/dev/urandom bs=1M count=20 of=/pv-migrate-test-dest-1/dir1/d20mb.dat; 31 | dd if=/dev/urandom bs=1M count=10 of=/pv-migrate-test-dest-1/dir1/subdir1/d10mb.dat; 32 | dd if=/dev/urandom bs=1M count=50 of=/pv-migrate-test-dest-1/dir2/d50mb.dat; 33 | echo "done"; 34 | tail -f /dev/null; 35 | volumeMounts: 36 | - mountPath: /pv-migrate-test-dest-1 37 | name: pv-migrate-test-dest-1 38 | volumes: 39 | - name: pv-migrate-test-dest-1 40 | persistentVolumeClaim: 41 | claimName: pv-migrate-test-dest-1 42 | --- 43 | apiVersion: v1 44 | kind: PersistentVolumeClaim 45 | metadata: 46 | name: pv-migrate-test-dest-1 47 | namespace: pv-migrate-test-1 48 | spec: 49 | accessModes: 50 | - ReadWriteOnce 51 | resources: 52 | requests: 53 | storage: 512Mi 54 | 
-------------------------------------------------------------------------------- /test/k8s/dest-2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: pv-migrate-test-dest-2 5 | namespace: pv-migrate-test-2 6 | spec: 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: 11 | app: pv-migrate-test-dest-2 12 | template: 13 | metadata: 14 | labels: 15 | app: pv-migrate-test-dest-2 16 | spec: 17 | terminationGracePeriodSeconds: 0 18 | containers: 19 | - name: pv-migrate-test-dest-2 20 | image: docker.io/busybox:stable 21 | command: 22 | - /bin/sh 23 | - -c 24 | args: 25 | - echo "generating some files and directories"; 26 | mkdir -p /pv-migrate-test-dest-2/dir1/subdir1; 27 | mkdir -p /pv-migrate-test-dest-2/dir2; 28 | dd if=/dev/urandom bs=1M count=50 of=/pv-migrate-test-dest-2/d50mb.dat; 29 | dd if=/dev/urandom bs=1M count=10 of=/pv-migrate-test-dest-2/dir1/d10mb.dat; 30 | dd if=/dev/urandom bs=1M count=20 of=/pv-migrate-test-dest-2/dir1/d20mb.dat; 31 | dd if=/dev/urandom bs=1M count=10 of=/pv-migrate-test-dest-2/dir1/subdir1/d10mb.dat; 32 | dd if=/dev/urandom bs=1M count=50 of=/pv-migrate-test-dest-2/dir2/d50mb.dat; 33 | echo "done"; 34 | tail -f /dev/null; 35 | volumeMounts: 36 | - mountPath: /pv-migrate-test-dest-2 37 | name: pv-migrate-test-dest-2 38 | volumes: 39 | - name: pv-migrate-test-dest-2 40 | persistentVolumeClaim: 41 | claimName: pv-migrate-test-dest-2 42 | --- 43 | apiVersion: v1 44 | kind: PersistentVolumeClaim 45 | metadata: 46 | name: pv-migrate-test-dest-2 47 | namespace: pv-migrate-test-2 48 | spec: 49 | accessModes: 50 | - ReadWriteOnce 51 | resources: 52 | requests: 53 | storage: 512Mi 54 | -------------------------------------------------------------------------------- /test/k8s/source-1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | 
name: pv-migrate-test-source-1 5 | namespace: pv-migrate-test-1 6 | spec: 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: 11 | app: pv-migrate-test-source-1 12 | template: 13 | metadata: 14 | labels: 15 | app: pv-migrate-test-source-1 16 | spec: 17 | terminationGracePeriodSeconds: 0 18 | containers: 19 | - name: pv-migrate-test-source-1 20 | image: docker.io/busybox:stable 21 | command: 22 | - /bin/sh 23 | - -c 24 | args: 25 | - echo "generating some files and directories"; 26 | mkdir -p /pv-migrate-test-source-1/dir1/subdir1; 27 | mkdir -p /pv-migrate-test-source-1/dir2; 28 | dd if=/dev/urandom bs=1M count=50 of=/pv-migrate-test-source-1/s50mb.dat; 29 | dd if=/dev/urandom bs=1M count=10 of=/pv-migrate-test-source-1/dir1/s10mb.dat; 30 | dd if=/dev/urandom bs=1M count=20 of=/pv-migrate-test-source-1/dir1/s20mb.dat; 31 | dd if=/dev/urandom bs=1M count=10 of=/pv-migrate-test-source-1/dir1/subdir1/s10mb.dat; 32 | dd if=/dev/urandom bs=1M count=50 of=/pv-migrate-test-source-1/dir2/s50mb.dat; 33 | echo "done"; 34 | tail -f /dev/null; 35 | volumeMounts: 36 | - mountPath: /pv-migrate-test-source-1 37 | name: pv-migrate-test-source-1 38 | volumes: 39 | - name: pv-migrate-test-source-1 40 | persistentVolumeClaim: 41 | claimName: pv-migrate-test-source-1 42 | --- 43 | apiVersion: v1 44 | kind: PersistentVolumeClaim 45 | metadata: 46 | name: pv-migrate-test-source-1 47 | namespace: pv-migrate-test-1 48 | spec: 49 | accessModes: 50 | - ReadWriteOnce 51 | resources: 52 | requests: 53 | storage: 512Mi 54 | -------------------------------------------------------------------------------- /test/kind-config.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | disableDefaultCNI: true 5 | -------------------------------------------------------------------------------- /test/metallb-manifests.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: default 5 | namespace: metallb-system 6 | --- 7 | apiVersion: metallb.io/v1beta1 8 | kind: IPAddressPool 9 | metadata: 10 | name: default 11 | namespace: metallb-system 12 | spec: 13 | autoAssign: true 14 | addresses: 15 | - ${ADDRESS_RANGE} 16 | -------------------------------------------------------------------------------- /test/netpol-allow-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-all 5 | spec: 6 | podSelector: {} 7 | ingress: 8 | - {} 9 | egress: 10 | - {} 11 | policyTypes: 12 | - Ingress 13 | - Egress 14 | -------------------------------------------------------------------------------- /test/terraform/.gitignore: -------------------------------------------------------------------------------- 1 | .serviceaccount.json 2 | -------------------------------------------------------------------------------- /test/terraform/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/google" { 5 | version = "4.2.1" 6 | hashes = [ 7 | "h1:5DT4u29kEakawD9OxxFFxd7XXhdDEoIDC+LIm17viT0=", 8 | "zh:0edb592e41a9a7a698d7dac9a24acc0512b7bedcf09b374df0f46d82281366d8", 9 | "zh:0f6ec72b2116d0781dd3709455813b5924586c2adeced8ecd37630be3eaec82d", 10 | "zh:1233077bbfd6b41991f6123618a9728a6f5c4ca70f2ed0bbd2142b1f55b35049", 11 | "zh:40f21f997acb1b3919d4e2382482f4af6314fc6166a0b815d020f9ae1bf270d4", 12 | "zh:467fe911820ad22f8f3ec02e40571ed36b1d12ba52c162a761ec87ee5a01b81f", 13 | "zh:70bfb636947ae5c478e06935c4a82a7d3be0124237aaec6edec70034b46a7c0b", 14 | "zh:85bd1fb9989832f22291bfb27d5677d2615fe3e4cb404729580a4d2bf8d373f8", 15 | "zh:ab41f8ba0b18f16d579f875ca120c5d48d773b9eb791377ec388da4b6b595087", 16 | "zh:cb3a38506303835e39e72b57e698f627661a99f7adf00480d92e406a8c095102", 17 | "zh:d677c52656f49f7d5371e45ff14ebd415206cfebdb1a61b71739d3543e3ceb4f", 18 | "zh:f9e33259b9429b92aff0a03cb930a2cdac859716b8443089485158a65b5cbfe1", 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /test/terraform/gcp.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "gcs" { 3 | bucket = "pv-migrate-terraform-backend" 4 | credentials = ".serviceaccount.json" 5 | } 6 | } 7 | 8 | provider "google" { 9 | credentials = file(".serviceaccount.json") 10 | project = var.gcp_project_id 11 | region = var.gcp_region 12 | zone = var.gcp_zone 13 | } 14 | 15 | resource "google_container_cluster" "cluster_1" { 16 | name = "pv-migrate-test-1" 17 | location = var.gcp_zone 18 | initial_node_count = 1 19 | logging_service = "none" 20 | monitoring_service = "none" 21 | cluster_autoscaling { 22 | enabled = false 23 | } 24 | vertical_pod_autoscaling { 25 | enabled = false 26 | } 27 | node_config { 28 | machine_type = "e2-micro" 29 | disk_size_gb = 16 30 | } 31 | } 32 | 33 | resource "google_container_cluster" "cluster_2" { 34 | name = "pv-migrate-test-2" 
35 | location = var.gcp_zone 36 | initial_node_count = 1 37 | logging_service = "none" 38 | monitoring_service = "none" 39 | cluster_autoscaling { 40 | enabled = false 41 | } 42 | vertical_pod_autoscaling { 43 | enabled = false 44 | } 45 | node_config { 46 | machine_type = "e2-micro" 47 | disk_size_gb = 16 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /test/terraform/terraform.tfvars: -------------------------------------------------------------------------------- 1 | gcp_project_id = "pv-migrate" 2 | gcp_region = "europe-west3" 3 | gcp_zone = "europe-west3-a" 4 | -------------------------------------------------------------------------------- /test/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "gcp_project_id" { 2 | type = string 3 | } 4 | 5 | variable "gcp_region" { 6 | type = string 7 | } 8 | 9 | variable "gcp_zone" { 10 | type = string 11 | } 12 | 13 | -------------------------------------------------------------------------------- /test/test-destroy-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | terraform -chdir=terraform/ destroy -auto-approve 5 | 6 | GCP_PROJECT=pv-migrate 7 | 8 | gcloud --project $GCP_PROJECT compute disks list --format="value(zone.basename(), name)" | 9 | awk '{print "--zone " $1 " " $2}' | 10 | xargs -L 1 gcloud --project pv-migrate compute disks delete --quiet 11 | -------------------------------------------------------------------------------- /test/test-prepare-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | GCP_PROJECT=pv-migrate 5 | GCP_ZONE=europe-west3-a 6 | 7 | SOURCE_KUBECONFIG=.kubeconfig-source.yaml 8 | DEST_KUBECONFIG=.kubeconfig-dest.yaml 9 | 10 | terraform -chdir=terraform/ apply -auto-approve 11 | 12 | 
KUBECONFIG=$SOURCE_KUBECONFIG gcloud \ 13 | --project $GCP_PROJECT \ 14 | container clusters get-credentials \ 15 | --zone $GCP_ZONE pv-migrate-test-1 16 | 17 | KUBECONFIG=$DEST_KUBECONFIG gcloud \ 18 | --project $GCP_PROJECT \ 19 | container clusters get-credentials \ 20 | --zone $GCP_ZONE pv-migrate-test-2 21 | 22 | kubectl --kubeconfig $SOURCE_KUBECONFIG apply -f k8s/ 23 | kubectl --kubeconfig $DEST_KUBECONFIG apply -f k8s/ 24 | -------------------------------------------------------------------------------- /test/test-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | SOURCE_KUBECONFIG=.kubeconfig-source.yaml 5 | DEST_KUBECONFIG=.kubeconfig-dest.yaml 6 | 7 | echo "----------------------------------" 8 | echo "Same namespace - legacy migrate subcommand" 9 | pv-migrate migrate \ 10 | --ignore-mounted \ 11 | --source-kubeconfig $SOURCE_KUBECONFIG \ 12 | --source-namespace pv-migrate-test-1 \ 13 | --dest-kubeconfig $SOURCE_KUBECONFIG \ 14 | --dest-namespace pv-migrate-test-1 \ 15 | --dest-delete-extraneous-files \ 16 | pv-migrate-test-source-1 \ 17 | pv-migrate-test-dest-1 18 | 19 | echo "----------------------------------" 20 | echo "Same namespace" 21 | pv-migrate \ 22 | --ignore-mounted \ 23 | --source-kubeconfig $SOURCE_KUBECONFIG \ 24 | --source-namespace pv-migrate-test-1 \ 25 | --source pv-migrate-test-source-1 \ 26 | --dest-kubeconfig $SOURCE_KUBECONFIG \ 27 | --dest-namespace pv-migrate-test-1 \ 28 | --dest-delete-extraneous-files \ 29 | --dest pv-migrate-test-dest-1 30 | 31 | echo "----------------------------------" 32 | echo "Different namespace" 33 | pv-migrate \ 34 | --ignore-mounted \ 35 | --source-kubeconfig $SOURCE_KUBECONFIG \ 36 | --source-namespace pv-migrate-test-1 \ 37 | --source pv-migrate-test-source-1 \ 38 | --dest-kubeconfig $SOURCE_KUBECONFIG \ 39 | --dest-namespace pv-migrate-test-2 \ 40 | --dest-delete-extraneous-files \ 41 | --dest 
pv-migrate-test-dest-2 42 | 43 | echo "----------------------------------" 44 | echo "Different cluster" 45 | pv-migrate \ 46 | --log-level info \ 47 | --log-format fancy \ 48 | migrate \ 49 | --ignore-mounted \ 50 | --source-kubeconfig $SOURCE_KUBECONFIG \ 51 | -n pv-migrate-test-1 \ 52 | --dest-kubeconfig $DEST_KUBECONFIG \ 53 | -N pv-migrate-test-1 \ 54 | --dest-delete-extraneous-files \ 55 | pv-migrate-test-source-1 pv-migrate-test-dest-1 56 | 57 | echo "----------------------------------" 58 | echo "Different cluster - local strategy" 59 | pv-migrate \ 60 | --strategies local \ 61 | --ignore-mounted \ 62 | --source-kubeconfig $SOURCE_KUBECONFIG \ 63 | -n pv-migrate-test-1 \ 64 | --source pv-migrate-test-source-1 \ 65 | --dest-kubeconfig $DEST_KUBECONFIG \ 66 | -N pv-migrate-test-1 \ 67 | --dest-delete-extraneous-files \ 68 | --dest pv-migrate-test-dest-1 69 | -------------------------------------------------------------------------------- /util/util.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | "math/big" 7 | "net" 8 | ) 9 | 10 | var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789") 11 | 12 | // RandomHexadecimalString returns a random lowercase hexadecimal string of given length. 
13 | func RandomHexadecimalString(length int) string { 14 | lengthBigInt := big.NewInt(int64(length)) 15 | 16 | runes := make([]rune, length) 17 | for rune := range runes { 18 | rnd, err := rand.Int(rand.Reader, lengthBigInt) 19 | if err != nil { 20 | panic(fmt.Sprintf("failed to generate random number: %v", err)) 21 | } 22 | 23 | runes[rune] = letters[rnd.Int64()] 24 | } 25 | 26 | return string(runes) 27 | } 28 | 29 | func IsIPv6(host string) bool { 30 | ip := net.ParseIP(host) 31 | if ip == nil { 32 | return false 33 | } 34 | 35 | return ip.To4() == nil 36 | } 37 | -------------------------------------------------------------------------------- /util/util_test.go: -------------------------------------------------------------------------------- 1 | package util_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/utkuozdemir/pv-migrate/util" 9 | ) 10 | 11 | func TestIsIPv6(t *testing.T) { 12 | t.Parallel() 13 | 14 | assert.False(t, util.IsIPv6("192.168.1.1")) 15 | assert.True(t, util.IsIPv6("2001:0db8:85a3:0000:0000:8a2e:0370:7334")) 16 | assert.True(t, util.IsIPv6("::1")) 17 | } 18 | --------------------------------------------------------------------------------