├── .dockerignore ├── .github └── workflows │ ├── e2e.yml │ └── release.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yaml ├── Dockerfile ├── Dockerfile.goreleaser ├── Dockerfile.restic ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── api └── v1alpha1 │ ├── groupversion_info.go │ ├── pvcbackup_types.go │ ├── pvcrestore_types.go │ └── zz_generated.deepcopy.go ├── chart ├── .helmignore ├── Chart.template.yaml ├── Chart.yaml ├── README.md ├── bump.sh ├── release.sh ├── templates │ ├── _helpers.tpl │ ├── crd │ │ ├── backsnap.skyb.it_pvcbackups.yaml │ │ └── backsnap.skyb.it_pvcrestores.yaml │ ├── manager.yaml │ ├── rbac │ │ ├── leader_election_role.yaml │ │ ├── leader_election_role_binding.yaml │ │ ├── role.yaml │ │ └── role_binding.yaml │ └── service_account.yaml └── values.yaml ├── cmd └── main.go ├── config ├── crd │ ├── bases │ │ ├── backsnap.skyb.it_pvcbackups.yaml │ │ └── backsnap.skyb.it_pvcrestores.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ └── manager_config_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── pvcbackup_editor_role.yaml │ ├── pvcbackup_viewer_role.yaml │ ├── pvcrestore_editor_role.yaml │ ├── pvcrestore_viewer_role.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml └── samples │ ├── backsnap_v1alpha1_pvcbackup.yaml │ ├── backsnap_v1alpha1_pvcrestore.yaml │ └── kustomization.yaml ├── docs ├── backsnap-0.7.0.tgz ├── backsnap-0.7.1.tgz ├── backsnap-0.7.2.tgz ├── backsnap-0.8.0.tgz ├── backsnap-0.9.0.tgz ├── backsnap-0.9.1.tgz ├── index.yaml └── migrate_pvc_to_another_az.md ├── go.mod ├── go.sum ├── internal └── controller │ ├── automatic.go │ ├── automatic_test.go │ ├── pvcbackup_controller.go │ ├── pvcbackup_controller_test.go │ ├── pvcrestore_controller.go │ ├── suite_test.go │ └── util.go ├── restic.sh └── retag-images-for-test.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 
3 | bin/ 4 | -------------------------------------------------------------------------------- /.github/workflows/e2e.yml: -------------------------------------------------------------------------------- 1 | name: Run against Minikube 2 | on: 3 | push: 4 | tags-ignore: 5 | - '**' 6 | branches: 7 | - '**' 8 | workflow_call: {} 9 | jobs: 10 | e2e: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - uses: actions/setup-go@v5 15 | with: 16 | go-version-file: 'go.mod' 17 | 18 | - name: go build 19 | run: | 20 | make 21 | if [ "$(git status --porcelain=v1 2>/dev/null | wc -l)" != "0" ]; then 22 | echo "error: Makefile caused changes" 23 | git status 24 | exit 1 25 | fi 26 | 27 | - name: Start minikube 28 | uses: medyagh/setup-minikube@latest 29 | with: 30 | addons: volumesnapshots,csi-hostpath-driver 31 | - name: Minikube version 32 | run: kubectl version 33 | - name: Run tests 34 | run: 35 | make test 36 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: goreleaser 2 | 3 | on: 4 | push: 5 | # run only against tags 6 | tags: 7 | - '*' 8 | 9 | permissions: 10 | contents: write 11 | # packages: write 12 | # issues: write 13 | 14 | jobs: 15 | e2e: 16 | uses: ./.github/workflows/e2e.yml 17 | 18 | goreleaser: 19 | needs: e2e 20 | runs-on: ubuntu-latest 21 | steps: 22 | - uses: actions/checkout@v4 23 | with: 24 | fetch-depth: 0 25 | fetch-tags: true 26 | - uses: actions/setup-go@v5 27 | with: 28 | go-version-file: 'go.mod' 29 | - uses: docker/setup-qemu-action@v3 30 | - uses: docker/setup-buildx-action@v3 31 | - uses: docker/login-action@v3 32 | with: 33 | username: ${{ secrets.DOCKERHUB_USERNAME }} 34 | password: ${{ secrets.DOCKERHUB_TOKEN }} 35 | - uses: goreleaser/goreleaser-action@v6 36 | with: 37 | version: latest 38 | args: release --clean 39 | env: 40 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin/* 9 | Dockerfile.cross 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Kubernetes Generated files - skip generated files, except for vendored files 18 | 19 | !vendor/**/zz_generated.* 20 | 21 | # editor and IDE paraphernalia 22 | .idea 23 | .vscode 24 | *.swp 25 | *.swo 26 | *~ 27 | 28 | dist/ 29 | hack 30 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | deadline: 5m 3 | allow-parallel-runners: true 4 | 5 | issues: 6 | # don't skip warning about doc comments 7 | # don't exclude the default set of lint 8 | exclude-use-default: false 9 | # restore some of the defaults 10 | # (fill in the rest as needed) 11 | exclude-rules: 12 | - path: "api/*" 13 | linters: 14 | - lll 15 | - path: "internal/*" 16 | linters: 17 | - dupl 18 | - lll 19 | linters: 20 | disable-all: true 21 | enable: 22 | - dupl 23 | - errcheck 24 | - exportloopref 25 | - goconst 26 | - gocyclo 27 | - gofmt 28 | - goimports 29 | - gosimple 30 | - govet 31 | - ineffassign 32 | - lll 33 | - misspell 34 | - nakedret 35 | - prealloc 36 
| - staticcheck 37 | - typecheck 38 | - unconvert 39 | - unparam 40 | - unused 41 | -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://goreleaser.com/static/schema.json 2 | # vim: set ts=2 sw=2 tw=0 fo=cnqoj 3 | 4 | version: 2 5 | 6 | builds: 7 | - main: ./cmd 8 | env: 9 | - CGO_ENABLED=0 10 | goos: 11 | - linux 12 | - darwin 13 | goarch: 14 | - amd64 15 | - arm64 16 | 17 | dockers: 18 | - image_templates: 19 | - 'sjorsgielen/backsnap-restic:latest-amd64' 20 | - 'sjorsgielen/backsnap-restic:{{ .Tag }}-amd64' 21 | dockerfile: 'Dockerfile.restic' 22 | use: buildx 23 | build_flag_templates: 24 | - "--pull" 25 | - "--platform=linux/amd64" 26 | extra_files: 27 | - "restic.sh" 28 | - image_templates: 29 | - 'sjorsgielen/backsnap-restic:latest-arm64' 30 | - 'sjorsgielen/backsnap-restic:{{ .Tag }}-arm64' 31 | dockerfile: 'Dockerfile.restic' 32 | use: buildx 33 | build_flag_templates: 34 | - "--pull" 35 | - "--platform=linux/arm64" 36 | goarch: arm64 37 | extra_files: 38 | - "restic.sh" 39 | - image_templates: 40 | - 'sjorsgielen/backsnap:latest-amd64' 41 | - 'sjorsgielen/backsnap:{{ .Tag }}-amd64' 42 | dockerfile: 'Dockerfile.goreleaser' 43 | use: buildx 44 | build_flag_templates: 45 | - "--pull" 46 | - "--platform=linux/amd64" 47 | - image_templates: 48 | - 'sjorsgielen/backsnap:latest-arm64' 49 | - 'sjorsgielen/backsnap:{{ .Tag }}-arm64' 50 | dockerfile: 'Dockerfile.goreleaser' 51 | use: buildx 52 | build_flag_templates: 53 | - "--pull" 54 | - "--platform=linux/arm64" 55 | goarch: arm64 56 | 57 | docker_manifests: 58 | - name_template: 'sjorsgielen/backsnap-restic:latest' 59 | image_templates: 60 | - 'sjorsgielen/backsnap-restic:latest-amd64' 61 | - 'sjorsgielen/backsnap-restic:latest-arm64' 62 | - name_template: 'sjorsgielen/backsnap-restic:{{ .Tag }}' 63 | image_templates: 64 | - 'sjorsgielen/backsnap-restic:{{ .Tag }}-amd64' 65 | - 'sjorsgielen/backsnap-restic:{{ .Tag }}-arm64' 66 | - name_template: 'sjorsgielen/backsnap:latest' 67 | image_templates: 68 | - 'sjorsgielen/backsnap:latest-amd64' 69 | - 'sjorsgielen/backsnap:latest-arm64' 70 | - name_template: 'sjorsgielen/backsnap:{{ .Tag }}' 71 | image_templates: 72 | - 'sjorsgielen/backsnap:{{ .Tag }}-amd64' 73 | - 'sjorsgielen/backsnap:{{ .Tag }}-arm64' 74 | 75 | archives: 76 | - format: tar.gz 77 | # this name template makes the OS and Arch compatible with the results of `uname`. 
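# For example, on linux/amd64 this produces an archive name like "backsnap_Linux_x86_64" (assuming the goreleaser project name resolves to "backsnap").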
78 | name_template: >- 79 | {{ .ProjectName }}_ 80 | {{- title .Os }}_ 81 | {{- if eq .Arch "amd64" }}x86_64 82 | {{- else if eq .Arch "386" }}i386 83 | {{- else }}{{ .Arch }}{{ end }} 84 | {{- if .Arm }}v{{ .Arm }}{{ end }} 85 | # use zip for windows archives 86 | format_overrides: 87 | - goos: windows 88 | format: zip 89 | 90 | changelog: 91 | sort: asc 92 | filters: 93 | exclude: 94 | - "^docs:" 95 | - "^test:" 96 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.21.5-bookworm AS builder 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | WORKDIR /workspace 7 | # Copy the Go Modules manifests 8 | COPY go.mod go.mod 9 | COPY go.sum go.sum 10 | # cache deps before building and copying source so that we don't need to re-download as much 11 | # and so that source changes don't invalidate our downloaded layer 12 | RUN go mod download 13 | 14 | # Copy the go source 15 | COPY cmd/ cmd/ 16 | COPY api/ api/ 17 | COPY internal/controller/ internal/controller/ 18 | 19 | # Build 20 | # GOARCH has no default value, so that the binary is built for the platform of the host where the command 21 | # was called. For example, if we call make docker-build in a local env on Apple Silicon (M1), 22 | # the docker BUILDPLATFORM arg will be linux/arm64, while for Apple x86 it will be linux/amd64. Therefore, 23 | # by leaving it empty we ensure that the container and the binary shipped in it have the same platform. 24 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go 25 | 26 | # Use distroless as minimal base image to package the manager binary 27 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 28 | FROM gcr.io/distroless/static:nonroot 29 | WORKDIR / 30 | COPY --from=builder /workspace/manager .
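# Run as the distroless "nonroot" user (UID/GID 65532) rather than as root.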
31 | USER 65532:65532 32 | 33 | ENTRYPOINT ["/manager"] 34 | -------------------------------------------------------------------------------- /Dockerfile.goreleaser: -------------------------------------------------------------------------------- 1 | # Use distroless as minimal base image to package the manager binary 2 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 3 | FROM gcr.io/distroless/static:nonroot 4 | WORKDIR / 5 | COPY backsnap manager 6 | USER 65532:65532 7 | 8 | ENTRYPOINT ["/manager"] 9 | -------------------------------------------------------------------------------- /Dockerfile.restic: -------------------------------------------------------------------------------- 1 | FROM restic/restic:0.16.0 AS restic 2 | 3 | FROM ubuntu:focal 4 | 5 | RUN apt-get update \ 6 | && apt-get install -y --no-install-recommends ca-certificates curl \ 7 | && rm -rf /var/lib/apt/lists/* 8 | 9 | RUN curl -L \ 10 | https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(dpkg --print-architecture)/kubectl \ 11 | -o /usr/local/bin/kubectl && \ 12 | ls -la /usr/local/bin && \ 13 | chmod +x /usr/local/bin/kubectl && \ 14 | kubectl version --client 15 | 16 | COPY --from=restic /usr/bin/restic /usr/bin/restic 17 | RUN restic version 18 | 19 | COPY restic.sh /usr/local/bin 20 | CMD restic.sh 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Sjors Gielen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | # Image URL to use all building/pushing image targets 3 | IMG ?= controller:latest 4 | # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 5 | ENVTEST_K8S_VERSION = 1.28.0 6 | 7 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 8 | ifeq (,$(shell go env GOBIN)) 9 | GOBIN=$(shell go env GOPATH)/bin 10 | else 11 | GOBIN=$(shell go env GOBIN) 12 | endif 13 | 14 | # CONTAINER_TOOL defines the container tool to be used for building images. 
15 | # Be aware that the target commands are only tested with Docker which is 16 | # scaffolded by default. However, you might want to replace it with other 17 | # tools (e.g. podman). 18 | CONTAINER_TOOL ?= docker 19 | 20 | # Setting SHELL to bash allows bash commands to be executed by recipes. 21 | # Options are set to exit when a recipe line exits non-zero or a piped command fails. 22 | SHELL = /usr/bin/env bash -o pipefail 23 | .SHELLFLAGS = -ec 24 | 25 | .PHONY: all 26 | all: build 27 | 28 | ##@ General 29 | 30 | # The help target prints out all targets with their descriptions organized 31 | # beneath their categories. The categories are represented by '##@' and the 32 | # target descriptions by '##'. The awk command is responsible for reading the 33 | # entire set of makefiles included in this invocation, looking for lines of the 34 | # file as xyz: ## something, and then pretty-format the target and help. Then, 35 | # if there's a line with ##@ something, that gets pretty-printed as a category. 36 | # More info on the usage of ANSI control characters for terminal formatting: 37 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters 38 | # More info on the awk command: 39 | # http://linuxcommand.org/lc3_adv_awk.php 40 | 41 | .PHONY: help 42 | help: ## Display this help. 43 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 44 | 45 | ##@ Development 46 | 47 | .PHONY: manifests 48 | manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. 49 | $(CONTROLLER_GEN) rbac:roleName=backsnap-manager crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases 50 | 51 | .PHONY: generate 52 | generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. 53 | mkdir -p hack 54 | touch hack/boilerplate.go.txt 55 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 56 | rm hack/boilerplate.go.txt 57 | rmdir hack 58 | 59 | .PHONY: fmt 60 | fmt: ## Run go fmt against code. 61 | go fmt ./... 62 | 63 | .PHONY: vet 64 | vet: ## Run go vet against code. 65 | go vet ./... 66 | 67 | .PHONY: test 68 | test: manifests generate fmt vet envtest ## Run tests. 69 | @[ "$(shell $(KUBECTL) config current-context)" = "minikube" ] || { \ 70 | echo >&2 ; \ 71 | echo "Cowardly refusing to run tests with a context other than minikube. Please see the README." >&2 ; \ 72 | echo >&2 ; \ 73 | exit 1 ; \ 74 | } 75 | KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out -ginkgo.vv -test.v 76 | 77 | GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint 78 | GOLANGCI_LINT_VERSION ?= v1.54.2 79 | golangci-lint: 80 | @[ -f $(GOLANGCI_LINT) ] || { \ 81 | set -e ;\ 82 | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell dirname $(GOLANGCI_LINT)) $(GOLANGCI_LINT_VERSION) ;\ 83 | } 84 | 85 | .PHONY: lint 86 | lint: golangci-lint ## Run golangci-lint linter 87 | $(GOLANGCI_LINT) run 88 | 89 | .PHONY: lint-fix 90 | lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes 91 | $(GOLANGCI_LINT) run --fix 92 | 93 | ##@ Build 94 | 95 | .PHONY: build 96 | build: manifests generate fmt vet ## Build manager binary.
97 | go build -o bin/manager cmd/main.go 98 | 99 | .PHONY: run 100 | run: manifests generate fmt vet ## Run a controller from your host. 101 | go run ./cmd/main.go 102 | 103 | # If you wish to build the manager image targeting other platforms you can use the --platform flag. 104 | # (e.g. docker build --platform linux/arm64). However, you must enable Docker BuildKit for it. 105 | # More info: https://docs.docker.com/develop/develop-images/build_enhancements/ 106 | .PHONY: docker-build 107 | docker-build: ## Build docker image with the manager. 108 | $(CONTAINER_TOOL) build -t ${IMG} . 109 | 110 | .PHONY: docker-push 111 | docker-push: ## Push docker image with the manager. 112 | $(CONTAINER_TOOL) push ${IMG} 113 | 114 | # PLATFORMS defines the target platforms for the manager image to be built, to provide support for multiple 115 | # architectures (e.g. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to: 116 | # - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ 117 | # - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ 118 | # - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:tag> then the export will fail) 119 | # To build images that are compatible with multiple platforms, you should consider using this option. 120 | PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le 121 | .PHONY: docker-buildx 122 | docker-buildx: ## Build and push docker image for the manager for cross-platform support 123 | # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile 124 | sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross 125 | - $(CONTAINER_TOOL) buildx create --name project-v3-builder 126 | $(CONTAINER_TOOL) buildx use project-v3-builder 127 | - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . 128 | - $(CONTAINER_TOOL) buildx rm project-v3-builder 129 | rm Dockerfile.cross 130 | 131 | ##@ Deployment 132 | 133 | ifndef ignore-not-found 134 | ignore-not-found = false 135 | endif 136 | 137 | .PHONY: install 138 | install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 139 | $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - 140 | 141 | .PHONY: uninstall 142 | uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 143 | $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - 144 | 145 | .PHONY: deploy 146 | deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. 147 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 148 | $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - 149 | 150 | .PHONY: undeploy 151 | undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
152 | $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - 153 | 154 | ##@ Build Dependencies 155 | 156 | ## Location to install dependencies to 157 | LOCALBIN ?= $(shell pwd)/bin 158 | $(LOCALBIN): 159 | mkdir -p $(LOCALBIN) 160 | 161 | ## Tool Binaries 162 | KUBECTL ?= kubectl 163 | KUSTOMIZE ?= $(LOCALBIN)/kustomize 164 | CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen 165 | ENVTEST ?= $(LOCALBIN)/setup-envtest 166 | 167 | ## Tool Versions 168 | KUSTOMIZE_VERSION ?= v5.2.1 169 | CONTROLLER_TOOLS_VERSION ?= v0.15.0 170 | 171 | .PHONY: kustomize 172 | kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. 173 | $(KUSTOMIZE): $(LOCALBIN) 174 | @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ 175 | echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \ 176 | rm -rf $(LOCALBIN)/kustomize; \ 177 | fi 178 | test -s $(LOCALBIN)/kustomize || GOBIN=$(LOCALBIN) GO111MODULE=on go install sigs.k8s.io/kustomize/kustomize/v5@$(KUSTOMIZE_VERSION) 179 | 180 | .PHONY: controller-gen 181 | controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. 182 | $(CONTROLLER_GEN): $(LOCALBIN) 183 | test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ 184 | GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) 185 | 186 | .PHONY: envtest 187 | envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. 188 | $(ENVTEST): $(LOCALBIN) 189 | test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest 190 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | # Code generated by tool. DO NOT EDIT. 2 | # This file is used to track the info used to scaffold your project 3 | # and allow the plugins properly work. 4 | # More info: https://book.kubebuilder.io/reference/project-config.html 5 | domain: skyb.it 6 | layout: 7 | - go.kubebuilder.io/v4 8 | projectName: backsnap 9 | repo: github.com/skybitsnl/backsnap 10 | resources: 11 | - api: 12 | crdVersion: v1 13 | namespaced: true 14 | controller: true 15 | domain: skyb.it 16 | group: backsnap 17 | kind: PVCBackup 18 | path: github.com/skybitsnl/backsnap/api/v1alpha1 19 | version: v1alpha1 20 | - api: 21 | crdVersion: v1 22 | namespaced: true 23 | controller: true 24 | domain: skyb.it 25 | group: backsnap 26 | kind: PVCRestore 27 | path: github.com/skybitsnl/backsnap/api/v1alpha1 28 | version: v1alpha1 29 | version: "3" 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # backsnap - a kubernetes backup operator 2 | 3 | *Backsnap: kubernetes backups, chiropractor approved!* 4 | 5 | [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/backsnap)](https://artifacthub.io/packages/search?repo=backsnap) 6 | 7 | Backsnap performs backups of persistent volumes (PV/PVC) in a Kubernetes 8 | cluster, for use in your disaster recovery scenarios. 
9 | 10 | It is unique in supporting point-in-time VolumeSnapshots, then making incremental 11 | off-site backups of those using `restic`. 12 | 13 | Backsnap by default assumes that all PVCs in your cluster should be backed up. 14 | No per-PVC object is necessary to start backing up any data. You can change this 15 | per PVC, per namespace or in operator configuration. 16 | 17 | ## How does it work? 18 | 19 | By default, Backsnap enumerates all PersistentVolumeClaims in your cluster, 20 | takes a point-in-time VolumeSnapshot of each, and uses `restic` to take a backup 21 | of the snapshot. 22 | 23 | By using VolumeSnapshots we are certain that a backup is internally consistent, 24 | which is important when backing up workloads such as databases. By using 25 | `restic` the backups are incremental and we automatically support all its 26 | features, such as restoring from a point in history, client-side encryption and 27 | multiple storage backends. 28 | 29 | The operator can run in automatic or manual mode. In manual mode (`-manual` 30 | flag), you create PVCBackup objects in the same namespace as a PVC you want to 31 | be backed up. The operator reacts to this by creating a snapshot, a 32 | point-in-time PVC and a Job to perform the backup, and cleans up afterwards. In 33 | automatic mode, the operator creates PVCBackup objects automatically according 34 | to the schedule (you can still also create your own). 35 | 36 | The automatic schedule can be adjusted using a `backsnap.skyb.it/schedule` 37 | annotation on the target PVC or target namespace. Setting the annotation to 38 | the empty string excludes the PVC (or all PVCs in the namespace) from backups. If 39 | neither the PVC nor the namespace has an annotation, the default schedule from the 40 | `-schedule` flag is used. You can set `-schedule=""` to disable automatic 41 | backups (this is the same as setting `-manual`, unless any PVCs or 42 | namespaces do have an overriding schedule set). 43 | 44 | ## Getting started 45 | 46 | ### Quick start using Helm 47 | 48 | ``` 49 | helm repo add backsnap https://skybitsnl.github.io/backsnap 50 | helm install --create-namespace --namespace backsnap backsnap backsnap/backsnap -f values.yaml 51 | ``` 52 | 53 | An example `values.yaml` follows. For more configuration options, see the 54 | ["Default values" page on ArtifactHub](https://artifacthub.io/packages/helm/backsnap/backsnap?modal=values). 55 | 56 | ``` 57 | app: 58 | # In its default configuration, Backsnap creates a daily backup for all 59 | # PVCs in the cluster, starting immediately. In this example configuration, we 60 | # configure a single namespace instead, which causes it to back up only that 61 | # namespace. In order to back up the entire cluster state, it is recommended 62 | # not to configure any namespaces here, but to configure per-namespace schedules 63 | # using annotations on the namespace or PVC. 64 | namespaces: 65 | allow: ["my-app"] 66 | 67 | # Default schedule for all PVCs within scope, unless overridden per namespace or 68 | # per PVC. The default is @daily, but any crontab syntax is supported. 69 | # See https://crontab.guru/ for examples and explanations.
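# For example (hypothetical schedules):
#schedule: "@hourly"    # back up every hour
#schedule: "0 3 * * *"  # back up every day at 03:00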
70 | #schedule: "@daily" 71 | 72 | # Snapshot class, if no cluster-wide default is configured or you prefer 73 | # another one 74 | snapshotClass: "" 75 | 76 | # Storage class, if no cluster-wide default is configured or you prefer 77 | # another one 78 | storageClass: "" 79 | 80 | # S3 backup destination configuration 81 | s3: 82 | host: "" 83 | bucket: "" 84 | accessKey: "" 85 | secretKey: "" 86 | 87 | # Restic configuration 88 | restic: 89 | password: "" 90 | ``` 91 | 92 | After this, you can observe your backups and their status using: 93 | 94 | ``` 95 | kubectl get pvcbackup -A 96 | ``` 97 | 98 | See below for how to create your own PVCBackups, and how to restore with PVCRestores. 99 | 100 | ### Manual installation with kubectl 101 | 102 | First, import the Backsnap CRDs: 103 | 104 | ``` 105 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/crd/bases/backsnap.skyb.it_pvcbackups.yaml 106 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/crd/bases/backsnap.skyb.it_pvcrestores.yaml 107 | ``` 108 | 109 | Then, create a `backsnap` namespace where the operator will run: 110 | 111 | ``` 112 | kubectl create namespace backsnap 113 | ``` 114 | 115 | Then, create the Service Account and its required roles. The files below create 116 | a ClusterRole which allows creating VolumeSnapshots, PersistentVolumeClaims and 117 | Jobs in any namespace, and allows reading various other resources. If you're 118 | just backing up a single namespace, you can tweak these files to create a Role 119 | which only allows this access within that namespace. 120 | 121 | Once [cross-namespace data sources](https://kubernetes.io/blog/2023/01/02/cross-namespace-data-sources-alpha/) 122 | are beta in Kubernetes, this application will also optionally support them, 123 | and the set of necessary ClusterRole rules will be significantly reduced. 124 | 125 | ``` 126 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/role.yaml 127 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/role_binding.yaml 128 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/leader_election_role.yaml 129 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/leader_election_role_binding.yaml 130 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/service_account.yaml 131 | ``` 132 | 133 | Then, we can deploy the operator. 134 | 135 | > [!CAUTION] 136 | > Note that depending on your operator configuration, Backsnap may start to take 137 | > back-ups of all PVCs in your cluster immediately. If you don't want this to 138 | > happen (yet), you can enable manual mode using the `-manual` flag. Eventually, 139 | > we recommend running Backsnap in its default mode. This ensures that you have at 140 | > least a daily snapshot of all PVCs in your cluster, even new ones, unless you 141 | > opt out explicitly. 142 | 143 | We download the Deployment YAML so that we can edit its default configuration before 144 | starting it. 145 | 146 | ``` 147 | wget https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/manager/manager.yaml 148 | ``` 149 | 150 | Edit the manager.yaml and adjust as necessary. 151 | 152 | - The version is set to `latest` by default; you may want to choose a specific tag here 153 | to prevent automatic updates. 154 | - See the args inside the default YAML for any configuration you may want to set; an illustrative example follows below.
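For illustration, after editing, the manager container's `args` might look something like this. This is only a sketch: the flag names are the ones documented under "How to run it locally" below, and all values are placeholders you should replace with your own.

```
args:
  - -schedule=@daily
  - -s3-host=s3.eu-west-1.amazonaws.com
  - -s3-bucket=backsnap-example
  - -s3-access-key-id=...
  - -s3-secret-access-key=...
  - -restic-password=...
```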
155 | 156 | Then, deploy it: 157 | 158 | ``` 159 | kubectl apply -f manager.yaml 160 | ``` 161 | 162 | After this, you can observe your backups and their status using: 163 | 164 | ``` 165 | kubectl get pvcbackup -A 166 | ``` 167 | 168 | See below for how to create your own PVCBackups, and how to restore with PVCRestores. 169 | 170 | #### To uninstall 171 | 172 | Delete the manager, the namespace, the cluster roles and the CRDs created above: 173 | 174 | ```sh 175 | kubectl delete deployment -n backsnap backsnap-operator 176 | kubectl delete namespace backsnap 177 | kubectl delete clusterrole backsnap-manager 178 | 179 | kubectl delete -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/crd/bases/backsnap.skyb.it_pvcbackups.yaml 180 | kubectl delete -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/crd/bases/backsnap.skyb.it_pvcrestores.yaml 181 | ``` 182 | 183 | ## How to use it 184 | 185 | In the default and recommended configuration, Backsnap automatically creates 186 | PVCBackups on the configured schedule. You can override this schedule per 187 | namespace and per PVC. The recommended way to do this is per namespace, e.g. 188 | you can set your prod namespaces' schedule to back up hourly and your testing 189 | namespaces to an empty schedule to disable back-ups altogether. A concrete annotation example is shown at the end of this section. 190 | 191 | This way, any new namespaces will be backed up automatically unless you disable 192 | them explicitly. 193 | 194 | You can tell the operator to start the backup of a specific PVC by submitting a 195 | CR like the one in `config/samples/backsnap_v1alpha1_pvcbackup.yaml`: 196 | 197 | ``` 198 | apiVersion: backsnap.skyb.it/v1alpha1 199 | kind: PVCBackup 200 | metadata: 201 | name: your-data-backup 202 | namespace: your-application 203 | spec: 204 | pvc: your-data 205 | ttl: 1h 206 | ``` 207 | 208 | You'll see that a VolumeSnapshot, PVC and backup Job are created in the 209 | `your-application` namespace and you can follow along with the operator logs to 210 | see its progress. 211 | 212 | Similarly, to restore a PVC from backup, create a PVCRestore object like the 213 | following: 214 | 215 | ``` 216 | apiVersion: backsnap.skyb.it/v1alpha1 217 | kind: PVCRestore 218 | metadata: 219 | name: your-data-restore 220 | namespace: your-application 221 | spec: 222 | sourcePvc: your-data 223 | # By default, the sourceNamespace is the same namespace the PVCRestore 224 | # is in 225 | #sourceNamespace: "your-application-prod" 226 | # By default, the new PVC has the same name as the sourcePvc, but 227 | # you can override this 228 | #targetPvc: your-data-copy 229 | targetPvcSize: "10Gi" 230 | ``` 231 | 232 | For a concrete scenario of this, see 233 | [Migrating a PVC to another availability zone using Backsnap](docs/migrate_pvc_to_another_az.md). 234 | 235 | ### Troubleshooting 236 | 237 | If your PVCBackups or PVCRestores are failing, or not even starting, use the 238 | following checklist to find out why: 239 | 240 | - Is the operator running? (`kubectl get pods -n backsnap`) 241 | - Is the operator picking up the jobs? (`kubectl logs -n backsnap deployment/backsnap`, possibly 242 | `grep` by namespace and/or object name) 243 | - The operator will not delete the backup or restore Pods if they fail, so 244 | you can see if there are errors in their logs. (`kubectl logs -n your-application job/...`) 245 | 246 | If you are still having issues after the above steps, be sure to file an issue 247 | here on GitHub.
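As a concrete example of the per-namespace schedule override described at the start of this section, you can annotate a namespace directly. The namespace names and schedules here are illustrative:

```
# Back up all PVCs in "my-app" hourly:
kubectl annotate namespace my-app backsnap.skyb.it/schedule="@hourly"

# Disable backups for all PVCs in "my-test-namespace":
kubectl annotate namespace my-test-namespace backsnap.skyb.it/schedule=""
```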
248 | 249 | ## Security considerations 250 | 251 | This operator assumes full trust within the entire Kubernetes cluster. Because 252 | of existing Kubernetes limitations, security risks are difficult to mitigate. 253 | 254 | Once 255 | [cross-namespace data sources](https://kubernetes.io/blog/2023/01/02/cross-namespace-data-sources-alpha/) 256 | are beta in Kubernetes, this application will also optionally support them. This 257 | will mitigate security risks to a point where absolute trust in anyone using the 258 | cluster is no longer necessary. 259 | 260 | ### While making back-ups 261 | 262 | This operator creates a point-in-time copy of the PVC to back up. This has to be 263 | done in the same namespace, because snapshots cannot be taken or restored into 264 | other namespaces. Theoretically, this PVC could be read from and written to by 265 | anyone who can read/write PVCs in the target namespace, before the back-up 266 | starts. But since such parties can typically also read/write the target PVC 267 | itself, this is relatively low-risk. 268 | 269 | A higher risk is that this operator creates a Pod inside the target namespace which 270 | consumes the PVC copy and backs it up to the backup location using restic. This 271 | Pod contains the S3 credentials and the restic encryption password in its YAML. 272 | Anyone who can read Job or Pod definitions, or exec into Pods, can read these 273 | credentials and therefore read and write the back-ups. 274 | 275 | ### While keeping back-ups 276 | 277 | The restic password is used for client-side encryption. This means that the 278 | back-ups cannot be retrieved from the target location without also knowing the 279 | decryption password. This is a feature provided by restic. 280 | 281 | ### While restoring back-ups 282 | 283 | A PVCRestore object can be created in any namespace watched by the operator, and 284 | will be able to restore any PVC from any namespace backed up on the same backup 285 | location. This means that any user with access to any namespace on the cluster 286 | can eventually read the contents of any PVC in any other namespace. 287 | 288 | ## Contributing 289 | 290 | Suggestions are welcome as GitHub issues, and pull requests are much 291 | appreciated! This section should help you run the operator locally so that you 292 | can test your changes. 293 | 294 | If you need any help getting this to run, or would like to brainstorm about a 295 | feature, please file a GitHub issue as well. 296 | 297 | ## How to run it locally 298 | 299 | Run `go run ./cmd -help` to get a list of flags. Example run: 300 | 301 | ``` 302 | go run ./cmd \ 303 | -snapshotclass ... \ 304 | -namespaces ... \ 305 | -schedule "@daily" \ 306 | -s3-host s3.eu-west-1.amazonaws.com \ 307 | -s3-bucket backsnap-example \ 308 | -s3-access-key-id ... \ 309 | -s3-secret-access-key ... \ 310 | -restic-password ... 311 | ``` 312 | 313 | This will use your local credentials to access the cluster and create resources. 314 | Of course, if you simply have a `backsnap` binary, just run it as 315 | `backsnap -s3-host ...`. 316 | 317 | If you've made changes to the Restic image that is used in backup jobs, you can 318 | use `-image` to use your own image. Of course, your Kubelet must be able to access 319 | this image, so an `imagePullSecret` can be used if it is hosted on a private 320 | repository. 321 | 322 | ## Running the tests 323 | 324 | The tests require a Kubernetes cluster that can run Pods but is not already running the 325 | backsnap operator.
That means envtest, the typical unit test framework, won't 326 | work, because it won't run Pods. Instead, you can run the tests against 327 | minikube. 328 | 329 | ``` 330 | minikube start --driver=docker --addons=volumesnapshots,csi-hostpath-driver 331 | make test 332 | ``` 333 | 334 | The first minikube command starts minikube, adds it as a context to kubectl, and 335 | sets it as the active context, so that every kubectl command after it uses 336 | minikube. You can use `kubectl config get-contexts` to see your configured 337 | contexts, and can switch to the existing one you had using `kubectl config 338 | use-context NAME`. Then, you can switch back using `kubectl config use-context 339 | minikube` in order to run the tests again. 340 | 341 | ## Building it locally / running your changes on a cluster 342 | 343 | This project uses goreleaser. If you have local changes, you can use 344 | `goreleaser build --snapshot --clean` to create new binaries in `dist/`. If you 345 | need Docker images, you can run `goreleaser release --snapshot --clean` which 346 | will create them locally. If you want to test them on a Kubernetes cluster, you 347 | should push them to a (private) registry writable by you and readable from your 348 | cluster. You can use the `retag-images-for-test.sh` script for this, e.g.: 349 | 350 | ``` 351 | $ goreleaser release --snapshot --clean 352 | $ ./retag-images-for-test.sh --push my-private-registry/backsnap:test-new-feature 353 | $ kubectl set image -n backsnap deployment/backsnap-operator manager=my-private-registry/backsnap:test-new-feature 354 | ``` 355 | 356 | Note, on subsequent runs, that the last command does nothing if the image is 357 | already set to that value. If you just pushed a new image with the same name, 358 | ensure that the imagePullPolicy is set to Always and simply delete the Pod. 359 | 360 | Also, the commands above do not update the CRDs, so you may need to update them 361 | manually: 362 | 363 | ``` 364 | $ make 365 | $ kubectl apply -f config/crd/bases/backsnap.skyb.it_pvcbackups.yaml 366 | $ kubectl apply -f config/crd/bases/backsnap.skyb.it_pvcrestores.yaml 367 | ``` 368 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2024. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Package v1alpha1 contains API Schema definitions for the backsnap v1alpha1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=backsnap.skyb.it 20 | package v1alpha1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is the group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "backsnap.skyb.it", Version: "v1alpha1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /api/v1alpha1/pvcbackup_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // NOTES: 9 | // - All fields must have json tags 10 | // - Run `make` after editing this file to update the CRDs accordingly 11 | 12 | type PVCBackupSpec struct { 13 | // Name of the PVC to back up. Must be in the same namespace. 14 | PVCName string `json:"pvc"` 15 | 16 | // How long the backup object will be retained after the backup completes. 17 | // The controller will also always keep the last PVCBackup for a particular 18 | // PVC around, so that it knows when the last backup was completed. 19 | TTL metav1.Duration `json:"ttl,omitempty"` 20 | 21 | // NodeSelector is a selector which must be true for the backup Pod to fit 22 | // on a node. This can be used e.g. to select which type of node, or which 23 | // Availability Zone, performs a backup. 24 | // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ 25 | // +optional 26 | // +mapType=atomic 27 | NodeSelector map[string]string `json:"nodeSelector,omitempty"` 28 | 29 | // If specified, the backup Pod's tolerations. 30 | // +optional 31 | // +listType=atomic 32 | Tolerations []corev1.Toleration `json:"tolerations,omitempty"` 33 | 34 | // If specified, indicates the backup Pod's priority. 35 | // +optional 36 | PriorityClassName string `json:"priorityClassName,omitempty"` 37 | 38 | // If specified, indicates the labels to be put on the backup 39 | // VolumeSnapshot, backup temporary PVC, backup Job and backup Pod. 40 | // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels 41 | // +optional 42 | Labels map[string]string `json:"labels,omitempty"` 43 | 44 | // If specified, indicates the annotations to be put on the backup 45 | // VolumeSnapshot, backup temporary PVC, backup Job and backup Pod. This 46 | // SHOULD NOT include any backsnap.skyb.it annotations.
47 | // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations 48 | // +optional 49 | Annotations map[string]string `json:"annotations,omitempty"` 50 | } 51 | 52 | // +kubebuilder:validation:Enum=Succeeded;Failed 53 | type Result string 54 | 55 | type PVCBackupStatus struct { 56 | StartedAt *metav1.Time `json:"startedAt,omitempty"` 57 | FinishedAt *metav1.Time `json:"finishedAt,omitempty"` 58 | Duration *metav1.Duration `json:"duration,omitempty"` 59 | Result *Result `json:"result,omitempty"` 60 | } 61 | 62 | //+kubebuilder:object:root=true 63 | //+kubebuilder:subresource:status 64 | //+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time the backup was requested" 65 | //+kubebuilder:printcolumn:name="Started at",type="string",JSONPath=".status.startedAt",description="Time the backup job started running" 66 | //+kubebuilder:printcolumn:name="Duration",type="string",JSONPath=".status.duration",description="Time the backup job took to finish running" 67 | //+kubebuilder:printcolumn:name="Result",type="string",JSONPath=".status.result",description="Shows whether the backup succeeded or not" 68 | 69 | // PVCBackup is the Schema for the pvcbackups API 70 | type PVCBackup struct { 71 | metav1.TypeMeta `json:",inline"` 72 | metav1.ObjectMeta `json:"metadata,omitempty"` 73 | 74 | Spec PVCBackupSpec `json:"spec,omitempty"` 75 | Status PVCBackupStatus `json:"status,omitempty"` 76 | } 77 | 78 | //+kubebuilder:object:root=true 79 | 80 | // PVCBackupList contains a list of PVCBackup 81 | type PVCBackupList struct { 82 | metav1.TypeMeta `json:",inline"` 83 | metav1.ListMeta `json:"metadata,omitempty"` 84 | Items []PVCBackup `json:"items"` 85 | } 86 | 87 | func init() { 88 | SchemeBuilder.Register(&PVCBackup{}, &PVCBackupList{}) 89 | } 90 | -------------------------------------------------------------------------------- /api/v1alpha1/pvcrestore_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | "k8s.io/apimachinery/pkg/api/resource" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // PVCRestoreSpec defines the desired state of PVCRestore 10 | type PVCRestoreSpec struct { 11 | // The name of the source PVC that will be restored. The source PVC does not 12 | // need to exist anymore; this is just for finding its data. 13 | SourcePVC string `json:"sourcePvc,omitempty"` 14 | // The namespace that the source PVC used to exist in. If empty, assume that 15 | // the source namespace is the same as the namespace where this PVCRestore 16 | // object exists. 17 | SourceNamespace string `json:"sourceNamespace,omitempty"` 18 | // The snapshot to restore, or empty to restore the latest snapshot. 19 | SourceSnapshot string `json:"sourceSnapshot,omitempty"` 20 | 21 | // The name of the new PVC into which the source contents will be restored. 22 | // The PVC must not exist, and will be created. If empty, assume that the 23 | // target PVC has the same name as the source PVC. 24 | TargetPVC string `json:"targetPvc,omitempty"` 25 | 26 | // The size of the target PVC. Must be large enough to contain the backup's 27 | // contents. 28 | TargetPVCSize resource.Quantity `json:"targetPvcSize,omitempty"` 29 | 30 | // NodeSelector is a selector which must be true for the restore Pod to fit 31 | // on a node. This can be used e.g. to select which type of node, or which 32 | // Availability Zone, performs a restore.
This, in turn, may also determine 33 | // in which Availability Zone the restored volume is created. 34 | // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ 35 | // +optional 36 | // +mapType=atomic 37 | NodeSelector map[string]string `json:"nodeSelector,omitempty"` 38 | 39 | // If specified, the restore Pod's tolerations. 40 | // +optional 41 | // +listType=atomic 42 | Tolerations []corev1.Toleration `json:"tolerations,omitempty"` 43 | 44 | // If specified, indicates the restore Pod's priority. 45 | // +optional 46 | PriorityClassName string `json:"priorityClassName,omitempty"` 47 | 48 | // If specified, indicates the labels to be put on the restored PVC, restore 49 | // Job and restore Pod. 50 | // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels 51 | // +optional 52 | Labels map[string]string `json:"labels,omitempty"` 53 | 54 | // If specified, indicates the annotations to be put on the restored PVC, 55 | // restore Job and restore Pod. This SHOULD NOT include any backsnap.skyb.it 56 | // annotations. 57 | // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations 58 | // +optional 59 | Annotations map[string]string `json:"annotations,omitempty"` 60 | } 61 | 62 | // PVCRestoreStatus defines the observed state of PVCRestore 63 | type PVCRestoreStatus struct { 64 | StartedAt *metav1.Time `json:"startedAt,omitempty"` 65 | FinishedAt *metav1.Time `json:"finishedAt,omitempty"` 66 | Duration *metav1.Duration `json:"duration,omitempty"` 67 | Result *Result `json:"result,omitempty"` 68 | } 69 | 70 | //+kubebuilder:object:root=true 71 | //+kubebuilder:subresource:status 72 | //+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time the restore was requested" 73 | //+kubebuilder:printcolumn:name="Started at",type="string",JSONPath=".status.startedAt",description="Time the restore job started running" 74 | //+kubebuilder:printcolumn:name="Duration",type="string",JSONPath=".status.duration",description="Time the restore job took to finish running" 75 | //+kubebuilder:printcolumn:name="Result",type="string",JSONPath=".status.result",description="Shows whether the restore succeeded or not" 76 | 77 | // PVCRestore is the Schema for the pvcrestores API 78 | type PVCRestore struct { 79 | metav1.TypeMeta `json:",inline"` 80 | metav1.ObjectMeta `json:"metadata,omitempty"` 81 | 82 | Spec PVCRestoreSpec `json:"spec,omitempty"` 83 | Status PVCRestoreStatus `json:"status,omitempty"` 84 | } 85 | 86 | //+kubebuilder:object:root=true 87 | 88 | // PVCRestoreList contains a list of PVCRestore 89 | type PVCRestoreList struct { 90 | metav1.TypeMeta `json:",inline"` 91 | metav1.ListMeta `json:"metadata,omitempty"` 92 | Items []PVCRestore `json:"items"` 93 | } 94 | 95 | func init() { 96 | SchemeBuilder.Register(&PVCRestore{}, &PVCRestoreList{}) 97 | } 98 | -------------------------------------------------------------------------------- /api/v1alpha1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | 3 | // Code generated by controller-gen. DO NOT EDIT. 4 | 5 | package v1alpha1 6 | 7 | import ( 8 | "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | runtime "k8s.io/apimachinery/pkg/runtime" 11 | ) 12 | 13 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
14 | func (in *PVCBackup) DeepCopyInto(out *PVCBackup) { 15 | *out = *in 16 | out.TypeMeta = in.TypeMeta 17 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 18 | in.Spec.DeepCopyInto(&out.Spec) 19 | in.Status.DeepCopyInto(&out.Status) 20 | } 21 | 22 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PVCBackup. 23 | func (in *PVCBackup) DeepCopy() *PVCBackup { 24 | if in == nil { 25 | return nil 26 | } 27 | out := new(PVCBackup) 28 | in.DeepCopyInto(out) 29 | return out 30 | } 31 | 32 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 33 | func (in *PVCBackup) DeepCopyObject() runtime.Object { 34 | if c := in.DeepCopy(); c != nil { 35 | return c 36 | } 37 | return nil 38 | } 39 | 40 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 41 | func (in *PVCBackupList) DeepCopyInto(out *PVCBackupList) { 42 | *out = *in 43 | out.TypeMeta = in.TypeMeta 44 | in.ListMeta.DeepCopyInto(&out.ListMeta) 45 | if in.Items != nil { 46 | in, out := &in.Items, &out.Items 47 | *out = make([]PVCBackup, len(*in)) 48 | for i := range *in { 49 | (*in)[i].DeepCopyInto(&(*out)[i]) 50 | } 51 | } 52 | } 53 | 54 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PVCBackupList. 55 | func (in *PVCBackupList) DeepCopy() *PVCBackupList { 56 | if in == nil { 57 | return nil 58 | } 59 | out := new(PVCBackupList) 60 | in.DeepCopyInto(out) 61 | return out 62 | } 63 | 64 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 65 | func (in *PVCBackupList) DeepCopyObject() runtime.Object { 66 | if c := in.DeepCopy(); c != nil { 67 | return c 68 | } 69 | return nil 70 | } 71 | 72 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 73 | func (in *PVCBackupSpec) DeepCopyInto(out *PVCBackupSpec) { 74 | *out = *in 75 | out.TTL = in.TTL 76 | if in.NodeSelector != nil { 77 | in, out := &in.NodeSelector, &out.NodeSelector 78 | *out = make(map[string]string, len(*in)) 79 | for key, val := range *in { 80 | (*out)[key] = val 81 | } 82 | } 83 | if in.Tolerations != nil { 84 | in, out := &in.Tolerations, &out.Tolerations 85 | *out = make([]v1.Toleration, len(*in)) 86 | for i := range *in { 87 | (*in)[i].DeepCopyInto(&(*out)[i]) 88 | } 89 | } 90 | if in.Labels != nil { 91 | in, out := &in.Labels, &out.Labels 92 | *out = make(map[string]string, len(*in)) 93 | for key, val := range *in { 94 | (*out)[key] = val 95 | } 96 | } 97 | if in.Annotations != nil { 98 | in, out := &in.Annotations, &out.Annotations 99 | *out = make(map[string]string, len(*in)) 100 | for key, val := range *in { 101 | (*out)[key] = val 102 | } 103 | } 104 | } 105 | 106 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PVCBackupSpec. 107 | func (in *PVCBackupSpec) DeepCopy() *PVCBackupSpec { 108 | if in == nil { 109 | return nil 110 | } 111 | out := new(PVCBackupSpec) 112 | in.DeepCopyInto(out) 113 | return out 114 | } 115 | 116 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
117 | func (in *PVCBackupStatus) DeepCopyInto(out *PVCBackupStatus) { 118 | *out = *in 119 | if in.StartedAt != nil { 120 | in, out := &in.StartedAt, &out.StartedAt 121 | *out = (*in).DeepCopy() 122 | } 123 | if in.FinishedAt != nil { 124 | in, out := &in.FinishedAt, &out.FinishedAt 125 | *out = (*in).DeepCopy() 126 | } 127 | if in.Duration != nil { 128 | in, out := &in.Duration, &out.Duration 129 | *out = new(metav1.Duration) 130 | **out = **in 131 | } 132 | if in.Result != nil { 133 | in, out := &in.Result, &out.Result 134 | *out = new(Result) 135 | **out = **in 136 | } 137 | } 138 | 139 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PVCBackupStatus. 140 | func (in *PVCBackupStatus) DeepCopy() *PVCBackupStatus { 141 | if in == nil { 142 | return nil 143 | } 144 | out := new(PVCBackupStatus) 145 | in.DeepCopyInto(out) 146 | return out 147 | } 148 | 149 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 150 | func (in *PVCRestore) DeepCopyInto(out *PVCRestore) { 151 | *out = *in 152 | out.TypeMeta = in.TypeMeta 153 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 154 | in.Spec.DeepCopyInto(&out.Spec) 155 | in.Status.DeepCopyInto(&out.Status) 156 | } 157 | 158 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PVCRestore. 159 | func (in *PVCRestore) DeepCopy() *PVCRestore { 160 | if in == nil { 161 | return nil 162 | } 163 | out := new(PVCRestore) 164 | in.DeepCopyInto(out) 165 | return out 166 | } 167 | 168 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 169 | func (in *PVCRestore) DeepCopyObject() runtime.Object { 170 | if c := in.DeepCopy(); c != nil { 171 | return c 172 | } 173 | return nil 174 | } 175 | 176 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 177 | func (in *PVCRestoreList) DeepCopyInto(out *PVCRestoreList) { 178 | *out = *in 179 | out.TypeMeta = in.TypeMeta 180 | in.ListMeta.DeepCopyInto(&out.ListMeta) 181 | if in.Items != nil { 182 | in, out := &in.Items, &out.Items 183 | *out = make([]PVCRestore, len(*in)) 184 | for i := range *in { 185 | (*in)[i].DeepCopyInto(&(*out)[i]) 186 | } 187 | } 188 | } 189 | 190 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PVCRestoreList. 191 | func (in *PVCRestoreList) DeepCopy() *PVCRestoreList { 192 | if in == nil { 193 | return nil 194 | } 195 | out := new(PVCRestoreList) 196 | in.DeepCopyInto(out) 197 | return out 198 | } 199 | 200 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 201 | func (in *PVCRestoreList) DeepCopyObject() runtime.Object { 202 | if c := in.DeepCopy(); c != nil { 203 | return c 204 | } 205 | return nil 206 | } 207 | 208 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
209 | func (in *PVCRestoreSpec) DeepCopyInto(out *PVCRestoreSpec) { 210 | *out = *in 211 | out.TargetPVCSize = in.TargetPVCSize.DeepCopy() 212 | if in.NodeSelector != nil { 213 | in, out := &in.NodeSelector, &out.NodeSelector 214 | *out = make(map[string]string, len(*in)) 215 | for key, val := range *in { 216 | (*out)[key] = val 217 | } 218 | } 219 | if in.Tolerations != nil { 220 | in, out := &in.Tolerations, &out.Tolerations 221 | *out = make([]v1.Toleration, len(*in)) 222 | for i := range *in { 223 | (*in)[i].DeepCopyInto(&(*out)[i]) 224 | } 225 | } 226 | if in.Labels != nil { 227 | in, out := &in.Labels, &out.Labels 228 | *out = make(map[string]string, len(*in)) 229 | for key, val := range *in { 230 | (*out)[key] = val 231 | } 232 | } 233 | if in.Annotations != nil { 234 | in, out := &in.Annotations, &out.Annotations 235 | *out = make(map[string]string, len(*in)) 236 | for key, val := range *in { 237 | (*out)[key] = val 238 | } 239 | } 240 | } 241 | 242 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PVCRestoreSpec. 243 | func (in *PVCRestoreSpec) DeepCopy() *PVCRestoreSpec { 244 | if in == nil { 245 | return nil 246 | } 247 | out := new(PVCRestoreSpec) 248 | in.DeepCopyInto(out) 249 | return out 250 | } 251 | 252 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 253 | func (in *PVCRestoreStatus) DeepCopyInto(out *PVCRestoreStatus) { 254 | *out = *in 255 | if in.StartedAt != nil { 256 | in, out := &in.StartedAt, &out.StartedAt 257 | *out = (*in).DeepCopy() 258 | } 259 | if in.FinishedAt != nil { 260 | in, out := &in.FinishedAt, &out.FinishedAt 261 | *out = (*in).DeepCopy() 262 | } 263 | if in.Duration != nil { 264 | in, out := &in.Duration, &out.Duration 265 | *out = new(metav1.Duration) 266 | **out = **in 267 | } 268 | if in.Result != nil { 269 | in, out := &in.Result, &out.Result 270 | *out = new(Result) 271 | **out = **in 272 | } 273 | } 274 | 275 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PVCRestoreStatus. 276 | func (in *PVCRestoreStatus) DeepCopy() *PVCRestoreStatus { 277 | if in == nil { 278 | return nil 279 | } 280 | out := new(PVCRestoreStatus) 281 | in.DeepCopyInto(out) 282 | return out 283 | } 284 | -------------------------------------------------------------------------------- /chart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /chart/Chart.template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: backsnap 3 | description: Kubernetes backup operator - off-site point-in-time backups with history 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 
9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: %APP_VERSION%.%CHART_PATCH% 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "v%APP_VERSION%.%APP_PATCH%" 25 | 26 | # APP_VERSION: %APP_VERSION% 27 | # CHART_PATCH: %CHART_PATCH% 28 | -------------------------------------------------------------------------------- /chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: backsnap 3 | description: Kubernetes backup operator - off-site point-in-time backups with history 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.9.1 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "v0.9.0" 25 | 26 | # APP_VERSION: 0.9 27 | # CHART_PATCH: 1 28 | -------------------------------------------------------------------------------- /chart/README.md: -------------------------------------------------------------------------------- 1 | # backsnap - a kubernetes backup operator 2 | 3 | *Backsnap: kubernetes backups, chiropractor approved!* 4 | 5 | [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/backsnap)](https://artifacthub.io/packages/search?repo=backsnap) 6 | 7 | Backsnap performs backups of persistent volumes (PV/PVC) in a Kubernetes 8 | cluster, for use in your disaster recovery scenarios. 9 | 10 | It is unique in supporting point-in-time VolumeSnapshots, then making incremental 11 | off-site backups of those using `restic`. 12 | 13 | Backsnap by default assumes that all PVCs in your cluster should be backed up. 14 | No per-PVC object is necessary to start backing up any data. 
You can change this
15 | per PVC, per namespace or in operator configuration.
16 |
17 | ## How does it work?
18 |
19 | By default, Backsnap enumerates all PersistentVolumeClaims in your cluster,
20 | takes a point-in-time VolumeSnapshot of them, and uses `restic` to take a backup
21 | of the snapshot.
22 |
23 | By using VolumeSnapshots we are certain that a backup is internally consistent,
24 | which is important when backing up workloads such as databases. By using
25 | `restic` the backups are incremental and we automatically support all its
26 | features, such as restoring from a point in history, client-side encryption and
27 | multiple storage backends.
28 |
29 | The operator can run in automatic or manual mode. In manual mode (`-manual`
30 | flag), you create PVCBackup objects in the same namespace as a PVC you want to
31 | be backed up. The operator reacts to this by creating a snapshot, a
32 | point-in-time PVC and a Job to perform the backup, and cleans up afterwards. In
33 | automatic mode, the operator creates PVCBackup objects automatically according
34 | to schedule (you can still also create your own).
35 |
36 | The automatic schedule can be adjusted using a `backsnap.skyb.it/schedule`
37 | annotation on the target PVC or target namespace. By setting the annotation to
38 | the empty string, the PVC (or all PVCs in the namespace) will not be backed up. If
39 | neither the PVC nor the namespace has an annotation, the default schedule from the
40 | `-schedule` flag is used. You can set `-schedule=""` to disable automatic
41 | backups (this is the same as setting `-manual`, unless any PVCs or
42 | namespaces do have an overriding schedule set).
43 |
44 | ## Getting started
45 |
46 | ### Quick start using Helm
47 |
48 | ```
49 | helm repo add backsnap https://skybitsnl.github.io/backsnap
50 | helm install --create-namespace --namespace backsnap backsnap backsnap/backsnap -f values.yaml
51 | ```
52 |
53 | An example `values.yaml` follows. For more configuration options, see the
54 | ["Default values" page on ArtifactHub](https://artifacthub.io/packages/helm/backsnap/backsnap?modal=values).
55 |
56 | ```
57 | app:
58 | # In its default configuration, Backsnap creates a daily backup for all
59 | # PVCs in the cluster, starting immediately. In this example configuration, we
60 | # configure a single namespace instead, which causes it to back up only that
61 | # namespace. In order to back up the entire cluster state, it is recommended
62 | # not to configure any namespaces here, but configure per-namespace schedules
63 | # accordingly using annotations on the namespace or PVC.
64 | namespaces:
65 | allow: ["my-app"]
66 |
67 | # Default schedule for all PVCs within scope, unless overridden per namespace or
68 | # per PVC. The default is @daily, but any crontab syntax is supported.
69 | # See https://crontab.guru/ for examples and explanations.
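# For example, to back up at 03:30 every night instead, you could set the
# following (the cron expression here is illustrative):
#schedule: "30 3 * * *"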
70 | #schedule: "@daily"
71 |
72 | # Snapshot class, if no cluster-wide default is configured or you prefer
73 | # another one
74 | snapshotClass: ""
75 |
76 | # Storage class, if no cluster-wide default is configured or you prefer
77 | # another one
78 | storageClass: ""
79 |
80 | # S3 backup destination configuration
81 | s3:
82 | host: ""
83 | bucket: ""
84 | accessKey: ""
85 | secretKey: ""
86 |
87 | # Restic configuration
88 | restic:
89 | password: ""
90 | ```
91 |
92 | After this, you can observe your backups and their status using:
93 |
94 | ```
95 | kubectl get pvcbackup -A
96 | ```
97 |
98 | See below for how to create your own PVCBackups and how to restore them with PVCRestores.
99 |
100 | ### Manual installation with kubectl
101 |
102 | First, import the Backsnap CRDs:
103 |
104 | ```
105 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/crd/bases/backsnap.skyb.it_pvcbackups.yaml
106 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/crd/bases/backsnap.skyb.it_pvcrestores.yaml
107 | ```
108 |
109 | Then, create a `backsnap` namespace where the operator will run:
110 |
111 | ```
112 | kubectl create namespace backsnap
113 | ```
114 |
115 | Then, create the Service Account and its required roles. The files below create
116 | a ClusterRole which allows creating VolumeSnapshots, PersistentVolumeClaims and
117 | Jobs in any namespace, and allows reading various other resources. If you're
118 | just backing up a single namespace, you can tweak these files to create a Role
119 | which only allows this access to that namespace.
120 |
121 | Once [cross-namespace data sources](https://kubernetes.io/blog/2023/01/02/cross-namespace-data-sources-alpha/)
122 | are beta in Kubernetes, this application will also optionally support them,
123 | and the set of necessary ClusterRole rules will be significantly reduced.
124 |
125 | ```
126 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/role.yaml
127 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/role_binding.yaml
128 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/leader_election_role.yaml
129 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/leader_election_role_binding.yaml
130 | kubectl apply -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/rbac/service_account.yaml
131 | ```
132 |
133 | Then, we can deploy the operator.
134 |
135 | > [!CAUTION]
136 | > Note that depending on your operator configuration, Backsnap may start to take
137 | > back-ups of all PVCs in your cluster immediately. If you don't want this to
138 | > happen (yet), you can enable manual mode using the `-manual` flag. Eventually,
139 | > we recommend running Backsnap in its default mode. This ensures that you have at
140 | > least a daily snapshot of all PVCs in your cluster, even new ones, unless you
141 | > opt out explicitly.
142 |
143 | We download the Deployment YAML so that we can edit its default configuration before
144 | starting it.
145 |
146 | ```
147 | wget https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/manager/manager.yaml
148 | ```
149 |
150 | Edit the manager.yaml and adjust as necessary.
151 |
152 | - The version is set to `latest` by default; you may want to choose a specific tag here
153 | to prevent automatic updating.
154 | - See the args inside the default YAML for any configuration you may want to set; an illustrative snippet follows below.
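For illustration, here is a minimal sketch of what the relevant part of the container spec could look like after editing. The flag names come from the operator's own `-help` output; the S3 endpoint, bucket and credential values are placeholders to replace with your own:

```
containers:
- command:
  - /manager
  args:
  - --leader-elect
  - --schedule
  - "@daily"
  - --s3-host
  - s3.eu-west-1.amazonaws.com
  - --s3-bucket
  - backsnap-example
  - --s3-access-key-id
  - ...
  - --s3-secret-access-key
  - ...
  - --restic-password
  - ...
```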
155 |
156 | Then, deploy it:
157 |
158 | ```
159 | kubectl apply -f manager.yaml
160 | ```
161 |
162 | After this, you can observe your backups and their status using:
163 |
164 | ```
165 | kubectl get pvcbackup -A
166 | ```
167 |
168 | See below for how to create your own PVCBackups and how to restore them with PVCRestores.
169 |
170 | #### To uninstall
171 |
172 | Delete the manager Deployment, the namespace, the cluster role and the CRDs created above:
173 |
174 | ```sh
175 | kubectl delete deployment -n backsnap backsnap-operator
176 | kubectl delete namespace backsnap
177 | kubectl delete clusterrole backsnap-manager
178 |
179 | kubectl delete -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/crd/bases/backsnap.skyb.it_pvcbackups.yaml
180 | kubectl delete -f https://raw.githubusercontent.com/skybitsnl/backsnap/main/config/crd/bases/backsnap.skyb.it_pvcrestores.yaml
181 | ```
182 |
183 | ## How to use it
184 |
185 | In the default and recommended configuration, Backsnap automatically creates
186 | PVCBackups on the configured schedule. You can override this schedule per
187 | namespace and per PVC. The recommended way to do this is per namespace, e.g.
188 | you can set your prod namespaces' schedule to hourly and your testing
189 | namespaces' schedule to an empty string to disable back-ups altogether.
190 |
191 | This way, any new namespaces will be backed up automatically unless you disable
192 | them explicitly.
193 |
194 | You can tell the operator to start the backup of a specific PVC by submitting a
195 | CR like the one in `config/samples/backsnap_v1alpha1_pvcbackup.yaml`:
196 |
197 | ```
198 | apiVersion: backsnap.skyb.it/v1alpha1
199 | kind: PVCBackup
200 | metadata:
201 | name: your-data-backup
202 | namespace: your-application
203 | spec:
204 | pvc: your-data
205 | ttl: 1h
206 | ```
207 |
208 | You'll see that a VolumeSnapshot, PVC and backup Job are created in the
209 | `your-application` namespace and you can follow along with the operator logs to
210 | see its progression.
211 |
212 | Similarly, to restore a PVC from backup, create a PVCRestore object like the
213 | following:
214 |
215 | ```
216 | apiVersion: backsnap.skyb.it/v1alpha1
217 | kind: PVCRestore
218 | metadata:
219 | name: your-data-restore
220 | namespace: your-application
221 | spec:
222 | sourcePvc: your-data
223 | # By default, the sourceNamespace is the same namespace the PVCRestore
224 | # is in
225 | #sourceNamespace: "your-application-prod"
226 | # By default, the new PVC has the same name as the sourcePvc, but
227 | # you can override this
228 | #targetPvc: your-data-copy
229 | targetPvcSize: "10Gi"
230 | ```
231 |
232 | For a concrete scenario of this, see
233 | [Migrating a PVC to another availability zone using Backsnap](docs/migrate_pvc_to_another_az.md).
234 |
235 | ### Troubleshooting
236 |
237 | If your PVCBackups or PVCRestores are failing, or not even starting, use the
238 | following checklist to find out why:
239 |
240 | - Is the operator running? (`kubectl get pods -n backsnap`)
241 | - Is the operator picking up the jobs? (`kubectl logs -n backsnap deployment/backsnap-operator`, possibly
242 | `grep` by namespace and/or object name)
243 | - The operator will not delete the backup or restore Pods if they failed, so
244 | you can see if there are errors in their logs. (`kubectl logs -n your-application job/...`)
245 |
246 | If you are still having issues after the above steps, be sure to file an issue
247 | here on GitHub.
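When filing an issue, it helps to include the output of a debugging pass along these lines (the namespace and object names are illustrative, and the Job name depends on your PVCBackup):

```
kubectl get pvcbackup -n your-application
kubectl get pods -n backsnap
kubectl logs -n backsnap deployment/backsnap-operator | grep your-data-backup
kubectl get jobs -n your-application
kubectl logs -n your-application job/your-data-backup
```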
248 |
249 | ## Security considerations
250 |
251 | This operator assumes full trust within the entire Kubernetes cluster. Because
252 | of existing Kubernetes limitations, security risks are difficult to mitigate.
253 |
254 | Once
255 | [cross-namespace data sources](https://kubernetes.io/blog/2023/01/02/cross-namespace-data-sources-alpha/)
256 | are beta in Kubernetes, this application will also optionally support them. This
257 | will mitigate security risks to the point where absolute trust in anyone using the
258 | cluster is no longer necessary.
259 |
260 | ### While making back-ups
261 |
262 | This operator creates a point-in-time copy of the PVC to back up. This has to be
263 | done in the same namespace, because snapshots cannot be taken or restored into
264 | other namespaces. Theoretically, this PVC could be read from and written to by
265 | anyone who can read/write PVCs in the target namespace, before the back-up
266 | starts. But since such parties can typically also read/write the target PVC
267 | itself, this is relatively low-risk.
268 |
269 | A higher risk is that this operator creates a Pod inside the target namespace which
270 | consumes the PVC copy and backs it up to the backup location using restic. This
271 | Pod contains the S3 credentials and the restic encryption password in its YAML.
272 | Anyone who can read Job or Pod definitions, or exec into Pods, can read these
273 | credentials and therefore read and write the back-ups.
274 |
275 | ### While keeping back-ups
276 |
277 | The restic password is used for client-side encryption. This means that the
278 | back-ups cannot be retrieved from the target location without also knowing the
279 | decryption password. This is a feature provided by restic.
280 |
281 | ### While restoring back-ups
282 |
283 | A PVCRestore object can be created in any namespace watched by the operator, and
284 | will be able to restore any PVC from any namespace backed up on the same backup
285 | location. This means that any user with access to any namespace on the cluster
286 | can eventually read the contents of any PVC in any other namespace.
287 |
288 | ## Contributing
289 |
290 | Suggestions are welcome as GitHub issues, and pull requests are much
291 | appreciated! This section should help you run the operator locally so that you
292 | can test your changes.
293 |
294 | If you need any help getting this to run, or would like to brainstorm about a
295 | feature, please file a GitHub issue as well.
296 |
297 | ## How to run it locally
298 |
299 | Run `go run ./cmd -help` to get a list of flags. Example run:
300 |
301 | ```
302 | go run ./cmd \
303 | -snapshotclass ... \
304 | -namespaces ... \
305 | -schedule "@daily" \
306 | -s3-host s3.eu-west-1.amazonaws.com \
307 | -s3-bucket backsnap-example \
308 | -s3-access-key-id ... \
309 | -s3-secret-access-key ... \
310 | -restic-password ...
311 | ```
312 |
313 | This will use your local credentials to access the cluster and create resources.
314 | Of course, if you simply have a `backsnap` binary, just run it as
315 | `backsnap -s3-host ...`.
316 |
317 | If you've made changes to the Restic image that is used in backup jobs, you can
318 | use `-image` to point at your own image. Of course, your Kubelet must be able to access
319 | this image, so `-imagepullsecret` can be used if it is hosted in a private
320 | repository.
321 |
322 | ## Running the tests
323 |
324 | The tests require a Kubernetes cluster that can run Pods, but does not run the
325 | backsnap operator.
That means envtest, the typical unit test framework, won't
326 | work, because it won't run Pods. Instead, you can run the tests against
327 | minikube.
328 |
329 | ```
330 | minikube start --driver=docker --addons=volumesnapshots,csi-hostpath-driver
331 | make test
332 | ```
333 |
334 | The first minikube command starts minikube, adds it as a context to kubectl, and
335 | sets it as the active context, so that every kubectl command after it uses
336 | minikube. You can use `kubectl config get-contexts` to see your configured
337 | contexts, and can switch to the existing one you had using `kubectl config
338 | use-context NAME`. Then, you can switch back using `kubectl config use-context
339 | minikube` in order to run the tests again.
340 |
341 | ## Building it locally / running your changes on a cluster
342 |
343 | This project uses goreleaser. If you have local changes, you can use
344 | `goreleaser build --snapshot --clean` to create new binaries in `dist/`. If you
345 | need Docker images, you can run `goreleaser release --snapshot --clean` which
346 | will create them locally. If you want to test them on a Kubernetes cluster, you
347 | should push them to a (private) registry writable by you and readable from your
348 | cluster. You can use the `retag-images-for-test.sh` script for this, e.g.:
349 |
350 | ```
351 | $ goreleaser release --snapshot --clean
352 | $ ./retag-images-for-test.sh --push my-private-registry/backsnap:test-new-feature
353 | $ kubectl set image -n backsnap deployment/backsnap-operator manager=my-private-registry/backsnap:test-new-feature
354 | ```
355 |
356 | Note that, on subsequent runs, the last command does nothing if the image is
357 | already set to that value. If you just pushed a new image with the same name,
358 | ensure that the imagePullPolicy is set to Always and simply delete the Pod.
359 |
360 | Also, the commands above do not update the CRDs, so you may need to update them
361 | manually:
362 |
363 | ```
364 | $ make
365 | $ kubectl apply -f config/crd/bases/backsnap.skyb.it_pvcbackups.yaml
366 | $ kubectl apply -f config/crd/bases/backsnap.skyb.it_pvcrestores.yaml
367 | ```
368 | --------------------------------------------------------------------------------
/chart/bump.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | VERSION="$1"
4 | APP_PATCH="$2"
5 |
6 | if [ -z "$VERSION" -o -z "$APP_PATCH" ]; then
7 | echo "Usage: $0 <major>.<minor> <patch>"
8 | echo ""
9 | echo "Bumps the app version to <major>.<minor>.<patch>, and bumps the "
10 | echo "chart version to *at least* <major>.<minor>.<patch>, or higher."
11 | echo ""
12 | echo "Note the dot between major and minor, and the space between minor and patch."
13 | echo ""
14 | echo "For example: $0 0.3 2"
15 | echo "This will bump the app version to 0.3.2, and the chart version to 0.3.2,"
16 | echo "unless the current chart version is already 0.3.2 or newer, in which case"
17 | echo "it will bump to 0.3.(current patch + 1)."
18 | exit 1
19 | fi
20 |
21 | # First, copy CRDs and other files from the repo into the chart.
22 | cp ../config/crd/bases/*.yaml templates/crd
23 | cp ../config/rbac/leader_election_role* templates/rbac
24 | cp ../config/rbac/role* templates/rbac
25 | cp ../config/rbac/service_account.yaml templates
26 | cp ../README.md .
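# Note: these copies are overwritten on every bump, so edit the originals in
# ../config and ../README.md rather than the copies inside the chart.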
27 | 28 | # Replace the namespaces and managed-by 29 | find templates -type f -exec gsed -i \ 30 | -e 's/namespace: backsnap/namespace: {{ .Release.Namespace }}/' \ 31 | -e 's/managed-by: kustomize/managed-by: Helm/' \ 32 | \ 33 | {} \; 34 | 35 | # Figure out the versioning 36 | CURRENT_VERSION="$(cat Chart.yaml | grep APP_VERSION: | awk '{print $3}')" 37 | CHART_PATCH="$APP_PATCH" 38 | if [ "$CURRENT_VERSION" = "$VERSION" ]; then 39 | CURRENT_CHART_PATCH="$(cat Chart.yaml | grep CHART_PATCH: | awk '{print $3}')" 40 | CHART_PATCH="$(($CURRENT_CHART_PATCH + 1))" 41 | fi 42 | 43 | # Bump version in Chart.yaml 44 | cp Chart.template.yaml Chart.yaml 45 | gsed -i -e "s/%APP_VERSION%/$VERSION/" -e "s/%APP_PATCH%/$APP_PATCH/" -e "s/%CHART_PATCH%/$CHART_PATCH/" Chart.yaml 46 | -------------------------------------------------------------------------------- /chart/release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pushd ../docs 4 | helm package ../chart 5 | helm repo index . 6 | popd 7 | -------------------------------------------------------------------------------- /chart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "backsnap.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "backsnap.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "backsnap.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "backsnap.labels" -}} 37 | helm.sh/chart: {{ include "backsnap.chart" . }} 38 | {{ include "backsnap.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "backsnap.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "backsnap.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "backsnap.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "backsnap.fullname" .) 
.Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /chart/templates/crd/backsnap.skyb.it_pvcbackups.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.15.0 7 | name: pvcbackups.backsnap.skyb.it 8 | spec: 9 | group: backsnap.skyb.it 10 | names: 11 | kind: PVCBackup 12 | listKind: PVCBackupList 13 | plural: pvcbackups 14 | singular: pvcbackup 15 | scope: Namespaced 16 | versions: 17 | - additionalPrinterColumns: 18 | - description: Time the backup was requested 19 | jsonPath: .metadata.creationTimestamp 20 | name: Age 21 | type: date 22 | - description: Time the backup job started running 23 | jsonPath: .status.startedAt 24 | name: Started at 25 | type: string 26 | - description: Time the backup job took to finish running 27 | jsonPath: .status.duration 28 | name: Duration 29 | type: string 30 | - description: Shows whether the backup succeeded or not 31 | jsonPath: .status.result 32 | name: Result 33 | type: string 34 | name: v1alpha1 35 | schema: 36 | openAPIV3Schema: 37 | description: PVCBackup is the Schema for the pvcbackups API 38 | properties: 39 | apiVersion: 40 | description: |- 41 | APIVersion defines the versioned schema of this representation of an object. 42 | Servers should convert recognized schemas to the latest internal value, and 43 | may reject unrecognized values. 44 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 45 | type: string 46 | kind: 47 | description: |- 48 | Kind is a string value representing the REST resource this object represents. 49 | Servers may infer this from the endpoint the client submits requests to. 50 | Cannot be updated. 51 | In CamelCase. 52 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 53 | type: string 54 | metadata: 55 | type: object 56 | spec: 57 | properties: 58 | annotations: 59 | additionalProperties: 60 | type: string 61 | description: |- 62 | If specified, indicates the annotations to be put on the backup 63 | VolumeSnapshot, backup temporary PVC, backup Job and backup Pod. This 64 | SHOULD NOT include any backsnap.skyb.it annotations. 65 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations 66 | type: object 67 | labels: 68 | additionalProperties: 69 | type: string 70 | description: |- 71 | If specified, indicates the labels to be put on the backup 72 | VolumeSnapshot, backup temporary PVC, backup Job and backup Pod. 73 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels 74 | type: object 75 | nodeSelector: 76 | additionalProperties: 77 | type: string 78 | description: |- 79 | NodeSelector is a selector which must be true for the backup Pod to fit 80 | on a node. This can be used e.g. to select which type of node, or which 81 | Availability Zone, performs a backup. 82 | More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ 83 | type: object 84 | x-kubernetes-map-type: atomic 85 | priorityClassName: 86 | description: If specified, indicates the backup Pod's priority. 87 | type: string 88 | pvc: 89 | description: Name of the PVC to back up. 
Must be in the same namespace.
90 | type: string
91 | tolerations:
92 | description: If specified, the backup Pod's tolerations.
93 | items:
94 | description: |-
95 | The pod this Toleration is attached to tolerates any taint that matches
96 | the triple <key,value,effect> using the matching operator <operator>.
97 | properties:
98 | effect:
99 | description: |-
100 | Effect indicates the taint effect to match. Empty means match all taint effects.
101 | When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
102 | type: string
103 | key:
104 | description: |-
105 | Key is the taint key that the toleration applies to. Empty means match all taint keys.
106 | If the key is empty, operator must be Exists; this combination means to match all values and all keys.
107 | type: string
108 | operator:
109 | description: |-
110 | Operator represents a key's relationship to the value.
111 | Valid operators are Exists and Equal. Defaults to Equal.
112 | Exists is equivalent to wildcard for value, so that a pod can
113 | tolerate all taints of a particular category.
114 | type: string
115 | tolerationSeconds:
116 | description: |-
117 | TolerationSeconds represents the period of time the toleration (which must be
118 | of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
119 | it is not set, which means tolerate the taint forever (do not evict). Zero and
120 | negative values will be treated as 0 (evict immediately) by the system.
121 | format: int64
122 | type: integer
123 | value:
124 | description: |-
125 | Value is the taint value the toleration matches to.
126 | If the operator is Exists, the value should be empty, otherwise just a regular string.
127 | type: string
128 | type: object
129 | type: array
130 | x-kubernetes-list-type: atomic
131 | ttl:
132 | description: |-
133 | How long the backup object will be retained after the backup completes.
134 | The controller will also always keep the last PVCBackup for a particular
135 | PVC around, so that it knows when the last backup was completed.
136 | type: string 137 | required: 138 | - pvc 139 | type: object 140 | status: 141 | properties: 142 | duration: 143 | type: string 144 | finishedAt: 145 | format: date-time 146 | type: string 147 | result: 148 | enum: 149 | - Succeeded 150 | - Failed 151 | type: string 152 | startedAt: 153 | format: date-time 154 | type: string 155 | type: object 156 | type: object 157 | served: true 158 | storage: true 159 | subresources: 160 | status: {} 161 | -------------------------------------------------------------------------------- /chart/templates/crd/backsnap.skyb.it_pvcrestores.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.15.0 7 | name: pvcrestores.backsnap.skyb.it 8 | spec: 9 | group: backsnap.skyb.it 10 | names: 11 | kind: PVCRestore 12 | listKind: PVCRestoreList 13 | plural: pvcrestores 14 | singular: pvcrestore 15 | scope: Namespaced 16 | versions: 17 | - additionalPrinterColumns: 18 | - description: Time the restore was requested 19 | jsonPath: .metadata.creationTimestamp 20 | name: Age 21 | type: date 22 | - description: Time the restore job started running 23 | jsonPath: .status.startedAt 24 | name: Started at 25 | type: string 26 | - description: Time the restore job took to finish running 27 | jsonPath: .status.duration 28 | name: Duration 29 | type: string 30 | - description: Shows whether the restore succeeded or not 31 | jsonPath: .status.result 32 | name: Result 33 | type: string 34 | name: v1alpha1 35 | schema: 36 | openAPIV3Schema: 37 | description: PVCRestore is the Schema for the pvcrestores API 38 | properties: 39 | apiVersion: 40 | description: |- 41 | APIVersion defines the versioned schema of this representation of an object. 42 | Servers should convert recognized schemas to the latest internal value, and 43 | may reject unrecognized values. 44 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 45 | type: string 46 | kind: 47 | description: |- 48 | Kind is a string value representing the REST resource this object represents. 49 | Servers may infer this from the endpoint the client submits requests to. 50 | Cannot be updated. 51 | In CamelCase. 52 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 53 | type: string 54 | metadata: 55 | type: object 56 | spec: 57 | description: PVCRestoreSpec defines the desired state of PVCRestore 58 | properties: 59 | annotations: 60 | additionalProperties: 61 | type: string 62 | description: |- 63 | If specified, indicates the annotations to be put on the restored PVC, 64 | restore Job and restore Pod. This SHOULD NOT include any backsnap.skyb.it 65 | annotations. 66 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations 67 | type: object 68 | labels: 69 | additionalProperties: 70 | type: string 71 | description: |- 72 | If specified, indicates the labels to be put on the restored PVC, restore 73 | Job and restore Pod. 74 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels 75 | type: object 76 | nodeSelector: 77 | additionalProperties: 78 | type: string 79 | description: |- 80 | NodeSelector is a selector which must be true for the restore Pod to fit 81 | on a node. This can be used e.g. 
to select which type of node, or which
82 | Availability Zone, performs a restore. This, in turn, may also determine
83 | in which Availability Zone the restored volume is created.
84 | More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
85 | type: object
86 | x-kubernetes-map-type: atomic
87 | priorityClassName:
88 | description: If specified, indicates the restore Pod's priority.
89 | type: string
90 | sourceNamespace:
91 | description: |-
92 | The namespace that the source PVC used to exist in. If empty, assume that
93 | the source namespace is the same as the namespace where this PVCRestore
94 | object exists.
95 | type: string
96 | sourcePvc:
97 | description: |-
98 | The name of the source PVC that will be restored. The source PVC does not
99 | need to exist anymore; this is just for finding its data.
100 | type: string
101 | sourceSnapshot:
102 | description: The snapshot to restore, or empty to restore the latest
103 | snapshot.
104 | type: string
105 | targetPvc:
106 | description: |-
107 | The name of the new PVC into which the source contents will be restored.
108 | The PVC must not exist, and will be created. If empty, assume that the
109 | target PVC is the same name as the source PVC.
110 | type: string
111 | targetPvcSize:
112 | anyOf:
113 | - type: integer
114 | - type: string
115 | description: |-
116 | The size of the target PVC. Must be large enough to contain the backup's
117 | contents.
118 | pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
119 | x-kubernetes-int-or-string: true
120 | tolerations:
121 | description: If specified, the restore Pod's tolerations.
122 | items:
123 | description: |-
124 | The pod this Toleration is attached to tolerates any taint that matches
125 | the triple <key,value,effect> using the matching operator <operator>.
126 | properties:
127 | effect:
128 | description: |-
129 | Effect indicates the taint effect to match. Empty means match all taint effects.
130 | When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
131 | type: string
132 | key:
133 | description: |-
134 | Key is the taint key that the toleration applies to. Empty means match all taint keys.
135 | If the key is empty, operator must be Exists; this combination means to match all values and all keys.
136 | type: string
137 | operator:
138 | description: |-
139 | Operator represents a key's relationship to the value.
140 | Valid operators are Exists and Equal. Defaults to Equal.
141 | Exists is equivalent to wildcard for value, so that a pod can
142 | tolerate all taints of a particular category.
143 | type: string
144 | tolerationSeconds:
145 | description: |-
146 | TolerationSeconds represents the period of time the toleration (which must be
147 | of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
148 | it is not set, which means tolerate the taint forever (do not evict). Zero and
149 | negative values will be treated as 0 (evict immediately) by the system.
150 | format: int64
151 | type: integer
152 | value:
153 | description: |-
154 | Value is the taint value the toleration matches to.
155 | If the operator is Exists, the value should be empty, otherwise just a regular string.
156 | type: string 157 | type: object 158 | type: array 159 | x-kubernetes-list-type: atomic 160 | type: object 161 | status: 162 | description: PVCRestoreStatus defines the observed state of PVCRestore 163 | properties: 164 | duration: 165 | type: string 166 | finishedAt: 167 | format: date-time 168 | type: string 169 | result: 170 | enum: 171 | - Succeeded 172 | - Failed 173 | type: string 174 | startedAt: 175 | format: date-time 176 | type: string 177 | type: object 178 | type: object 179 | served: true 180 | storage: true 181 | subresources: 182 | status: {} 183 | -------------------------------------------------------------------------------- /chart/templates/manager.yaml: -------------------------------------------------------------------------------- 1 | {{- $s3Host := .Values.app.s3.host | required ".Values.app.s3.host is required." -}} 2 | {{- $s3Bucket := .Values.app.s3.bucket | required ".Values.app.s3.bucket is required." -}} 3 | {{- $s3AccessKey := .Values.app.s3.accessKey | required ".Values.app.s3.accessKey is required." -}} 4 | {{- $s3SecretKey := .Values.app.s3.secretKey | required ".Values.app.s3.secretKey is required." -}} 5 | {{- $resticPassword := .Values.app.restic.password | required ".Values.app.restic.password is required." -}} 6 | --- 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | metadata: 10 | name: backsnap-operator 11 | namespace: {{ .Release.Namespace }} 12 | labels: 13 | app: backsnap 14 | control-plane: controller-manager 15 | app.kubernetes.io/name: deployment 16 | app.kubernetes.io/instance: controller-manager 17 | app.kubernetes.io/component: manager 18 | app.kubernetes.io/created-by: backsnap 19 | app.kubernetes.io/part-of: backsnap 20 | app.kubernetes.io/managed-by: Helm 21 | spec: 22 | selector: 23 | matchLabels: 24 | app: backsnap 25 | replicas: 1 26 | template: 27 | metadata: 28 | annotations: 29 | kubectl.kubernetes.io/default-container: manager 30 | {{- with .Values.podAnnotations }} 31 | {{- toYaml . | nindent 8 }} 32 | {{- end }} 33 | labels: 34 | app: backsnap 35 | spec: 36 | {{- with .Values.podSecurityContext }} 37 | securityContext: 38 | {{- toYaml . | nindent 8 }} 39 | {{- end }} 40 | {{- with .Values.nodeSelector }} 41 | nodeSelector: 42 | {{- toYaml . | nindent 8 }} 43 | {{- end }} 44 | {{- with .Values.affinity }} 45 | affinity: 46 | {{- toYaml . | nindent 8 }} 47 | {{- end }} 48 | {{- with .Values.tolerations }} 49 | tolerations: 50 | {{- toYaml . | nindent 8 }} 51 | {{- end }} 52 | containers: 53 | - command: 54 | - /manager 55 | args: 56 | # Start a leader election, ensuring that even with two replicas, only one operator 57 | # is active at a time. 58 | - --leader-elect 59 | # In order to include only particular namespaces (default is all namespaces): 60 | # - --namespaces 61 | # - one,two,three 62 | {{- with .Values.app.namespaces.allow }} 63 | - --namespaces 64 | - {{ join "," . }} 65 | {{- end }} 66 | 67 | # Alternatively, in order to exclude namespaces. We'd recommend setting a schedule 68 | # annotation on the namespace instead of using this option, if possible. 69 | # - --exclude-namespaces 70 | # - dev,staging,testing 71 | {{- with .Values.app.namespaces.exclude }} 72 | - --exclude-namespaces 73 | - {{ join "," . }} 74 | {{- end }} 75 | 76 | # Set a default schedule. The default is @daily. We'd recommend keeping this default 77 | # and overriding it on a per-namespace basis. 78 | {{- if .Values.app.schedule }} 79 | - --schedule 80 | - "{{ .Values.app.schedule }}" 81 | {{- end }} 82 | 83 | # Enable manual mode. 
This never creates automatic PVCBackups, even if a namespace or 84 | # PVC sets a particular schedule in its annotations. If this is set, the operator only 85 | # performs back-ups for PVCBackup objects created externally (e.g., by you). 86 | # - --manual 87 | {{- if .Values.app.manual }} 88 | - --manual 89 | {{- end }} 90 | 91 | # Backsnap automatically creates volume snapshots while it is preparing 92 | # for a backup. The volume snapshot class name set here is used for all 93 | # volume snapshots created by Backsnap. Otherwise, the cluster default 94 | # volume snapshot class name is used. 95 | # - --snapshotclass 96 | # - csi-snapshot 97 | {{- if .Values.app.snapshotClass }} 98 | - --snapshotclass 99 | - {{ .Values.app.snapshotClass }} 100 | {{- end }} 101 | 102 | # The storage class name set here is used for all PVCs created by Backsnap 103 | # both while creating a back-up and while restoring one. Otherwise, the 104 | # cluster default storage class is used. 105 | # - --storageclass 106 | # - csi-block 107 | {{- if .Values.app.storageClass }} 108 | - --storageclass 109 | - {{ .Values.app.storageClass }} 110 | {{- end }} 111 | 112 | # The location and credentials of the S3 bucket where backups will be stored. 113 | # The S3 hostname can be host, host:port or http://host:port/. Any target supported 114 | # by Restic is supported, also e.g. MinIO. 115 | - --s3-host 116 | - {{ $s3Host }} 117 | - --s3-bucket 118 | - {{ $s3Bucket }} 119 | - --s3-access-key-id 120 | - {{ $s3AccessKey }} 121 | - --s3-secret-access-key 122 | - {{ $s3SecretKey }} 123 | 124 | # The encryption key by which Restic will client-side encrypt your 125 | # backup. Do not lose this! 126 | - --restic-password 127 | - {{ $resticPassword }} 128 | 129 | # There are other supported flags, too. See the manager --help (or 130 | # main.go) for more information. 131 | 132 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 133 | name: manager 134 | 135 | {{- with .Values.app.securityContext }} 136 | securityContext: 137 | {{- toYaml . | nindent 10 }} 138 | {{- end }} 139 | 140 | livenessProbe: 141 | httpGet: 142 | path: /healthz 143 | port: 8081 144 | initialDelaySeconds: 15 145 | periodSeconds: 20 146 | 147 | readinessProbe: 148 | httpGet: 149 | path: /readyz 150 | port: 8081 151 | initialDelaySeconds: 5 152 | periodSeconds: 10 153 | 154 | resources: 155 | {{- toYaml .Values.resources | nindent 10 }} 156 | 157 | serviceAccountName: controller-manager 158 | terminationGracePeriodSeconds: 10 159 | -------------------------------------------------------------------------------- /chart/templates/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
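# controller-runtime's leader election writes Lease objects (and, in older
# versions, ConfigMaps), which is why both resource types are granted below.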
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: role 7 | app.kubernetes.io/instance: leader-election-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: backsnap 10 | app.kubernetes.io/part-of: backsnap 11 | app.kubernetes.io/managed-by: Helm 12 | name: leader-election-role 13 | namespace: {{ .Release.Namespace }} 14 | rules: 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - configmaps 19 | verbs: 20 | - get 21 | - list 22 | - watch 23 | - create 24 | - update 25 | - patch 26 | - delete 27 | - apiGroups: 28 | - coordination.k8s.io 29 | resources: 30 | - leases 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - create 36 | - update 37 | - patch 38 | - delete 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - events 43 | verbs: 44 | - create 45 | - patch 46 | -------------------------------------------------------------------------------- /chart/templates/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: rolebinding 6 | app.kubernetes.io/instance: leader-election-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: Helm 11 | name: leader-election-rolebinding 12 | namespace: {{ .Release.Namespace }} 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: Role 16 | name: leader-election-role 17 | subjects: 18 | - kind: ServiceAccount 19 | name: controller-manager 20 | namespace: {{ .Release.Namespace }} 21 | -------------------------------------------------------------------------------- /chart/templates/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: backsnap-manager 6 | rules: 7 | - apiGroups: 8 | - backsnap.skyb.it 9 | resources: 10 | - pvcbackups 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - backsnap.skyb.it 21 | resources: 22 | - pvcbackups/finalizers 23 | verbs: 24 | - update 25 | - apiGroups: 26 | - backsnap.skyb.it 27 | resources: 28 | - pvcbackups/status 29 | verbs: 30 | - get 31 | - patch 32 | - update 33 | - apiGroups: 34 | - backsnap.skyb.it 35 | resources: 36 | - pvcrestores 37 | verbs: 38 | - create 39 | - delete 40 | - get 41 | - list 42 | - patch 43 | - update 44 | - watch 45 | - apiGroups: 46 | - backsnap.skyb.it 47 | resources: 48 | - pvcrestores/finalizers 49 | verbs: 50 | - update 51 | - apiGroups: 52 | - backsnap.skyb.it 53 | resources: 54 | - pvcrestores/status 55 | verbs: 56 | - get 57 | - patch 58 | - update 59 | - apiGroups: 60 | - batch 61 | resources: 62 | - jobs 63 | verbs: 64 | - create 65 | - delete 66 | - get 67 | - list 68 | - patch 69 | - update 70 | - watch 71 | - apiGroups: 72 | - "" 73 | resources: 74 | - namespaces 75 | verbs: 76 | - get 77 | - list 78 | - watch 79 | - apiGroups: 80 | - "" 81 | resources: 82 | - persistentvolumeclaims 83 | verbs: 84 | - create 85 | - delete 86 | - get 87 | - list 88 | - patch 89 | - update 90 | - watch 91 | - apiGroups: 92 | - "" 93 | resources: 94 | - persistentvolumeclaims/finalizers 95 | verbs: 96 | - update 97 | - apiGroups: 98 | - snapshot.storage.k8s.io 99 | resources: 100 | - 
volumesnapshots 101 | verbs: 102 | - create 103 | - delete 104 | - get 105 | - list 106 | - patch 107 | - update 108 | - watch 109 | -------------------------------------------------------------------------------- /chart/templates/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: manager-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: Helm 11 | name: manager-rolebinding 12 | namespace: {{ .Release.Namespace }} 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: backsnap-manager 17 | subjects: 18 | - kind: ServiceAccount 19 | name: controller-manager 20 | namespace: {{ .Release.Namespace }} 21 | -------------------------------------------------------------------------------- /chart/templates/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: serviceaccount 6 | app.kubernetes.io/instance: controller-manager-sa 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: Helm 11 | name: controller-manager 12 | namespace: {{ .Release.Namespace }} 13 | -------------------------------------------------------------------------------- /chart/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for backsnap. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: sjorsgielen/backsnap 9 | pullPolicy: IfNotPresent 10 | # Overrides the image tag whose default is the chart appVersion. 11 | tag: "" 12 | 13 | podAnnotations: {} 14 | 15 | podSecurityContext: 16 | runAsNonRoot: true 17 | 18 | resources: {} 19 | # limits: 20 | # cpu: 500m 21 | # memory: 128Mi 22 | # requests: 23 | # cpu: 10m 24 | # memory: 64Mi 25 | 26 | nodeSelector: {} 27 | 28 | tolerations: [] 29 | 30 | affinity: {} 31 | 32 | app: 33 | securityContext: 34 | allowPrivilegeEscalation: false 35 | capabilities: 36 | drop: 37 | - "ALL" 38 | namespaces: 39 | allow: [] 40 | exclude: [] 41 | # Default is @daily when schedule is blank. Other options include but are not limited to: @hourly and @weekly 42 | # See https://crontab.guru/ for examples and explanations. 43 | schedule: "@daily" 44 | # Default is nothing to run backsnap in automatic mode 45 | manual: "" 46 | snapshotClass: "" 47 | storageClass: "" 48 | s3: 49 | host: "" 50 | bucket: "" 51 | accessKey: "" 52 | secretKey: "" 53 | restic: 54 | password: "" 55 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "log" 7 | "log/slog" 8 | "os" 9 | "strings" 10 | 11 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 12 | // to ensure that exec-entrypoint and run can make use of them. 
13 | _ "k8s.io/client-go/plugin/pkg/client/auth" 14 | 15 | "github.com/go-logr/logr" 16 | volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" 17 | "github.com/samber/lo" 18 | batchv1 "k8s.io/api/batch/v1" 19 | corev1 "k8s.io/api/core/v1" 20 | "k8s.io/apimachinery/pkg/runtime" 21 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 22 | ctrl "sigs.k8s.io/controller-runtime" 23 | "sigs.k8s.io/controller-runtime/pkg/healthz" 24 | metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" 25 | 26 | backsnapv1alpha1 "github.com/skybitsnl/backsnap/api/v1alpha1" 27 | "github.com/skybitsnl/backsnap/internal/controller" 28 | //+kubebuilder:scaffold:imports 29 | ) 30 | 31 | // nolint: lll 32 | var ( 33 | metricsAddr = flag.String("metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 34 | probeAddr = flag.String("health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 35 | enableLeaderElection = flag.Bool("leader-elect", false, 36 | "Enable leader election for controller manager. "+ 37 | "Enabling this will ensure there is only one active controller manager.") 38 | 39 | namespacesFlag = flag.String("namespaces", "", "limit to namespaces, comma-separated (default is all namespaces)") 40 | excludeNamespacesFlag = flag.String("exclude-namespaces", "", "exclude namespaces") 41 | defaultSchedule = flag.String("schedule", "@daily", "Default backup schedule, can be overridden per namespace or per PVC with annotations - set to empty if you want no automatic backups") 42 | manual = flag.Bool("manual", false, "Manual mode: don't automatically create any PVCBackup objects") 43 | maxRunningBackups = flag.Int("max-running-backups", 1, "Maximum amount of backups to run simultaneously") 44 | sleepBetweenBackups = flag.Int("sleep-between-backups", 30, "Seconds to sleep between backing up of each PVC") 45 | 46 | snapshotClassFlag = flag.String("snapshotclass", "", "name of the VolumeSnapshotClass to use") 47 | storageClassFlag = flag.String("storageclass", "", "name fo the StorageClass to use") 48 | imagePullSecret = flag.String("imagepullsecret", "", "imagePullSecret to pass to backup Pod (optional)") 49 | image = flag.String("image", "sjorsgielen/backsnap-restic:latest-main", "Restic back-up image") 50 | s3Host = flag.String("s3-host", "", "S3 hostname (can be host, host:port or http://host:port/)") 51 | s3Bucket = flag.String("s3-bucket", "", "S3 bucket") 52 | s3AccessKeyId = flag.String("s3-access-key-id", "", "S3 access key ID") 53 | s3SecretAccessKey = flag.String("s3-secret-access-key", "", "S3 secret access key") 54 | resticPassword = flag.String("restic-password", "", "Restic password to encrypt storage by") 55 | ) 56 | 57 | var ( 58 | scheme = runtime.NewScheme() 59 | ) 60 | 61 | func init() { 62 | utilruntime.Must(corev1.AddToScheme(scheme)) 63 | utilruntime.Must(batchv1.AddToScheme(scheme)) 64 | utilruntime.Must(volumesnapshotv1.AddToScheme(scheme)) 65 | 66 | utilruntime.Must(backsnapv1alpha1.AddToScheme(scheme)) 67 | //+kubebuilder:scaffold:scheme 68 | } 69 | 70 | func requiredFlag(fn string) { 71 | f := flag.Lookup(fn) 72 | if f.Value.String() == "" { 73 | log.Fatal("Flag -" + f.Name + " is required") 74 | } 75 | } 76 | 77 | func main() { 78 | flag.Parse() 79 | 80 | slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{}))) 81 | 82 | ctx := logr.NewContextWithSlogLogger(context.Background(), slog.Default()) 83 | setupLog := logr.FromContextOrDiscard(ctx) 84 | 
ctrl.SetLogger(setupLog)
85 |
86 | requiredFlag("image")
87 | requiredFlag("s3-host")
88 | requiredFlag("s3-bucket")
89 |
90 | namespaces := lo.Map(strings.Split(*namespacesFlag, ","), ignore1[string, int](strings.TrimSpace))
91 | excludeNamespaces := lo.Map(strings.Split(*excludeNamespacesFlag, ","), ignore1[string, int](strings.TrimSpace))
92 |
93 | if len(namespaces) > 1 {
94 | // filter out "" as it would imply all namespaces when there's also particular namespaces mentioned
95 | // if this filters out all of them, we'll add 'all namespaces' right after this
96 | namespaces = lo.Filter(namespaces, func(item string, _ int) bool { return item != "" })
97 | }
98 |
99 | if len(namespaces) == 0 {
100 | namespaces = append(namespaces, "")
101 | }
102 |
103 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
104 | Scheme: scheme,
105 | Metrics: metricsserver.Options{BindAddress: *metricsAddr},
106 | HealthProbeBindAddress: *probeAddr,
107 | LeaderElection: *enableLeaderElection,
108 | LeaderElectionID: "leader.backsnap.skyb.it",
109 | // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
110 | // when the Manager ends. This requires the binary to immediately end when the
111 | // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
112 | // speeds up voluntary leader transitions as the new leader doesn't have to wait
113 | // LeaseDuration time first.
114 | //
115 | // In the default scaffold provided, the program ends immediately after
116 | // the manager stops, so it is fine to enable this option. However,
117 | // if you are doing, or intend to do, any operation such as performing cleanups
118 | // after the manager stops, then its usage might be unsafe.
119 | LeaderElectionReleaseOnCancel: true,
120 |
121 | // TODO:
122 | // Note that the Manager can restrict the namespace that all controllers
123 | // will watch for resources by
124 | })
125 | if err != nil {
126 | setupLog.Error(err, "unable to start manager")
127 | os.Exit(1)
128 | }
129 |
130 | if !*manual {
131 | if err := (&controller.AutomaticPVCBackupCreator{
132 | Client: mgr.GetClient(),
133 | Scheme: mgr.GetScheme(),
134 | Namespaces: namespaces,
135 | ExcludeNamespaces: excludeNamespaces,
136 | DefaultSchedule: *defaultSchedule,
137 | }).SetupWithManager(mgr); err != nil {
138 | setupLog.Error(err, "unable to setup AutomaticPVCBackupCreator")
139 | os.Exit(1)
140 | }
141 | }
142 |
143 | backupSettings := controller.BackupSettings{
144 | SnapshotClass: *snapshotClassFlag,
145 | StorageClass: *storageClassFlag,
146 | ImagePullSecret: *imagePullSecret,
147 | Image: *image,
148 | S3Host: *s3Host,
149 | S3Bucket: *s3Bucket,
150 | S3AccessKeyId: *s3AccessKeyId,
151 | S3SecretAccessKey: *s3SecretAccessKey,
152 | ResticPassword: *resticPassword,
153 | }
154 |
155 | if err = (&controller.PVCBackupReconciler{
156 | Client: mgr.GetClient(),
157 | Scheme: mgr.GetScheme(),
158 | Namespaces: namespaces,
159 | ExcludeNamespaces: excludeNamespaces,
160 | MaxRunningBackups: *maxRunningBackups,
161 | SleepBetweenBackups: *sleepBetweenBackups,
162 | BackupSettings: backupSettings,
163 | }).SetupWithManager(ctx, mgr); err != nil {
164 | setupLog.Error(err, "unable to create controller", "controller", "PVCBackup")
165 | os.Exit(1)
166 | }
167 | if err = (&controller.PVCRestoreReconciler{
168 | Client: mgr.GetClient(),
169 | Scheme: mgr.GetScheme(),
170 | Namespaces: namespaces,
171 | ExcludeNamespaces: excludeNamespaces,
172 | BackupSettings: backupSettings,
173 |
}).SetupWithManager(mgr); err != nil { 174 | setupLog.Error(err, "unable to create controller", "controller", "PVCRestore") 175 | os.Exit(1) 176 | } 177 | //+kubebuilder:scaffold:builder 178 | 179 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 180 | setupLog.Error(err, "unable to set up health check") 181 | os.Exit(1) 182 | } 183 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 184 | setupLog.Error(err, "unable to set up ready check") 185 | os.Exit(1) 186 | } 187 | 188 | slog.Info("starting manager") 189 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 190 | setupLog.Error(err, "problem running manager") 191 | os.Exit(1) 192 | } 193 | } 194 | 195 | func ignore1[T any, U any, V any](f func(t T) V) func(t T, u U) V { 196 | return func(t T, u U) V { 197 | return f(t) 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /config/crd/bases/backsnap.skyb.it_pvcbackups.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.15.0 7 | name: pvcbackups.backsnap.skyb.it 8 | spec: 9 | group: backsnap.skyb.it 10 | names: 11 | kind: PVCBackup 12 | listKind: PVCBackupList 13 | plural: pvcbackups 14 | singular: pvcbackup 15 | scope: Namespaced 16 | versions: 17 | - additionalPrinterColumns: 18 | - description: Time the backup was requested 19 | jsonPath: .metadata.creationTimestamp 20 | name: Age 21 | type: date 22 | - description: Time the backup job started running 23 | jsonPath: .status.startedAt 24 | name: Started at 25 | type: string 26 | - description: Time the backup job took to finish running 27 | jsonPath: .status.duration 28 | name: Duration 29 | type: string 30 | - description: Shows whether the backup succeeded or not 31 | jsonPath: .status.result 32 | name: Result 33 | type: string 34 | name: v1alpha1 35 | schema: 36 | openAPIV3Schema: 37 | description: PVCBackup is the Schema for the pvcbackups API 38 | properties: 39 | apiVersion: 40 | description: |- 41 | APIVersion defines the versioned schema of this representation of an object. 42 | Servers should convert recognized schemas to the latest internal value, and 43 | may reject unrecognized values. 44 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 45 | type: string 46 | kind: 47 | description: |- 48 | Kind is a string value representing the REST resource this object represents. 49 | Servers may infer this from the endpoint the client submits requests to. 50 | Cannot be updated. 51 | In CamelCase. 52 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 53 | type: string 54 | metadata: 55 | type: object 56 | spec: 57 | properties: 58 | annotations: 59 | additionalProperties: 60 | type: string 61 | description: |- 62 | If specified, indicates the annotations to be put on the backup 63 | VolumeSnapshot, backup temporary PVC, backup Job and backup Pod. This 64 | SHOULD NOT include any backsnap.skyb.it annotations. 65 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations 66 | type: object 67 | labels: 68 | additionalProperties: 69 | type: string 70 | description: |- 71 | If specified, indicates the labels to be put on the backup 72 | VolumeSnapshot, backup temporary PVC, backup Job and backup Pod. 
73 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels 74 | type: object 75 | nodeSelector: 76 | additionalProperties: 77 | type: string 78 | description: |- 79 | NodeSelector is a selector which must be true for the backup Pod to fit 80 | on a node. This can be used e.g. to select which type of node, or which 81 | Availability Zone, performs a backup. 82 | More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ 83 | type: object 84 | x-kubernetes-map-type: atomic 85 | priorityClassName: 86 | description: If specified, indicates the backup Pod's priority. 87 | type: string 88 | pvc: 89 | description: Name of the PVC to back up. Must be in the same namespace. 90 | type: string 91 | tolerations: 92 | description: If specified, the backup Pod's tolerations. 93 | items: 94 | description: |- 95 | The pod this Toleration is attached to tolerates any taint that matches 96 | the triple <key,value,effect> using the matching operator <operator>. 97 | properties: 98 | effect: 99 | description: |- 100 | Effect indicates the taint effect to match. Empty means match all taint effects. 101 | When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 102 | type: string 103 | key: 104 | description: |- 105 | Key is the taint key that the toleration applies to. Empty means match all taint keys. 106 | If the key is empty, operator must be Exists; this combination means to match all values and all keys. 107 | type: string 108 | operator: 109 | description: |- 110 | Operator represents a key's relationship to the value. 111 | Valid operators are Exists and Equal. Defaults to Equal. 112 | Exists is equivalent to wildcard for value, so that a pod can 113 | tolerate all taints of a particular category. 114 | type: string 115 | tolerationSeconds: 116 | description: |- 117 | TolerationSeconds represents the period of time the toleration (which must be 118 | of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, 119 | it is not set, which means tolerate the taint forever (do not evict). Zero and 120 | negative values will be treated as 0 (evict immediately) by the system. 121 | format: int64 122 | type: integer 123 | value: 124 | description: |- 125 | Value is the taint value the toleration matches to. 126 | If the operator is Exists, the value should be empty, otherwise just a regular string. 127 | type: string 128 | type: object 129 | type: array 130 | x-kubernetes-list-type: atomic 131 | ttl: 132 | description: |- 133 | How long the backup object will be retained after the backup completes. 134 | The controller will also always keep the last PVCBackup for a particular 135 | PVC around, so that it knows when the last backup was completed.
136 | type: string 137 | required: 138 | - pvc 139 | type: object 140 | status: 141 | properties: 142 | duration: 143 | type: string 144 | finishedAt: 145 | format: date-time 146 | type: string 147 | result: 148 | enum: 149 | - Succeeded 150 | - Failed 151 | type: string 152 | startedAt: 153 | format: date-time 154 | type: string 155 | type: object 156 | type: object 157 | served: true 158 | storage: true 159 | subresources: 160 | status: {} 161 | -------------------------------------------------------------------------------- /config/crd/bases/backsnap.skyb.it_pvcrestores.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.15.0 7 | name: pvcrestores.backsnap.skyb.it 8 | spec: 9 | group: backsnap.skyb.it 10 | names: 11 | kind: PVCRestore 12 | listKind: PVCRestoreList 13 | plural: pvcrestores 14 | singular: pvcrestore 15 | scope: Namespaced 16 | versions: 17 | - additionalPrinterColumns: 18 | - description: Time the restore was requested 19 | jsonPath: .metadata.creationTimestamp 20 | name: Age 21 | type: date 22 | - description: Time the restore job started running 23 | jsonPath: .status.startedAt 24 | name: Started at 25 | type: string 26 | - description: Time the restore job took to finish running 27 | jsonPath: .status.duration 28 | name: Duration 29 | type: string 30 | - description: Shows whether the restore succeeded or not 31 | jsonPath: .status.result 32 | name: Result 33 | type: string 34 | name: v1alpha1 35 | schema: 36 | openAPIV3Schema: 37 | description: PVCRestore is the Schema for the pvcrestores API 38 | properties: 39 | apiVersion: 40 | description: |- 41 | APIVersion defines the versioned schema of this representation of an object. 42 | Servers should convert recognized schemas to the latest internal value, and 43 | may reject unrecognized values. 44 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 45 | type: string 46 | kind: 47 | description: |- 48 | Kind is a string value representing the REST resource this object represents. 49 | Servers may infer this from the endpoint the client submits requests to. 50 | Cannot be updated. 51 | In CamelCase. 52 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 53 | type: string 54 | metadata: 55 | type: object 56 | spec: 57 | description: PVCRestoreSpec defines the desired state of PVCRestore 58 | properties: 59 | annotations: 60 | additionalProperties: 61 | type: string 62 | description: |- 63 | If specified, indicates the annotations to be put on the restored PVC, 64 | restore Job and restore Pod. This SHOULD NOT include any backsnap.skyb.it 65 | annotations. 66 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations 67 | type: object 68 | labels: 69 | additionalProperties: 70 | type: string 71 | description: |- 72 | If specified, indicates the labels to be put on the restored PVC, restore 73 | Job and restore Pod. 74 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels 75 | type: object 76 | nodeSelector: 77 | additionalProperties: 78 | type: string 79 | description: |- 80 | NodeSelector is a selector which must be true for the restore Pod to fit 81 | on a node. This can be used e.g. 
to select which type of node, or which 82 | Availability Zone, performs a restore. This, in turn, may also determine 83 | in which Availability Zone the restored volume is created. 84 | More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ 85 | type: object 86 | x-kubernetes-map-type: atomic 87 | priorityClassName: 88 | description: If specified, indicates the restore Pod's priority. 89 | type: string 90 | sourceNamespace: 91 | description: |- 92 | The namespace that the source PVC used to exist in. If empty, assume that 93 | the source namespace is the same as the namespace where this PVCRestore 94 | object exists. 95 | type: string 96 | sourcePvc: 97 | description: |- 98 | The name of the source PVC that will be restored. The source PVC does not 99 | need to exist anymore; this is just for finding its data. 100 | type: string 101 | sourceSnapshot: 102 | description: The snapshot to restore, or empty to restore the latest 103 | snapshot. 104 | type: string 105 | targetPvc: 106 | description: |- 107 | The name of the new PVC into which the source contents will be restored. 108 | The PVC must not exist, and will be created. If empty, assume that the 109 | target PVC has the same name as the source PVC. 110 | type: string 111 | targetPvcSize: 112 | anyOf: 113 | - type: integer 114 | - type: string 115 | description: |- 116 | The size of the target PVC. Must be large enough to contain the backup's 117 | contents. 118 | pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ 119 | x-kubernetes-int-or-string: true 120 | tolerations: 121 | description: If specified, the restore Pod's tolerations. 122 | items: 123 | description: |- 124 | The pod this Toleration is attached to tolerates any taint that matches 125 | the triple <key,value,effect> using the matching operator <operator>. 126 | properties: 127 | effect: 128 | description: |- 129 | Effect indicates the taint effect to match. Empty means match all taint effects. 130 | When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 131 | type: string 132 | key: 133 | description: |- 134 | Key is the taint key that the toleration applies to. Empty means match all taint keys. 135 | If the key is empty, operator must be Exists; this combination means to match all values and all keys. 136 | type: string 137 | operator: 138 | description: |- 139 | Operator represents a key's relationship to the value. 140 | Valid operators are Exists and Equal. Defaults to Equal. 141 | Exists is equivalent to wildcard for value, so that a pod can 142 | tolerate all taints of a particular category. 143 | type: string 144 | tolerationSeconds: 145 | description: |- 146 | TolerationSeconds represents the period of time the toleration (which must be 147 | of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, 148 | it is not set, which means tolerate the taint forever (do not evict). Zero and 149 | negative values will be treated as 0 (evict immediately) by the system. 150 | format: int64 151 | type: integer 152 | value: 153 | description: |- 154 | Value is the taint value the toleration matches to. 155 | If the operator is Exists, the value should be empty, otherwise just a regular string.
156 | type: string 157 | type: object 158 | type: array 159 | x-kubernetes-list-type: atomic 160 | type: object 161 | status: 162 | description: PVCRestoreStatus defines the observed state of PVCRestore 163 | properties: 164 | duration: 165 | type: string 166 | finishedAt: 167 | format: date-time 168 | type: string 169 | result: 170 | enum: 171 | - Succeeded 172 | - Failed 173 | type: string 174 | startedAt: 175 | format: date-time 176 | type: string 177 | type: object 178 | type: object 179 | served: true 180 | storage: true 181 | subresources: 182 | status: {} 183 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/backsnap.skyb.it_pvcbackups.yaml 6 | - bases/backsnap.skyb.it_pvcrestores.yaml 7 | #+kubebuilder:scaffold:crdkustomizeresource 8 | 9 | patches: 10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 11 | # patches here are for enabling the conversion webhook for each CRD 12 | #- path: patches/webhook_in_pvcbackups.yaml 13 | #- path: patches/webhook_in_pvcrestores.yaml 14 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 15 | 16 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 17 | # patches here are for enabling the CA injection for each CRD 18 | #- path: patches/cainjection_in_pvcbackups.yaml 19 | #- path: patches/cainjection_in_pvcrestores.yaml 20 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 21 | 22 | # [WEBHOOK] To enable webhook, uncomment the following section 23 | # the following config is for teaching kustomize how to do kustomization for CRDs. 24 | 25 | #configurations: 26 | #- kustomizeconfig.yaml 27 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: backsnap-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: backsnap- 10 | 11 | # Labels to add to all resources and selectors. 
12 | #labels: 13 | #- includeSelectors: true 14 | # pairs: 15 | # someName: someValue 16 | 17 | resources: 18 | - ../crd 19 | - ../rbac 20 | - ../manager 21 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 22 | # crd/kustomization.yaml 23 | #- ../webhook 24 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 25 | #- ../certmanager 26 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 27 | #- ../prometheus 28 | 29 | patches: 30 | # Protect the /metrics endpoint by putting it behind auth. 31 | # If you want your controller-manager to expose the /metrics 32 | # endpoint w/o any authn/z, please comment the following line. 33 | - path: manager_auth_proxy_patch.yaml 34 | 35 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 36 | # crd/kustomization.yaml 37 | #- path: manager_webhook_patch.yaml 38 | 39 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 40 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 41 | # 'CERTMANAGER' needs to be enabled to use ca injection 42 | #- path: webhookcainjection_patch.yaml 43 | 44 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 45 | # Uncomment the following replacements to add the cert-manager CA injection annotations 46 | #replacements: 47 | # - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs 48 | # kind: Certificate 49 | # group: cert-manager.io 50 | # version: v1 51 | # name: serving-cert # this name should match the one in certificate.yaml 52 | # fieldPath: .metadata.namespace # namespace of the certificate CR 53 | # targets: 54 | # - select: 55 | # kind: ValidatingWebhookConfiguration 56 | # fieldPaths: 57 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 58 | # options: 59 | # delimiter: '/' 60 | # index: 0 61 | # create: true 62 | # - select: 63 | # kind: MutatingWebhookConfiguration 64 | # fieldPaths: 65 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 66 | # options: 67 | # delimiter: '/' 68 | # index: 0 69 | # create: true 70 | # - select: 71 | # kind: CustomResourceDefinition 72 | # fieldPaths: 73 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 74 | # options: 75 | # delimiter: '/' 76 | # index: 0 77 | # create: true 78 | # - source: 79 | # kind: Certificate 80 | # group: cert-manager.io 81 | # version: v1 82 | # name: serving-cert # this name should match the one in certificate.yaml 83 | # fieldPath: .metadata.name 84 | # targets: 85 | # - select: 86 | # kind: ValidatingWebhookConfiguration 87 | # fieldPaths: 88 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 89 | # options: 90 | # delimiter: '/' 91 | # index: 1 92 | # create: true 93 | # - select: 94 | # kind: MutatingWebhookConfiguration 95 | # fieldPaths: 96 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 97 | # options: 98 | # delimiter: '/' 99 | # index: 1 100 | # create: true 101 | # - select: 102 | # kind: CustomResourceDefinition 103 | # fieldPaths: 104 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 105 | # options: 106 | # delimiter: '/' 107 | # index: 1 108 | # create: true 109 | # - source: # Add cert-manager annotation to the webhook Service 110 | # kind: Service 111 | # version: v1 112 | # name: 
webhook-service 113 | # fieldPath: .metadata.name # name of the service 114 | # targets: 115 | # - select: 116 | # kind: Certificate 117 | # group: cert-manager.io 118 | # version: v1 119 | # fieldPaths: 120 | # - .spec.dnsNames.0 121 | # - .spec.dnsNames.1 122 | # options: 123 | # delimiter: '.' 124 | # index: 0 125 | # create: true 126 | # - source: 127 | # kind: Service 128 | # version: v1 129 | # name: webhook-service 130 | # fieldPath: .metadata.namespace # namespace of the service 131 | # targets: 132 | # - select: 133 | # kind: Certificate 134 | # group: cert-manager.io 135 | # version: v1 136 | # fieldPaths: 137 | # - .spec.dnsNames.0 138 | # - .spec.dnsNames.1 139 | # options: 140 | # delimiter: '.' 141 | # index: 1 142 | # create: true 143 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch injects a sidecar container which is an HTTP proxy for the 2 | # controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | securityContext: 14 | allowPrivilegeEscalation: false 15 | capabilities: 16 | drop: 17 | - "ALL" 18 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 19 | args: 20 | - "--secure-listen-address=0.0.0.0:8443" 21 | - "--upstream=http://127.0.0.1:8080/" 22 | - "--logtostderr=true" 23 | - "--v=0" 24 | ports: 25 | - containerPort: 8443 26 | protocol: TCP 27 | name: https 28 | resources: 29 | limits: 30 | cpu: 500m 31 | memory: 128Mi 32 | requests: 33 | cpu: 5m 34 | memory: 64Mi 35 | - name: manager 36 | args: 37 | - "--health-probe-bind-address=:8081" 38 | - "--metrics-bind-address=127.0.0.1:8080" 39 | - "--leader-elect" 40 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | app: backsnap 6 | app.kubernetes.io/name: namespace 7 | app.kubernetes.io/instance: backsnap 8 | app.kubernetes.io/component: manager 9 | app.kubernetes.io/created-by: backsnap 10 | app.kubernetes.io/part-of: backsnap 11 | app.kubernetes.io/managed-by: kustomize 12 | name: backsnap 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: backsnap-operator 18 | namespace: backsnap 19 | labels: 20 | app: backsnap 21 | control-plane: controller-manager 22 | app.kubernetes.io/name: deployment 23 | app.kubernetes.io/instance: controller-manager 24 | app.kubernetes.io/component: manager 25 | app.kubernetes.io/created-by: backsnap 26 |
app.kubernetes.io/part-of: backsnap 27 | app.kubernetes.io/managed-by: kustomize 28 | spec: 29 | selector: 30 | matchLabels: 31 | app: backsnap 32 | replicas: 1 33 | template: 34 | metadata: 35 | annotations: 36 | kubectl.kubernetes.io/default-container: manager 37 | labels: 38 | app: backsnap 39 | spec: 40 | securityContext: 41 | runAsNonRoot: true 42 | containers: 43 | - command: 44 | - /manager 45 | args: 46 | # Start a leader election, ensuring that even with two replicas, only one operator 47 | # is active at a time. 48 | - --leader-elect 49 | # In order to include only particular namespaces (default is all namespaces): 50 | # - --namespaces 51 | # - one,two,three 52 | 53 | # Alternatively, in order to exclude namespaces. We'd recommend setting a schedule 54 | # annotation on the namespace instead of using this option, if possible. 55 | # - --exclude-namespaces 56 | # - dev,staging,testing 57 | 58 | # Set a default schedule. The default is @daily. We'd recommend keeping this default 59 | # and overriding it on a per-namespace basis. 60 | # - --schedule 61 | # - "@weekly" 62 | 63 | # Enable manual mode. This never creates automatic PVCBackups, even if a namespace or 64 | # PVC sets a particular schedule in its annotations. If this is set, the operator only 65 | # performs back-ups for PVCBackup objects created externally (e.g., by you). 66 | # - --manual 67 | 68 | # Backsnap automatically creates volume snapshots while it is preparing 69 | # for a backup. The volume snapshot class name set here is used for all 70 | # volume snapshots created by Backsnap. Otherwise, the cluster default 71 | # volume snapshot class name is used. 72 | # - --snapshotclass 73 | # - csi-snapshot 74 | 75 | # The storage class name set here is used for all PVCs created by Backsnap, 76 | # both while creating a back-up and while restoring one. Otherwise, the 77 | # cluster default storage class is used. 78 | # - --storageclass 79 | # - csi-block 80 | 81 | # The location and credentials of the S3 bucket where backups will be stored. 82 | # The S3 hostname can be host, host:port or http://host:port/. Any target supported 83 | # by Restic works, including e.g. MinIO. 84 | - --s3-host 85 | - s3.us-west-004.backblazeb2.com 86 | - --s3-bucket 87 | - backsnap 88 | - --s3-access-key-id 89 | - 004ffef...0003 90 | - --s3-secret-access-key 91 | - "K004KC/..." 92 | 93 | # The encryption key by which Restic will client-side encrypt your 94 | # backup. Do not lose this! 95 | - --restic-password 96 | - ChangeMe 97 | 98 | # There are other supported flags, too. See the manager --help (or 99 | # main.go) for more information.
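# (Illustrative addition, not in the original manifest: the backup concurrency
# flags from main.go can also be set here. Their defaults are 1 and 30; the
# values below are examples only.)
# - --max-running-backups
# - "2"
# - --sleep-between-backups
# - "60"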
100 | 101 | image: sjorsgielen/backsnap:latest 102 | name: manager 103 | securityContext: 104 | allowPrivilegeEscalation: false 105 | capabilities: 106 | drop: 107 | - "ALL" 108 | livenessProbe: 109 | httpGet: 110 | path: /healthz 111 | port: 8081 112 | initialDelaySeconds: 15 113 | periodSeconds: 20 114 | readinessProbe: 115 | httpGet: 116 | path: /readyz 117 | port: 8081 118 | initialDelaySeconds: 5 119 | periodSeconds: 10 120 | resources: 121 | limits: 122 | cpu: 500m 123 | memory: 128Mi 124 | requests: 125 | cpu: 10m 126 | memory: 64Mi 127 | serviceAccountName: controller-manager 128 | terminationGracePeriodSeconds: 10 129 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | # Prometheus Monitor Service (Metrics) 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | labels: 6 | control-plane: controller-manager 7 | app.kubernetes.io/name: servicemonitor 8 | app.kubernetes.io/instance: controller-manager-metrics-monitor 9 | app.kubernetes.io/component: metrics 10 | app.kubernetes.io/created-by: backsnap 11 | app.kubernetes.io/part-of: backsnap 12 | app.kubernetes.io/managed-by: kustomize 13 | name: controller-manager-metrics-monitor 14 | namespace: system 15 | spec: 16 | endpoints: 17 | - path: /metrics 18 | port: https 19 | scheme: https 20 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 21 | tlsConfig: 22 | insecureSkipVerify: true 23 | selector: 24 | matchLabels: 25 | control-plane: controller-manager 26 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: metrics-reader 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: kustomize 11 | name: metrics-reader 12 | rules: 13 | - nonResourceURLs: 14 | - "/metrics" 15 | verbs: 16 | - get 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: proxy-role 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-role 12 | rules: 13 | - apiGroups: 14 | - authentication.k8s.io 15 | resources: 16 | - tokenreviews 17 | verbs: 18 | - create 19 | - apiGroups: 20 | - authorization.k8s.io 21 | resources: 22 | - subjectaccessreviews 23 | verbs: 24 | - create 25 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: proxy-rolebinding 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: proxy-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: service 7 | app.kubernetes.io/instance: controller-manager-metrics-service 8 | app.kubernetes.io/component: kube-rbac-proxy 9 | app.kubernetes.io/created-by: backsnap 10 | app.kubernetes.io/part-of: backsnap 11 | app.kubernetes.io/managed-by: kustomize 12 | name: controller-manager-metrics-service 13 | namespace: system 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | protocol: TCP 19 | targetPort: https 20 | selector: 21 | control-plane: controller-manager 22 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
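# (Note added for clarity: these rules back the manager's --leader-elect flag.
# Per cmd/main.go, the lock's LeaderElectionID is leader.backsnap.skyb.it, held
# as a coordination.k8s.io Lease; the configmaps rule below presumably remains
# for older ConfigMap-based election locks.)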
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: role 7 | app.kubernetes.io/instance: leader-election-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: backsnap 10 | app.kubernetes.io/part-of: backsnap 11 | app.kubernetes.io/managed-by: kustomize 12 | name: leader-election-role 13 | namespace: backsnap 14 | rules: 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - configmaps 19 | verbs: 20 | - get 21 | - list 22 | - watch 23 | - create 24 | - update 25 | - patch 26 | - delete 27 | - apiGroups: 28 | - coordination.k8s.io 29 | resources: 30 | - leases 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - create 36 | - update 37 | - patch 38 | - delete 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - events 43 | verbs: 44 | - create 45 | - patch 46 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: rolebinding 6 | app.kubernetes.io/instance: leader-election-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: kustomize 11 | name: leader-election-rolebinding 12 | namespace: backsnap 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: Role 16 | name: leader-election-role 17 | subjects: 18 | - kind: ServiceAccount 19 | name: controller-manager 20 | namespace: backsnap 21 | -------------------------------------------------------------------------------- /config/rbac/pvcbackup_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit pvcbackups. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: pvcbackup-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: backsnap 10 | app.kubernetes.io/part-of: backsnap 11 | app.kubernetes.io/managed-by: kustomize 12 | name: pvcbackup-editor-role 13 | rules: 14 | - apiGroups: 15 | - backsnap.skyb.it 16 | resources: 17 | - pvcbackups 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - backsnap.skyb.it 28 | resources: 29 | - pvcbackups/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/pvcbackup_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view pvcbackups. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: pvcbackup-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: backsnap 10 | app.kubernetes.io/part-of: backsnap 11 | app.kubernetes.io/managed-by: kustomize 12 | name: pvcbackup-viewer-role 13 | rules: 14 | - apiGroups: 15 | - backsnap.skyb.it 16 | resources: 17 | - pvcbackups 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - backsnap.skyb.it 24 | resources: 25 | - pvcbackups/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/pvcrestore_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit pvcrestores. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: pvcrestore-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: backsnap 10 | app.kubernetes.io/part-of: backsnap 11 | app.kubernetes.io/managed-by: kustomize 12 | name: pvcrestore-editor-role 13 | rules: 14 | - apiGroups: 15 | - backsnap.skyb.it 16 | resources: 17 | - pvcrestores 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - backsnap.skyb.it 28 | resources: 29 | - pvcrestores/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/pvcrestore_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view pvcrestores. 
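# (Illustrative usage, not part of these manifests: such a role is typically
# granted with something like
#   kubectl create clusterrolebinding restore-viewer --clusterrole=<final-role-name> --user=jane
# where the final role name depends on the namePrefix that kustomize applies.)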
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: pvcrestore-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: backsnap 10 | app.kubernetes.io/part-of: backsnap 11 | app.kubernetes.io/managed-by: kustomize 12 | name: pvcrestore-viewer-role 13 | rules: 14 | - apiGroups: 15 | - backsnap.skyb.it 16 | resources: 17 | - pvcrestores 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - backsnap.skyb.it 24 | resources: 25 | - pvcrestores/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: backsnap-manager 6 | rules: 7 | - apiGroups: 8 | - backsnap.skyb.it 9 | resources: 10 | - pvcbackups 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - backsnap.skyb.it 21 | resources: 22 | - pvcbackups/finalizers 23 | verbs: 24 | - update 25 | - apiGroups: 26 | - backsnap.skyb.it 27 | resources: 28 | - pvcbackups/status 29 | verbs: 30 | - get 31 | - patch 32 | - update 33 | - apiGroups: 34 | - backsnap.skyb.it 35 | resources: 36 | - pvcrestores 37 | verbs: 38 | - create 39 | - delete 40 | - get 41 | - list 42 | - patch 43 | - update 44 | - watch 45 | - apiGroups: 46 | - backsnap.skyb.it 47 | resources: 48 | - pvcrestores/finalizers 49 | verbs: 50 | - update 51 | - apiGroups: 52 | - backsnap.skyb.it 53 | resources: 54 | - pvcrestores/status 55 | verbs: 56 | - get 57 | - patch 58 | - update 59 | - apiGroups: 60 | - batch 61 | resources: 62 | - jobs 63 | verbs: 64 | - create 65 | - delete 66 | - get 67 | - list 68 | - patch 69 | - update 70 | - watch 71 | - apiGroups: 72 | - "" 73 | resources: 74 | - namespaces 75 | verbs: 76 | - get 77 | - list 78 | - watch 79 | - apiGroups: 80 | - "" 81 | resources: 82 | - persistentvolumeclaims 83 | verbs: 84 | - create 85 | - delete 86 | - get 87 | - list 88 | - patch 89 | - update 90 | - watch 91 | - apiGroups: 92 | - "" 93 | resources: 94 | - persistentvolumeclaims/finalizers 95 | verbs: 96 | - update 97 | - apiGroups: 98 | - snapshot.storage.k8s.io 99 | resources: 100 | - volumesnapshots 101 | verbs: 102 | - create 103 | - delete 104 | - get 105 | - list 106 | - patch 107 | - update 108 | - watch 109 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: manager-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: kustomize 11 | name: manager-rolebinding 12 | namespace: backsnap 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: backsnap-manager 17 | subjects: 18 | - kind: ServiceAccount 19 | name: controller-manager 20 | namespace: backsnap 21 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: serviceaccount 6 | app.kubernetes.io/instance: controller-manager-sa 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: backsnap 9 | app.kubernetes.io/part-of: backsnap 10 | app.kubernetes.io/managed-by: kustomize 11 | name: controller-manager 12 | namespace: backsnap 13 | -------------------------------------------------------------------------------- /config/samples/backsnap_v1alpha1_pvcbackup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: backsnap.skyb.it/v1alpha1 2 | kind: PVCBackup 3 | metadata: 4 | name: my-data-backup 5 | namespace: my-application 6 | spec: 7 | pvc: my-data 8 | ttl: 1h 9 | -------------------------------------------------------------------------------- /config/samples/backsnap_v1alpha1_pvcrestore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: backsnap.skyb.it/v1alpha1 2 | kind: PVCRestore 3 | metadata: 4 | name: my-data-restore 5 | namespace: my-application 6 | spec: 7 | sourcePvc: "my-data" 8 | # Empty means 'same namespace as this PVCRestore object' 9 | sourceNamespace: "" 10 | # Empty means latest 11 | sourceSnapshot: "" 12 | 13 | targetPvc: "my-restored-data" 14 | targetPvcSize: "1Gi" 15 | 16 | # NodeSelector is a selector which must be true for the restore Pod to fit 17 | # on a node. This can be used e.g. to select which type of node, or which 18 | # Availability Zone, performs a restore. This, in turn, may also determine 19 | # in which Availability Zone the restored volume is created. 20 | #nodeSelector: 21 | # topology.kubernetes.io/zone: nl-ams-2 22 | 23 | # If specified, the restore Pod's tolerations. 24 | #tolerations: 25 | #- key: "node-role.kubernetes.io/master" 26 | # operator: "Equal" 27 | # value: "true" 28 | # effect: "NoSchedule" 29 | 30 | # If specified, indicates the restore Pod's priority. 31 | #priorityClassName: "system-cluster-critical" 32 | 33 | # If specified, indicates the labels to be put on the restored PVC, restore 34 | # Job and restore Pod. 35 | # More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels 36 | #labels: 37 | # restored-by: me 38 | 39 | # If specified, indicates the annotations to be put on the restored PVC, 40 | # restore Job and restore Pod. This SHOULD NOT include any backsnap.skyb.it 41 | # annotations. 
42 | # More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations 43 | #annotations: 44 | # restored-by: me 45 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples of your project ## 2 | resources: 3 | - backsnap_v1alpha1_pvcbackup.yaml 4 | - backsnap_v1alpha1_pvcrestore.yaml 5 | #+kubebuilder:scaffold:manifestskustomizesamples 6 | -------------------------------------------------------------------------------- /docs/backsnap-0.7.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skybitsnl/backsnap/4c31fa87ab305f07f5c8bcf135a118f024359c6d/docs/backsnap-0.7.0.tgz -------------------------------------------------------------------------------- /docs/backsnap-0.7.1.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skybitsnl/backsnap/4c31fa87ab305f07f5c8bcf135a118f024359c6d/docs/backsnap-0.7.1.tgz -------------------------------------------------------------------------------- /docs/backsnap-0.7.2.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skybitsnl/backsnap/4c31fa87ab305f07f5c8bcf135a118f024359c6d/docs/backsnap-0.7.2.tgz -------------------------------------------------------------------------------- /docs/backsnap-0.8.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skybitsnl/backsnap/4c31fa87ab305f07f5c8bcf135a118f024359c6d/docs/backsnap-0.8.0.tgz -------------------------------------------------------------------------------- /docs/backsnap-0.9.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skybitsnl/backsnap/4c31fa87ab305f07f5c8bcf135a118f024359c6d/docs/backsnap-0.9.0.tgz -------------------------------------------------------------------------------- /docs/backsnap-0.9.1.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skybitsnl/backsnap/4c31fa87ab305f07f5c8bcf135a118f024359c6d/docs/backsnap-0.9.1.tgz -------------------------------------------------------------------------------- /docs/index.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | entries: 3 | backsnap: 4 | - apiVersion: v2 5 | appVersion: v0.9.0 6 | created: "2024-09-15T16:50:08.017032+02:00" 7 | description: Kubernetes backup operator - off-site point-in-time backups with 8 | history 9 | digest: e474b17f3d98bc22812be0e2e31ccc96c863ebe3eba0911969e3cb8c91924ea5 10 | name: backsnap 11 | type: application 12 | urls: 13 | - backsnap-0.9.1.tgz 14 | version: 0.9.1 15 | - apiVersion: v2 16 | appVersion: v0.9.0 17 | created: "2024-09-15T16:50:08.016662+02:00" 18 | description: Kubernetes backup operator - off-site point-in-time backups with 19 | history 20 | digest: b4cbde91d459ba2df376e19c65a05830057427c6a471aef6ac310991032f01f6 21 | name: backsnap 22 | type: application 23 | urls: 24 | - backsnap-0.9.0.tgz 25 | version: 0.9.0 26 | - apiVersion: v2 27 | appVersion: v0.8.0 28 | created: "2024-09-15T16:50:08.015996+02:00" 29 | description: Kubernetes backup operator - off-site point-in-time backups with 30 | history 31 | digest: 
10daa39fd5622eeea46231bf85f8c35386327e417f30202433092d97d0c99421 32 | name: backsnap 33 | type: application 34 | urls: 35 | - backsnap-0.8.0.tgz 36 | version: 0.8.0 37 | - apiVersion: v2 38 | appVersion: v0.7.0 39 | created: "2024-09-15T16:50:08.015443+02:00" 40 | description: Kubernetes backup operator - off-site point-in-time backups with 41 | history 42 | digest: c4d58d7ba07e718e54c684ba53b135ff18b775d55e4bee9431cd02512f72fc64 43 | name: backsnap 44 | type: application 45 | urls: 46 | - backsnap-0.7.2.tgz 47 | version: 0.7.2 48 | - apiVersion: v2 49 | appVersion: v0.7.0 50 | created: "2024-09-15T16:50:08.015027+02:00" 51 | description: Kubernetes backup operator - off-site point-in-time backups with 52 | history 53 | digest: 0e9cae73d7796b7a0d11f0c9aaf2ed67e5c7bf3635d1f4305f88736d0fb1a2d6 54 | name: backsnap 55 | type: application 56 | urls: 57 | - backsnap-0.7.1.tgz 58 | version: 0.7.1 59 | - apiVersion: v2 60 | appVersion: v0.7.0 61 | created: "2024-09-15T16:50:08.014262+02:00" 62 | description: Kubernetes backup operator - off-site point-in-time backups with 63 | history 64 | digest: 5ab092a7d5e870f0a07d2c27649a005c9cc8ced5677752076cd26505b7748460 65 | name: backsnap 66 | type: application 67 | urls: 68 | - backsnap-0.7.0.tgz 69 | version: 0.7.0 70 | generated: "2024-09-15T16:50:08.012367+02:00" 71 | -------------------------------------------------------------------------------- /docs/migrate_pvc_to_another_az.md: -------------------------------------------------------------------------------- 1 | # Migrating a PVC to another availability zone using Backsnap 2 | 3 | Suppose you have a `myapplication` StatefulSet with two replicas, both in availability 4 | zone `nl-ams-1`. Now, you want to move one of the two replicas (the `myapplication-1` PVC) 5 | to availability zone `nl-ams-2`. 6 | 7 | First, scale down the application to 1 replica, so that only `myapplication-0` is 8 | used. 9 | 10 | ``` 11 | $ kubectl scale statefulset -n myapplication myapplication --replicas=1 12 | ``` 13 | 14 | Then, create a backup of the final contents of the `myapplication-1` PVC. 15 | 16 | ``` 17 | $ cat <<EOF >backup.yaml 18 | apiVersion: backsnap.skyb.it/v1alpha1 19 | kind: PVCBackup 20 | metadata: 21 | name: myapplication-1-manual 22 | namespace: myapplication 23 | spec: 24 | pvc: myapplication-1 25 | ttl: 1h 26 | EOF 27 | $ kubectl apply -f backup.yaml 28 | $ kubectl wait -n myapplication --for=jsonpath='{.status.result}'=Succeeded --timeout=1h pvcbackup myapplication-1-manual 29 | ``` 30 | 31 | Once the backup is completed, delete the PVC: 32 | 33 | ``` 34 | $ kubectl delete pvc -n myapplication myapplication-1 35 | ``` 36 | 37 | Then, create a PVCRestore targeting the new AZ and wait for it to complete: 38 | 39 | ``` 40 | $ cat <<EOF >restore.yaml 41 | apiVersion: backsnap.skyb.it/v1alpha1 42 | kind: PVCRestore 43 | metadata: 44 | name: myapplication-1-restore 45 | namespace: myapplication 46 | spec: 47 | sourcePvc: myapplication-1 48 | targetPvcSize: "10Gi" 49 | nodeSelector: 50 | topology.kubernetes.io/zone: nl-ams-2 51 | EOF 52 | $ kubectl apply -f restore.yaml 53 | $ kubectl wait -n myapplication --for=jsonpath='{.status.result}'=Succeeded --timeout=1h pvcrestore myapplication-1-restore 54 | ``` 55 | 56 | The PVC should now exist again, but in the `nl-ams-2` AZ, and with the same 57 | contents as before.
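Optionally, verify where the restored volume ended up before scaling back up (a hypothetical check, not part of the original steps; the exact output depends on your storage driver):

```
$ kubectl get pvc -n myapplication myapplication-1
$ kubectl get pv -o wide
```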
Now, we can scale the StatefulSet back up: 58 | 59 | ``` 60 | $ kubectl scale statefulset -n myapplication myapplication --replicas=2 61 | ``` 62 | 63 | Using `kubectl get pods -o wide -n myapplication`, you should see the new 64 | `myapplication-1` Pod being scheduled in the `nl-ams-2` availability zone. 65 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/skybitsnl/backsnap 2 | 3 | go 1.22.0 4 | 5 | toolchain go1.22.4 6 | 7 | require ( 8 | github.com/samber/lo v1.46.0 9 | k8s.io/api v0.30.3 10 | k8s.io/apimachinery v0.30.3 11 | k8s.io/client-go v0.30.3 12 | sigs.k8s.io/controller-runtime v0.18.4 13 | ) 14 | 15 | require ( 16 | github.com/beorn7/perks v1.0.1 // indirect 17 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 18 | github.com/davecgh/go-spew v1.1.1 // indirect 19 | github.com/dustin/go-humanize v1.0.1 // indirect 20 | github.com/emicklei/go-restful/v3 v3.12.1 // indirect 21 | github.com/evanphx/json-patch v5.7.0+incompatible // indirect 22 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect 23 | github.com/fsnotify/fsnotify v1.7.0 // indirect 24 | github.com/go-logr/zapr v1.3.0 // indirect 25 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 26 | github.com/go-openapi/jsonreference v0.21.0 // indirect 27 | github.com/go-openapi/swag v0.23.0 // indirect 28 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 29 | github.com/gogo/protobuf v1.3.2 // indirect 30 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 31 | github.com/golang/protobuf v1.5.4 // indirect 32 | github.com/google/gnostic-models v0.6.8 // indirect 33 | github.com/google/go-cmp v0.6.0 // indirect 34 | github.com/google/gofuzz v1.2.0 // indirect 35 | github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect 36 | github.com/imdario/mergo v0.3.16 // indirect 37 | github.com/josharian/intern v1.0.0 // indirect 38 | github.com/json-iterator/go v1.1.12 // indirect 39 | github.com/klauspost/compress v1.17.6 // indirect 40 | github.com/klauspost/cpuid/v2 v2.2.6 // indirect 41 | github.com/mailru/easyjson v0.7.7 // indirect 42 | github.com/minio/md5-simd v1.1.2 // indirect 43 | github.com/minio/sha256-simd v1.0.1 // indirect 44 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 45 | github.com/modern-go/reflect2 v1.0.2 // indirect 46 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 47 | github.com/pkg/errors v0.9.1 // indirect 48 | github.com/prometheus/client_golang v1.19.1 // indirect 49 | github.com/prometheus/client_model v0.6.1 // indirect 50 | github.com/prometheus/common v0.55.0 // indirect 51 | github.com/prometheus/procfs v0.15.1 // indirect 52 | github.com/rs/xid v1.5.0 // indirect 53 | github.com/spf13/pflag v1.0.5 // indirect 54 | go.uber.org/multierr v1.11.0 // indirect 55 | go.uber.org/zap v1.26.0 // indirect 56 | golang.org/x/crypto v0.25.0 // indirect 57 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect 58 | golang.org/x/net v0.27.0 // indirect 59 | golang.org/x/oauth2 v0.21.0 // indirect 60 | golang.org/x/sys v0.22.0 // indirect 61 | golang.org/x/term v0.22.0 // indirect 62 | golang.org/x/text v0.16.0 // indirect 63 | golang.org/x/time v0.5.0 // indirect 64 | golang.org/x/tools v0.23.0 // indirect 65 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 66 | google.golang.org/protobuf v1.34.2 // indirect 67 | gopkg.in/inf.v0 v0.9.1 // indirect 
68 | gopkg.in/ini.v1 v1.67.0 // indirect 69 | gopkg.in/yaml.v2 v2.4.0 // indirect 70 | gopkg.in/yaml.v3 v3.0.1 // indirect 71 | k8s.io/apiextensions-apiserver v0.30.3 // indirect 72 | k8s.io/klog/v2 v2.130.1 // indirect 73 | k8s.io/kube-openapi v0.0.0-20240726031636-6f6746feab9c // indirect 74 | k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect 75 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 76 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 77 | sigs.k8s.io/yaml v1.4.0 // indirect 78 | ) 79 | 80 | require ( 81 | github.com/go-logr/logr v1.4.2 82 | github.com/google/uuid v1.6.0 83 | github.com/hashicorp/cronexpr v1.1.2 84 | github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0 85 | github.com/minio/minio-go/v7 v7.0.68 86 | github.com/onsi/ginkgo/v2 v2.19.0 87 | github.com/onsi/gomega v1.33.1 88 | ) 89 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 2 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 3 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 4 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 7 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 8 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 9 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 10 | github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= 11 | github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 12 | github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= 13 | github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 14 | github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= 15 | github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= 16 | github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 17 | github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= 18 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 19 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 20 | github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= 21 | github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= 22 | github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= 23 | github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= 24 | github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= 25 | github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= 26 | github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= 27 | github.com/go-openapi/swag v0.23.0/go.mod 
h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 28 | github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= 29 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 30 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 31 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 32 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= 33 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 34 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 35 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 36 | github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= 37 | github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= 38 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 39 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 40 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 41 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 42 | github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= 43 | github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 44 | github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= 45 | github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= 46 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 47 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 48 | github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= 49 | github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= 50 | github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= 51 | github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= 52 | github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 53 | github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 54 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 55 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 56 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 57 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 58 | github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= 59 | github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= 60 | github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= 61 | github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= 62 | github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= 63 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 64 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 65 | 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 66 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 67 | github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0 h1:qS4r4ljINLWKJ9m9Ge3Q3sGZ/eIoDVDT2RhAdQFHb1k= 68 | github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0/go.mod h1:oGXx2XTEzs9ikW2V6IC1dD8trgjRsS/Mvc2JRiC618Y= 69 | github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= 70 | github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 71 | github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= 72 | github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= 73 | github.com/minio/minio-go/v7 v7.0.68 h1:hTqSIfLlpXaKuNy4baAp4Jjy2sqZEN9hRxD0M4aOfrQ= 74 | github.com/minio/minio-go/v7 v7.0.68/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ= 75 | github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= 76 | github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= 77 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 78 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 79 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 80 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 81 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 82 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 83 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 84 | github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= 85 | github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= 86 | github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= 87 | github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= 88 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 89 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 90 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 91 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 92 | github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= 93 | github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= 94 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 95 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 96 | github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= 97 | github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= 98 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 99 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 100 | github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= 101 | github.com/rogpeppe/go-internal 
v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= 102 | github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= 103 | github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= 104 | github.com/samber/lo v1.46.0 h1:w8G+oaCPgz1PoCJztqymCFaKwXt+5cCXn51uPxExFfQ= 105 | github.com/samber/lo v1.46.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= 106 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 107 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 108 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 109 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 110 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 111 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 112 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 113 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 114 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 115 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 116 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 117 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 118 | go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= 119 | go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= 120 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 121 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 122 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 123 | golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= 124 | golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= 125 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= 126 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= 127 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 128 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 129 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 130 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 131 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 132 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 133 | golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= 134 | golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= 135 | golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= 136 | golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= 137 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 138 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 139 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 140 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 141 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 142 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 143 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 144 | golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= 145 | golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 146 | golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= 147 | golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= 148 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 149 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 150 | golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= 151 | golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= 152 | golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= 153 | golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 154 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 155 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 156 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 157 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 158 | golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= 159 | golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= 160 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 161 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 162 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 163 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 164 | gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= 165 | gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= 166 | google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= 167 | google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= 168 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 169 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 170 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 171 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 172 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 173 | gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= 174 | gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= 175 | 
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 176 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 177 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 178 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 179 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 180 | k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= 181 | k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= 182 | k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= 183 | k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= 184 | k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= 185 | k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= 186 | k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= 187 | k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= 188 | k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= 189 | k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 190 | k8s.io/kube-openapi v0.0.0-20240726031636-6f6746feab9c h1:CHL3IcTrTI3csK36iwYJy36uQRic+IpSoRMNH+0I8SE= 191 | k8s.io/kube-openapi v0.0.0-20240726031636-6f6746feab9c/go.mod h1:0CVn9SVo8PeW5/JgsBZZIFmmTk5noOM8WXf2e1tCihE= 192 | k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= 193 | k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 194 | sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= 195 | sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= 196 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= 197 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= 198 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= 199 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= 200 | sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= 201 | sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= 202 | -------------------------------------------------------------------------------- /internal/controller/automatic.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | "time" 8 | 9 | "github.com/hashicorp/cronexpr" 10 | "github.com/samber/lo" 11 | "github.com/skybitsnl/backsnap/api/v1alpha1" 12 | corev1 "k8s.io/api/core/v1" 13 | apierrors "k8s.io/apimachinery/pkg/api/errors" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/runtime" 16 | "k8s.io/apimachinery/pkg/types" 17 | ctrl "sigs.k8s.io/controller-runtime" 18 | "sigs.k8s.io/controller-runtime/pkg/builder" 19 | "sigs.k8s.io/controller-runtime/pkg/client" 20 | "sigs.k8s.io/controller-runtime/pkg/handler" 21 | "sigs.k8s.io/controller-runtime/pkg/predicate" 22 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 23 | ) 24 | 25 | var ( 26 | jobOwnerKey = ".metadata.controller" 27 | 28 | // Sometimes, leap seconds or time differences between systems may cause a 29 | // backup to 
exist *just* before its schedule. For example, a situation has 30 | // been observed where a backup was scheduled at 12:00:00 UTC, but its 31 | // eventual creationDate was 11:59:59 UTC. This then causes another backup 32 | // to be executed at 12:00:00, i.e. two backups within a very small time 33 | // interval. To prevent this, we allow a certain tolerance: a backup created 34 | // just before its scheduled time is treated as if it had run at that 35 | // scheduled time. The tolerance is expressed as a percentage of 36 | // the normal interval. 37 | // For example, if the schedule is @hourly, the normal interval is 3600 38 | // seconds. A tolerance factor of 1% (0.01) means a tolerance of 36 seconds, 39 | // i.e. a backup created at 11:59:24 or later will be considered scheduled 40 | // at 12:00:00, so the next backup will be created at 13:00:00 in this case. 41 | ScheduleIntervalTolerance = 0.01 42 | 43 | // Default time-to-live for newly created backups: 3 days. 44 | DefaultTTL = 3 * 24 * time.Hour 45 | ) 46 | 47 | type AutomaticPVCBackupCreator struct { 48 | client.Client 49 | Scheme *runtime.Scheme 50 | Clock 51 | DefaultSchedule string 52 | Namespaces []string 53 | ExcludeNamespaces []string 54 | } 55 | 56 | //+kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch 57 | //+kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch 58 | //+kubebuilder:rbac:groups=core,resources=persistentvolumeclaims/finalizers,verbs=update 59 | //+kubebuilder:rbac:groups=backsnap.skyb.it,resources=pvcbackups,verbs=get;list;watch;create;update;patch 60 | 61 | func (r *AutomaticPVCBackupCreator) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 62 | logger := slog.With( 63 | slog.String("namespace", req.Namespace), 64 | slog.String("pvc", req.Name), 65 | ) 66 | 67 | var namespace corev1.Namespace 68 | if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Namespace}, &namespace); err != nil { 69 | if apierrors.IsNotFound(err) { 70 | // ignore not-found errors since we don't need to create any PVCBackup for those 71 | return ctrl.Result{}, nil 72 | } 73 | logger.ErrorContext(ctx, "unable to fetch namespace", slog.Any("err", err)) 74 | return ctrl.Result{}, err 75 | } 76 | 77 | var pvc corev1.PersistentVolumeClaim 78 | if err := r.Get(ctx, req.NamespacedName, &pvc); err != nil { 79 | if apierrors.IsNotFound(err) { 80 | // ignore not-found errors since we don't need to create any PVCBackup for those 81 | return ctrl.Result{}, nil 82 | } 83 | logger.ErrorContext(ctx, "unable to fetch PVC", slog.Any("err", err)) 84 | return ctrl.Result{}, err 85 | } 86 | 87 | // Find the schedule for this PVC, from its annotations, namespace annotations or default flags.
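// Illustrative note (an assumption for clarity, not part of the original
// source): the lookup below means a PVC's own annotation takes precedence
// over its namespace's annotation, which takes precedence over the
// controller-wide DefaultSchedule. For example, a PVC could opt into daily
// backups with an annotation like:
//
//	apiVersion: v1
//	kind: PersistentVolumeClaim
//	metadata:
//	  name: my-data
//	  annotations:
//	    backsnap.skyb.it/schedule: "@daily"
//
// hashicorp/cronexpr accepts both five-field cron expressions (as used in
// the tests, "* * * * *") and descriptors such as "@hourly" or "@daily".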
88 | var schedule string 89 | if s, ok := pvc.Annotations[BackupScheduleAnnotation]; ok { 90 | schedule = s 91 | } else if s, ok := namespace.Annotations[BackupScheduleAnnotation]; ok { 92 | schedule = s 93 | } else { 94 | schedule = r.DefaultSchedule 95 | } 96 | 97 | if schedule == "" { 98 | logger.InfoContext(ctx, "ignoring PVC for backup because schedule is empty") 99 | return ctrl.Result{}, nil 100 | } 101 | 102 | parsedSchedule, err := cronexpr.Parse(schedule) 103 | if err != nil { 104 | logger.ErrorContext(ctx, "unable to parse schedule expression", 105 | slog.String("cron", schedule), 106 | slog.Any("err", err), 107 | ) 108 | return ctrl.Result{}, err 109 | } 110 | 111 | // Find the time of the most recent backup for this PVC, falling back to the PVC's own creation date 112 | var childBackups v1alpha1.PVCBackupList 113 | if err := r.List(ctx, &childBackups, client.InNamespace(pvc.Namespace), client.MatchingFields{jobOwnerKey: pvc.Name}); err != nil { 114 | logger.ErrorContext(ctx, "unable to list PVC child backups", slog.Any("err", err)) 115 | return ctrl.Result{}, err 116 | } 117 | 118 | newestBackup := lo.Reduce(childBackups.Items, func(newest time.Time, item v1alpha1.PVCBackup, _ int) time.Time { 119 | if item.CreationTimestamp.Time.After(newest) { 120 | return item.CreationTimestamp.Time 121 | } else { 122 | return newest 123 | } 124 | }, pvc.CreationTimestamp.Time) 125 | 126 | nextBackup := parsedSchedule.Next(newestBackup) 127 | 128 | // If the nextBackup is very close to the most recent backup, i.e. less than a small 129 | // percentage of the expected interval away, skip it. See the 130 | // documentation next to ScheduleIntervalTolerance. 131 | actualInterval := nextBackup.Sub(newestBackup) 132 | scheduleInterval := parsedSchedule.Next(nextBackup).Sub(nextBackup) 133 | if float64(actualInterval) < float64(scheduleInterval)*ScheduleIntervalTolerance { 134 | logger.InfoContext(ctx, "the most-recent backup is within tolerance of a more recent scheduled backup - skipping that scheduled backup", 135 | slog.Time("mostrecent", newestBackup), 136 | slog.Time("scheduled", nextBackup), 137 | ) 138 | nextBackup = parsedSchedule.Next(nextBackup) 139 | } 140 | 141 | logger.InfoContext(ctx, "next backup for pvc should occur at", 142 | slog.Time("next", nextBackup), 143 | ) 144 | 145 | // If the next backup is in the future, schedule our next reconcile then 146 | now := r.Clock.Now() // use the injected Clock (defaults to realClock) so it can be faked in tests 147 | if nextBackup.After(now) { 148 | return ctrl.Result{ 149 | Requeue: true, 150 | RequeueAfter: nextBackup.Sub(now), 151 | }, nil 152 | } 153 | 154 | // Give each backup a unique name containing its creation unix timestamp.
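// For example (illustrative, not from the original source): a PVC named
// "my-data" reconciled at unix time 1700000000 gets the PVCBackup name
// "my-data-1700000000".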
155 | backupName := fmt.Sprintf("%s-%d", pvc.Name, now.Unix()) 156 | 157 | newBackup := &v1alpha1.PVCBackup{ 158 | ObjectMeta: metav1.ObjectMeta{ 159 | Name: backupName, 160 | Namespace: pvc.Namespace, 161 | }, 162 | Spec: v1alpha1.PVCBackupSpec{ 163 | PVCName: pvc.Name, 164 | TTL: metav1.Duration{Duration: DefaultTTL}, 165 | }, 166 | } 167 | if err := ctrl.SetControllerReference(&pvc, newBackup, r.Scheme); err != nil { 168 | return ctrl.Result{}, err 169 | } 170 | if err := r.Create(ctx, newBackup); err != nil { 171 | logger.ErrorContext(ctx, "failed to create PVCBackup", slog.Any("err", err)) 172 | return ctrl.Result{}, err 173 | } 174 | 175 | logger.InfoContext(ctx, "created new PVCBackup", slog.Any("name", newBackup.ObjectMeta.Name)) 176 | 177 | // After that, schedule for the next run after this one 178 | nextBackup = parsedSchedule.Next(newBackup.CreationTimestamp.Time) 179 | return ctrl.Result{ 180 | Requeue: true, 181 | RequeueAfter: time.Until(nextBackup), 182 | }, nil 183 | } 184 | 185 | func (r *AutomaticPVCBackupCreator) SetupWithManager(mgr ctrl.Manager) error { 186 | if r.Clock == nil { 187 | r.Clock = realClock{} 188 | } 189 | 190 | if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.PVCBackup{}, jobOwnerKey, func(rawObj client.Object) []string { 191 | pvc := rawObj.(*v1alpha1.PVCBackup) 192 | owner := metav1.GetControllerOf(pvc) 193 | if owner == nil { 194 | return nil 195 | } 196 | // Make sure it's a PVC 197 | if owner.APIVersion != corev1.SchemeGroupVersion.String() || owner.Kind != "PersistentVolumeClaim" { 198 | return nil 199 | } 200 | return []string{owner.Name} 201 | }); err != nil { 202 | return err 203 | } 204 | 205 | return ctrl.NewControllerManagedBy(mgr). 206 | Named("automatic"). 207 | Watches( 208 | &corev1.PersistentVolumeClaim{}, 209 | handler.EnqueueRequestsFromMapFunc(r.pvcToRequest), 210 | builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), 211 | ). 212 | Watches( 213 | &corev1.Namespace{}, 214 | handler.EnqueueRequestsFromMapFunc(r.namespaceToRequests), 215 | builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), 216 | ). 
217 | Complete(r) 218 | } 219 | 220 | func (r *AutomaticPVCBackupCreator) pvcToRequest(ctx context.Context, pvc client.Object) []reconcile.Request { 221 | if lo.Contains(r.ExcludeNamespaces, pvc.GetNamespace()) { 222 | // namespace excluded 223 | return []reconcile.Request{} 224 | } 225 | if !lo.Contains(r.Namespaces, "") && !lo.Contains(r.Namespaces, pvc.GetNamespace()) { 226 | // namespace not included 227 | return []reconcile.Request{} 228 | } 229 | return []reconcile.Request{{ 230 | NamespacedName: types.NamespacedName{ 231 | Name: pvc.GetName(), 232 | Namespace: pvc.GetNamespace(), 233 | }, 234 | }} 235 | } 236 | 237 | func (r *AutomaticPVCBackupCreator) namespaceToRequests(ctx context.Context, namespace client.Object) []reconcile.Request { 238 | if lo.Contains(r.ExcludeNamespaces, namespace.GetName()) { 239 | // namespace excluded 240 | return []reconcile.Request{} 241 | } 242 | if !lo.Contains(r.Namespaces, "") && !lo.Contains(r.Namespaces, namespace.GetName()) { 243 | // namespace not included 244 | return []reconcile.Request{} 245 | } 246 | 247 | var pvcList corev1.PersistentVolumeClaimList 248 | if err := r.List(ctx, &pvcList, client.InNamespace(namespace.GetName())); err != nil { 249 | slog.Error("failed to enumerate PVCs in namespace", 250 | slog.String("namespace", namespace.GetName()), 251 | slog.Any("err", err), 252 | ) 253 | return []reconcile.Request{} 254 | } 255 | 256 | return lo.Map(pvcList.Items, func(pvc corev1.PersistentVolumeClaim, _ int) reconcile.Request { 257 | return reconcile.Request{ 258 | NamespacedName: types.NamespacedName{ 259 | Name: pvc.GetName(), 260 | Namespace: pvc.GetNamespace(), 261 | }, 262 | } 263 | }) 264 | } 265 | -------------------------------------------------------------------------------- /internal/controller/automatic_test.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/google/uuid" 8 | . "github.com/onsi/ginkgo/v2" 9 | . 
"github.com/onsi/gomega" 10 | "github.com/samber/lo" 11 | "github.com/skybitsnl/backsnap/api/v1alpha1" 12 | corev1 "k8s.io/api/core/v1" 13 | "k8s.io/apimachinery/pkg/api/resource" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/client-go/kubernetes/scheme" 16 | ctrl "sigs.k8s.io/controller-runtime" 17 | "sigs.k8s.io/controller-runtime/pkg/client" 18 | ) 19 | 20 | var _ = Describe("Automatic controller", func() { 21 | var namespace string 22 | ctx, cancel := context.WithCancel(context.Background()) 23 | 24 | BeforeEach(func() { 25 | By("creating a namespace") 26 | namespace = uuid.NewString() 27 | err := k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ 28 | Name: namespace, 29 | Annotations: map[string]string{ 30 | BackupScheduleAnnotation: "* * * * *", 31 | }, 32 | }}) 33 | Expect(err).ToNot(HaveOccurred()) 34 | 35 | k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ 36 | Scheme: scheme.Scheme, 37 | }) 38 | Expect(err).ToNot(HaveOccurred()) 39 | 40 | err = (&AutomaticPVCBackupCreator{ 41 | Client: k8sManager.GetClient(), 42 | Scheme: k8sManager.GetScheme(), 43 | Namespaces: []string{namespace}, 44 | DefaultSchedule: "", 45 | }).SetupWithManager(k8sManager) 46 | Expect(err).ToNot(HaveOccurred()) 47 | 48 | go func() { 49 | defer GinkgoRecover() 50 | err = k8sManager.Start(ctx) 51 | Expect(err).ToNot(HaveOccurred(), "failed to run manager") 52 | }() 53 | }) 54 | AfterEach(func() { 55 | err := k8sClient.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}) 56 | Expect(err).ToNot(HaveOccurred()) 57 | 58 | cancel() 59 | }) 60 | 61 | When("a PVC exists", func() { 62 | It("should get a PVCBackup if it has existed for long enough", func() { 63 | By("creating a new PVC") 64 | pvc := &corev1.PersistentVolumeClaim{ 65 | ObjectMeta: metav1.ObjectMeta{ 66 | Name: "my-data", 67 | Namespace: namespace, 68 | }, 69 | Spec: corev1.PersistentVolumeClaimSpec{ 70 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 71 | Resources: corev1.VolumeResourceRequirements{ 72 | Requests: map[corev1.ResourceName]resource.Quantity{ 73 | corev1.ResourceStorage: resource.MustParse("1Gi"), 74 | }, 75 | }, 76 | }, 77 | } 78 | Expect(k8sClient.Create(ctx, pvc)).Should(Succeed()) 79 | 80 | Eventually(func() bool { 81 | var list v1alpha1.PVCBackupList 82 | Expect(k8sClient.List(ctx, &list, &client.ListOptions{ 83 | Namespace: namespace, 84 | })).Should(Succeed()) 85 | 86 | return len(list.Items) == 1 && list.Items[0].Spec.PVCName == "my-data" 87 | }, time.Minute+10*time.Second, time.Second).Should(BeTrue()) 88 | 89 | // Then, just over a minute later, the list length should be 2 90 | time.Sleep(time.Second * 10) 91 | 92 | Eventually(func() bool { 93 | var list v1alpha1.PVCBackupList 94 | Expect(k8sClient.List(ctx, &list, &client.ListOptions{ 95 | Namespace: namespace, 96 | })).Should(Succeed()) 97 | 98 | return len(list.Items) == 2 && lo.EveryBy(list.Items, func(b v1alpha1.PVCBackup) bool { return b.Spec.PVCName == "my-data" }) 99 | }, time.Minute, time.Second).Should(BeTrue()) 100 | }) 101 | }) 102 | }) 103 | -------------------------------------------------------------------------------- /internal/controller/pvcrestore_controller.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | "time" 8 | 9 | batchv1 "k8s.io/api/batch/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | apierrors "k8s.io/apimachinery/pkg/api/errors" 12 | 
"k8s.io/apimachinery/pkg/api/resource" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/runtime" 15 | "k8s.io/apimachinery/pkg/types" 16 | ctrl "sigs.k8s.io/controller-runtime" 17 | "sigs.k8s.io/controller-runtime/pkg/client" 18 | 19 | "github.com/samber/lo" 20 | backsnapv1alpha1 "github.com/skybitsnl/backsnap/api/v1alpha1" 21 | ) 22 | 23 | // PVCRestoreReconciler reconciles a PVCRestore object 24 | type PVCRestoreReconciler struct { 25 | client.Client 26 | Scheme *runtime.Scheme 27 | 28 | Namespaces []string 29 | ExcludeNamespaces []string 30 | BackupSettings BackupSettings 31 | } 32 | 33 | // +kubebuilder:rbac:groups=backsnap.skyb.it,resources=pvcrestores,verbs=get;list;watch;create;update;patch;delete 34 | // +kubebuilder:rbac:groups=backsnap.skyb.it,resources=pvcrestores/status,verbs=get;update;patch 35 | // +kubebuilder:rbac:groups=backsnap.skyb.it,resources=pvcrestores/finalizers,verbs=update 36 | // +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete 37 | // TODO: should be able to add a finalizer for a PVC which is being backed up? 38 | // +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims/finalizers,verbs=update 39 | // +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete 40 | 41 | // nolint: gocyclo 42 | func (r *PVCRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 43 | logger := slog.With( 44 | slog.String("namespace", req.Namespace), 45 | slog.String("pvcrestore", req.Name), 46 | ) 47 | 48 | if lo.Contains(r.ExcludeNamespaces, req.Namespace) { 49 | // namespace excluded 50 | return ctrl.Result{}, nil 51 | } 52 | if !lo.Contains(r.Namespaces, "") && !lo.Contains(r.Namespaces, req.Namespace) { 53 | // namespace not included 54 | return ctrl.Result{}, nil 55 | } 56 | 57 | var restore backsnapv1alpha1.PVCRestore 58 | if err := r.Get(ctx, req.NamespacedName, &restore); err != nil { 59 | if apierrors.IsNotFound(err) { 60 | // apparently we don't need to restore anymore 61 | return ctrl.Result{}, nil 62 | } 63 | logger.ErrorContext(ctx, "unable to fetch PVCRestore", slog.Any("err", err)) 64 | return ctrl.Result{}, err 65 | } 66 | 67 | if !restore.Status.FinishedAt.IsZero() { 68 | if restore.Status.Result == nil || *restore.Status.Result != "Succeeded" { 69 | logger.ErrorContext(ctx, "pvcrestore failed, not reconciling - please clean it up yourself") 70 | return ctrl.Result{}, nil 71 | } 72 | 73 | // TODO: remove PVCRestore after TTL? 
74 | return ctrl.Result{}, nil 75 | } 76 | 77 | targetPvc := restore.Spec.TargetPVC 78 | if targetPvc == "" { 79 | targetPvc = restore.Spec.SourcePVC 80 | } 81 | if targetPvc == "" { 82 | logger.ErrorContext(ctx, "failed to start restore without source PVC") 83 | return ctrl.Result{}, fmt.Errorf("failed to start restore without source PVC") 84 | } 85 | 86 | if restore.Status.StartedAt == nil { 87 | restore.Status.StartedAt = lo.ToPtr(metav1.Time{Time: time.Now()}) 88 | if err := r.Status().Update(ctx, &restore); err != nil { 89 | logger.ErrorContext(ctx, "failed to update PVCRestore", slog.Any("err", err)) 90 | return ctrl.Result{}, err 91 | } 92 | } 93 | 94 | // The PVC must either not exist, or be marked as "restoring" with the UID 95 | // of the PVCRestore object, otherwise we error out here 96 | var pvc corev1.PersistentVolumeClaim 97 | if err := r.Get(ctx, types.NamespacedName{ 98 | Namespace: req.Namespace, 99 | Name: targetPvc, 100 | }, &pvc); err != nil { 101 | if !apierrors.IsNotFound(err) { 102 | logger.ErrorContext(ctx, "unable to fetch PVC", slog.Any("err", err)) 103 | return ctrl.Result{}, err 104 | } 105 | 106 | var storageClass *string 107 | if r.BackupSettings.StorageClass != "" { 108 | storageClass = &r.BackupSettings.StorageClass 109 | } 110 | annotations := make(map[string]string, len(restore.Spec.Annotations)+1) 111 | for k, v := range restore.Spec.Annotations { 112 | annotations[k] = v 113 | } 114 | // Allow the restore job to restart on this PVC during this restore only. 115 | annotations[CurrentlyRestoringAnnotation] = string(restore.UID) 116 | 117 | pvc = corev1.PersistentVolumeClaim{ 118 | ObjectMeta: metav1.ObjectMeta{ 119 | Name: targetPvc, 120 | Namespace: req.Namespace, 121 | Labels: restore.Spec.Labels, 122 | Annotations: annotations, 123 | }, 124 | Spec: corev1.PersistentVolumeClaimSpec{ 125 | StorageClassName: storageClass, 126 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 127 | Resources: corev1.VolumeResourceRequirements{ 128 | Requests: map[corev1.ResourceName]resource.Quantity{ 129 | corev1.ResourceStorage: restore.Spec.TargetPVCSize, 130 | }, 131 | }, 132 | }, 133 | } 134 | // Do NOT set the PVCRestore as the owner of the PVC, or the PVC will be 135 | // destroyed when the PVCRestore is destroyed.
136 | if err := r.Create(ctx, &pvc); err != nil { 137 | logger.ErrorContext(ctx, "unable to create PVC", slog.Any("err", err)) 138 | return ctrl.Result{}, err 139 | } 140 | 141 | logger.InfoContext(ctx, "created PVC to restore to", slog.String("pvc", pvc.ObjectMeta.Name)) 142 | } 143 | 144 | if uid, ok := pvc.ObjectMeta.Annotations[CurrentlyRestoringAnnotation]; !ok || uid != string(restore.UID) { 145 | logger.ErrorContext(ctx, "PVC to restore to already exists - refusing to reconcile") 146 | setRestoreFinished(&restore, "Failed") 147 | if err := r.Status().Update(ctx, &restore); err != nil { 148 | logger.ErrorContext(ctx, "Failed to update PVCRestore", slog.Any("err", err)) 149 | return ctrl.Result{}, err 150 | } 151 | return ctrl.Result{}, nil 152 | } 153 | 154 | var job batchv1.Job 155 | if err := r.Get(ctx, req.NamespacedName, &job); err != nil { 156 | if !apierrors.IsNotFound(err) { 157 | logger.ErrorContext(ctx, "unable to fetch job", slog.Any("err", err)) 158 | return ctrl.Result{}, err 159 | } 160 | 161 | var imagePullSecrets []corev1.LocalObjectReference 162 | if r.BackupSettings.ImagePullSecret != "" { 163 | imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{ 164 | Name: r.BackupSettings.ImagePullSecret, 165 | }) 166 | } 167 | 168 | if restore.Spec.SourceNamespace == "" { 169 | restore.Spec.SourceNamespace = restore.Namespace 170 | } 171 | if restore.Spec.SourceSnapshot == "" { 172 | restore.Spec.SourceSnapshot = "latest" 173 | } 174 | 175 | annotations := make(map[string]string, len(restore.Spec.Annotations)+1) 176 | for k, v := range restore.Spec.Annotations { 177 | annotations[k] = v 178 | } 179 | // Allow the restore job to recognize this Job during this restore only. 180 | annotations[CurrentlyRestoringAnnotation] = string(restore.UID) 181 | 182 | job = batchv1.Job{ 183 | ObjectMeta: metav1.ObjectMeta{ 184 | Namespace: restore.Namespace, 185 | Name: restore.Name, 186 | Labels: restore.Spec.Labels, 187 | Annotations: annotations, 188 | }, 189 | Spec: batchv1.JobSpec{ 190 | BackoffLimit: lo.ToPtr(int32(4)), 191 | Template: corev1.PodTemplateSpec{ 192 | ObjectMeta: metav1.ObjectMeta{ 193 | Labels: restore.Spec.Labels, 194 | Annotations: restore.Spec.Annotations, 195 | }, 196 | Spec: corev1.PodSpec{ 197 | ImagePullSecrets: imagePullSecrets, 198 | RestartPolicy: "OnFailure", 199 | NodeSelector: restore.Spec.NodeSelector, 200 | Tolerations: restore.Spec.Tolerations, 201 | PriorityClassName: restore.Spec.PriorityClassName, 202 | Volumes: []corev1.Volume{{ 203 | Name: "data", 204 | VolumeSource: corev1.VolumeSource{ 205 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 206 | ClaimName: pvc.Name, 207 | }, 208 | }, 209 | }}, 210 | Containers: []corev1.Container{{ 211 | Name: "default", 212 | Image: r.BackupSettings.Image, 213 | ImagePullPolicy: "Always", 214 | Command: []string{ 215 | "restic", "restore", 216 | restore.Spec.SourceSnapshot, 217 | "--sparse", "--verify", 218 | "--target", "/data", 219 | }, 220 | Env: []corev1.EnvVar{{ 221 | Name: "BACKUP_NAMESPACE", 222 | Value: restore.Spec.SourceNamespace, 223 | }, { 224 | Name: "BACKUP_VOLUME", 225 | Value: restore.Spec.SourcePVC, 226 | }, { 227 | Name: "RESTIC_REPOSITORY_BASE", 228 | Value: "s3:" + r.BackupSettings.S3Host + "/" + r.BackupSettings.S3Bucket, 229 | }, { 230 | Name: "RESTIC_PASSWORD", 231 | Value: r.BackupSettings.ResticPassword, 232 | }, { 233 | Name: "AWS_ACCESS_KEY_ID", 234 | Value: r.BackupSettings.S3AccessKeyId, 235 | }, { 236 | Name: "AWS_SECRET_ACCESS_KEY", 237 | Value: 
r.BackupSettings.S3SecretAccessKey, 238 | }, { 239 | Name: "RESTIC_HOSTNAME", 240 | Value: "$(BACKUP_NAMESPACE)", 241 | }, { 242 | Name: "RESTIC_REPOSITORY", 243 | Value: "$(RESTIC_REPOSITORY_BASE)/$(BACKUP_NAMESPACE)/$(BACKUP_VOLUME)", 244 | }}, 245 | // We mount the volume at /data/data, and then restore to /data, because the 246 | // topmost directory of the backup will be /data, so it will end up at /data/data 247 | // and therefore in the root of the PVC 248 | VolumeMounts: []corev1.VolumeMount{{ 249 | Name: "data", 250 | MountPath: "/data/data", 251 | }}, 252 | }}, 253 | }, 254 | }, 255 | }, 256 | } 257 | if err := ctrl.SetControllerReference(&restore, &job, r.Scheme); err != nil { 258 | return ctrl.Result{}, err 259 | } 260 | if err := r.Create(ctx, &job); err != nil { 261 | return ctrl.Result{}, err 262 | } 263 | 264 | logger.InfoContext(ctx, "created restore job", 265 | slog.String("name", job.Name), 266 | ) 267 | } 268 | 269 | if uid, ok := job.ObjectMeta.Annotations[CurrentlyRestoringAnnotation]; !ok || uid != string(restore.UID) { 270 | logger.ErrorContext(ctx, "Restore job already exists - refusing to reconcile") 271 | setRestoreFinished(&restore, "Failed") 272 | if err := r.Status().Update(ctx, &restore); err != nil { 273 | logger.ErrorContext(ctx, "Failed to update PVCRestore", slog.Any("err", err)) 274 | return ctrl.Result{}, err 275 | } 276 | return ctrl.Result{}, nil 277 | } 278 | 279 | // TODO: we could tail the job's log here for ease of access 280 | 281 | if job.Status.CompletionTime == nil || job.Status.CompletionTime.IsZero() { 282 | logger.InfoContext(ctx, "restore job is running", 283 | slog.String("name", job.Name), 284 | ) 285 | 286 | // Wait for the job to complete. We'll automatically get another reconcile 287 | // when the job changes. 288 | return ctrl.Result{}, nil 289 | } 290 | 291 | logger.InfoContext(ctx, "restore job succeeded", 292 | slog.String("name", req.Name)) 293 | 294 | // Remove the restore annotation from the PVC 295 | delete(pvc.ObjectMeta.Annotations, CurrentlyRestoringAnnotation) 296 | if err := r.Update(ctx, &pvc); err != nil { 297 | logger.ErrorContext(ctx, "Failed to remove annotation from PVC", slog.Any("err", err)) 298 | setRestoreFinished(&restore, "Failed") 299 | if err := r.Status().Update(ctx, &restore); err != nil { 300 | logger.ErrorContext(ctx, "Failed to update PVCRestore", slog.Any("err", err)) 301 | return ctrl.Result{}, err 302 | } 303 | return ctrl.Result{}, nil 304 | } 305 | 306 | setRestoreFinished(&restore, "Succeeded") 307 | if err := r.Status().Update(ctx, &restore); err != nil { 308 | logger.ErrorContext(ctx, "Failed to update PVCRestore", slog.Any("err", err)) 309 | return ctrl.Result{}, err 310 | } 311 | return ctrl.Result{}, nil 312 | } 313 | 314 | func setRestoreFinished(restore *backsnapv1alpha1.PVCRestore, result backsnapv1alpha1.Result) { 315 | restore.Status.FinishedAt = lo.ToPtr(metav1.Time{Time: time.Now()}) 316 | duration := restore.Status.FinishedAt.Sub(restore.Status.StartedAt.Time) 317 | duration = duration.Round(time.Second) 318 | restore.Status.Duration = lo.ToPtr(metav1.Duration{Duration: duration}) 319 | restore.Status.Result = lo.ToPtr[backsnapv1alpha1.Result](result) 320 | } 321 | 322 | // SetupWithManager sets up the controller with the Manager. 323 | func (r *PVCRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error { 324 | return ctrl.NewControllerManagedBy(mgr). 325 | For(&backsnapv1alpha1.PVCRestore{}). 326 | Owns(&corev1.PersistentVolumeClaim{}). 327 | Owns(&batchv1.Job{}).
328 | Complete(r) 329 | } 330 | -------------------------------------------------------------------------------- /internal/controller/suite_test.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "path/filepath" 7 | "runtime" 8 | "testing" 9 | 10 | volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" 11 | . "github.com/onsi/ginkgo/v2" 12 | . "github.com/onsi/gomega" 13 | "github.com/samber/lo" 14 | batchv1 "k8s.io/api/batch/v1" 15 | "k8s.io/client-go/kubernetes/scheme" 16 | "k8s.io/client-go/rest" 17 | "sigs.k8s.io/controller-runtime/pkg/client" 18 | "sigs.k8s.io/controller-runtime/pkg/envtest" 19 | logf "sigs.k8s.io/controller-runtime/pkg/log" 20 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 21 | 22 | backsnapv1alpha1 "github.com/skybitsnl/backsnap/api/v1alpha1" 23 | //+kubebuilder:scaffold:imports 24 | ) 25 | 26 | var cfg *rest.Config 27 | var k8sClient client.Client 28 | var testEnv *envtest.Environment 29 | 30 | func TestControllers(t *testing.T) { 31 | RegisterFailHandler(Fail) 32 | 33 | RunSpecs(t, "Controller Suite") 34 | } 35 | 36 | var _ = BeforeSuite(func() { 37 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 38 | slog.SetDefault(slog.New(slog.NewTextHandler(GinkgoWriter, &slog.HandlerOptions{}))) 39 | 40 | By("bootstrapping test environment") 41 | testEnv = &envtest.Environment{ 42 | // Need to use an existing cluster, since we need to actually run Pods in these e2e tests. 43 | // NOTE: The cluster must not be running another version of the controllers, since it 44 | // will interfere. It's recommended to run these tests against Minikube. 45 | UseExistingCluster: lo.ToPtr(true), 46 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 47 | ErrorIfCRDPathMissing: true, 48 | 49 | // The BinaryAssetsDirectory is only required if you want to run the tests directly 50 | // without calling the makefile target test. If not set, it will look for the 51 | // default path defined in controller-runtime, which is /usr/local/kubebuilder/. 52 | // Note that you must have the required binaries set up under the bin directory to run 53 | // the tests directly. When we run make test, this is set up and used automatically.
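// For instance (illustrative, assuming the default envtest layout): with the
// fmt.Sprintf below, on a linux/amd64 machine this resolves to
// bin/k8s/1.28.3-linux-amd64/, which is where envtest expects binaries such
// as etcd and kube-apiserver to live.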
54 | BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", 55 | fmt.Sprintf("1.28.3-%s-%s", runtime.GOOS, runtime.GOARCH)), 56 | } 57 | 58 | var err error 59 | cfg, err = testEnv.Start() 60 | Expect(err).NotTo(HaveOccurred()) 61 | Expect(cfg).NotTo(BeNil()) 62 | 63 | err = batchv1.AddToScheme(scheme.Scheme) 64 | Expect(err).NotTo(HaveOccurred()) 65 | err = volumesnapshotv1.AddToScheme(scheme.Scheme) 66 | Expect(err).NotTo(HaveOccurred()) 67 | err = backsnapv1alpha1.AddToScheme(scheme.Scheme) 68 | Expect(err).NotTo(HaveOccurred()) 69 | 70 | //+kubebuilder:scaffold:scheme 71 | 72 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 73 | Expect(err).NotTo(HaveOccurred()) 74 | Expect(k8sClient).NotTo(BeNil()) 75 | }) 76 | 77 | var _ = AfterSuite(func() { 78 | By("tearing down the test environment") 79 | err := testEnv.Stop() 80 | Expect(err).NotTo(HaveOccurred()) 81 | }) 82 | -------------------------------------------------------------------------------- /internal/controller/util.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import "time" 4 | 5 | var ( 6 | BackupScheduleAnnotation = "backsnap.skyb.it/schedule" 7 | CurrentlyRestoringAnnotation = "backsnap.skyb.it/restoring" 8 | ) 9 | 10 | type realClock struct{} 11 | 12 | func (realClock) Now() time.Time { return time.Now() } 13 | 14 | type Clock interface { 15 | Now() time.Time 16 | } 17 | 18 | type BackupSettings struct { 19 | SnapshotClass string 20 | StorageClass string 21 | ImagePullSecret string 22 | Image string 23 | // S3 hostname (can be host, host:port or http://host:port/) 24 | S3Host string 25 | S3Bucket string 26 | S3AccessKeyId string 27 | S3SecretAccessKey string 28 | ResticPassword string 29 | } 30 | -------------------------------------------------------------------------------- /restic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | restic version 5 | 6 | RESTIC_HOSTNAME="${RESTIC_HOSTNAME:-$(hostname)}" 7 | 8 | # restic init if necessary 9 | set +e 10 | CAT_ERROR=$(restic cat config 2>&1) 11 | EXIT_CODE=$? 12 | set -e 13 | if [ "$EXIT_CODE" != "0" ]; then 14 | if [ "$(echo "${CAT_ERROR}" | grep 'Is there a')" = "Is there a repository at the following location?" ]; then 15 | echo -e "\n== No repository according to Restic, running restic init... ==\n" 16 | restic init -vvv 17 | else 18 | echo "$CAT_ERROR" 19 | echo -e "\n== Restic init check failed, exiting ==\n" 20 | exit 1 21 | fi 22 | fi 23 | 24 | echo -e "\n== Backing up... 
==\n" 25 | 26 | set -x 27 | restic --verbose backup --host "${RESTIC_HOSTNAME}" /data 28 | restic --verbose check 29 | set +x 30 | 31 | echo -e "\n== Restic snapshots before pruning: ==\n" 32 | 33 | restic snapshots 34 | 35 | echo "" 36 | 37 | set -x 38 | restic forget --keep-last 1 --keep-hourly 12 --keep-daily 7 --keep-weekly 2 --keep-monthly 2 39 | restic prune 40 | set +x 41 | 42 | echo -e "\n== Restic snapshots after pruning: ==\n" 43 | restic snapshots 44 | 45 | echo "" 46 | 47 | # restic docs recommend another check after pruning 48 | restic --verbose check --read-data-subset=10% 49 | 50 | exit 0 51 | -------------------------------------------------------------------------------- /retag-images-for-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TAG_SOURCE="sjorsgielen/backsnap:latest" 4 | 5 | PUSH="0" 6 | TAG_DESTINATION="$1" 7 | if [ "$TAG_DESTINATION" == "--push" ]; then 8 | PUSH="1" 9 | TAG_DESTINATION="$2" 10 | fi 11 | 12 | if [ -z "$TAG_DESTINATION" ]; then 13 | echo "Usage: $0 [ --push ] " 14 | echo "Example: $0 my-private-registry/backsnap:test-new-feature" 15 | echo "" 16 | echo "This will tag the recently built -arm64 and -amd64 images towards your registry," 17 | echo "and also create a multiarch image with the exact name you provided." 18 | echo "If you pass --push, it will also push all images to your registry." 19 | exit 1 20 | fi 21 | 22 | set -xe 23 | 24 | docker tag ${TAG_SOURCE}-arm64 ${TAG_DESTINATION}-arm64 25 | docker tag ${TAG_SOURCE}-amd64 ${TAG_DESTINATION}-amd64 26 | 27 | # The images must be pushed before we can create the manifest 28 | 29 | { set +x; } 2>/dev/null 30 | if [ "$PUSH" = "1" ]; then 31 | set -x 32 | docker push ${TAG_DESTINATION}-arm64 33 | docker push ${TAG_DESTINATION}-amd64 34 | else 35 | set -x 36 | fi 37 | 38 | # need to remove explicitly, or 'create' doesn't seem to refresh properly 39 | docker manifest rm ${TAG_DESTINATION} 2>/dev/null || true 40 | 41 | docker manifest create ${TAG_DESTINATION} \ 42 | --amend ${TAG_DESTINATION}-arm64 \ 43 | --amend ${TAG_DESTINATION}-amd64 44 | 45 | { set +x; } 2>/dev/null 46 | if [ "$PUSH" = "1" ]; then 47 | set -x 48 | docker manifest push ${TAG_DESTINATION} 49 | fi 50 | --------------------------------------------------------------------------------