├── .dockerignore ├── .editorconfig ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ └── feature_request.yml ├── PULL_REQUEST_TEMPLATE.md ├── boilerplate.go.txt ├── changelog-charts.json ├── changelog-charts.sh ├── changelog-configuration.json ├── helm-docs-footer.gotmpl.md ├── helm-docs-header.gotmpl.md ├── render-charts.sh └── workflows │ ├── chart-lint.yml │ ├── chart-release.yml │ ├── chart-test.yml │ ├── docs-lint.yml │ ├── docs.yml │ ├── e2e.yml │ ├── lint.yml │ ├── master.yml │ ├── release.yml │ ├── scorecard.yml │ └── test.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yml ├── ADOPTERS.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── GOVERNANCE.md ├── LICENSE ├── Makefile ├── Makefile.restic-integration.mk ├── Makefile.restic-integration.vars.mk ├── Makefile.vars.mk ├── OWNERS.md ├── README.md ├── api └── v1 │ ├── archive_types.go │ ├── backend.go │ ├── backend_test.go │ ├── backup_types.go │ ├── check_types.go │ ├── common_types.go │ ├── groupversion_info.go │ ├── history_limits_test.go │ ├── job_object.go │ ├── object_list_test.go │ ├── podconfig_types.go │ ├── prebackuppod_types.go │ ├── prune_types.go │ ├── restore_types.go │ ├── runnable_types.go │ ├── schedule_types.go │ ├── snapshot_types.go │ ├── status.go │ └── zz_generated.deepcopy.go ├── charts ├── charts.mk ├── go.mod ├── go.sum ├── k8up │ ├── .helmignore │ ├── Chart.yaml │ ├── Makefile │ ├── README.gotmpl.md │ ├── README.md │ ├── crds │ │ ├── k8up.io_archives.yaml │ │ ├── k8up.io_backups.yaml │ │ ├── k8up.io_checks.yaml │ │ ├── k8up.io_podconfigs.yaml │ │ ├── k8up.io_prebackuppods.yaml │ │ ├── k8up.io_prunes.yaml │ │ ├── k8up.io_restores.yaml │ │ ├── k8up.io_schedules.yaml │ │ └── k8up.io_snapshots.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── cleanup-hook.yaml │ │ ├── clusterrolebinding.yaml │ │ ├── deployment.yaml │ │ ├── executor-clusterrole.yaml │ │ ├── grafana-dashboard.yaml │ │ ├── operator-clusterrole.yaml │ │ ├── prometheus │ 
│ │ ├── prometheusrule.yaml │ │ │ └── servicemonitor.yaml │ │ ├── service.yaml │ │ ├── serviceaccount.yaml │ │ └── user-clusterrole.yaml │ ├── test │ │ ├── deployment_test.go │ │ ├── main_test.go │ │ ├── operator-clusterrole_test.go │ │ ├── prometheus │ │ │ ├── prometheus_test.go │ │ │ ├── prometheusrule_test.go │ │ │ ├── servicemonitor_test.go │ │ │ └── testdata │ │ │ │ ├── custom_rules.yaml │ │ │ │ └── labels.yaml │ │ ├── service_test.go │ │ ├── serviceaccount_test.go │ │ └── testdata │ │ │ └── deployment_1.yaml │ └── values.yaml └── tools.go ├── clean.sh ├── cli └── restore │ ├── helpers.go │ └── restore.go ├── cmd ├── cli │ └── main.go ├── k8up │ └── main.go ├── logger.go ├── operator │ └── main.go └── restic │ ├── integration_test.go │ └── main.go ├── common ├── targzipwriter.go └── targzipwriter_test.go ├── config ├── crd │ └── apiextensions.k8s.io │ │ └── v1 │ │ ├── k8up.io_archives.yaml │ │ ├── k8up.io_backups.yaml │ │ ├── k8up.io_checks.yaml │ │ ├── k8up.io_podconfigs.yaml │ │ ├── k8up.io_prebackuppods.yaml │ │ ├── k8up.io_prunes.yaml │ │ ├── k8up.io_restores.yaml │ │ ├── k8up.io_schedules.yaml │ │ └── k8up.io_snapshots.yaml ├── rbac │ └── role.yaml └── samples │ ├── deployments │ ├── galera.yaml │ ├── mariadb.yaml │ ├── minio.yaml │ ├── mongodb.yaml │ ├── pv-example.yaml │ └── pvc-example.yaml │ ├── k8up_v1_archive.yaml │ ├── k8up_v1_backup.yaml │ ├── k8up_v1_check.yaml │ ├── k8up_v1_prebackuppod.yaml │ ├── k8up_v1_prune.yaml │ ├── k8up_v1_restore.yaml │ ├── k8up_v1_schedule.yaml │ ├── k8up_v1_snapshot.yaml │ ├── kustomization.yaml │ ├── prometheus │ ├── prometheus.yaml │ └── rules.yaml │ └── secrets.yaml ├── deployment.yaml ├── docs ├── antora.yml ├── api-gen-config.yaml ├── api-templates │ ├── gv-details.tpl │ ├── gv-list.tpl │ ├── type-members.tpl │ └── type.tpl ├── docs.mk ├── k8up-ng.excalidraw ├── k8up.adoc └── modules │ └── ROOT │ ├── assets │ ├── attachments │ │ └── slides.pdf │ └── images │ │ ├── architecture.drawio.svg │ │ ├── 
k8up-logo-square.png │ │ ├── k8up-logo-square.svg │ │ ├── k8up-logo.png │ │ ├── k8up-logo.svg │ │ ├── minio_browser.png │ │ └── tutorial │ │ ├── k9s-delete.png │ │ ├── logo.png │ │ ├── minio-browser.png │ │ ├── wordpress-db-error.png │ │ ├── wordpress-defaced.png │ │ ├── wordpress-install.png │ │ └── wordpress-restored.png │ ├── examples │ ├── archive.yaml │ ├── credentials.yaml │ ├── minio-standalone-deployment.yaml │ ├── minio-standalone-pvc.yaml │ ├── minio-standalone-service.yaml │ ├── open-stack-swift-auth.yaml │ ├── pvc.yaml │ ├── references │ │ └── effective-schedule.yaml │ ├── schedule.yaml │ ├── tutorial │ │ ├── backup.yaml │ │ ├── mariadb │ │ │ ├── deployment.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── pvc.yaml │ │ │ └── service.yaml │ │ ├── minio │ │ │ ├── deployment.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── pvc.yaml │ │ │ └── service.yaml │ │ ├── restore │ │ │ ├── wordpress-bucket-to-bucket.yaml │ │ │ └── wordpress.yaml │ │ ├── scripts │ │ │ ├── 1_setup.sh │ │ │ ├── 2_browser.sh │ │ │ ├── 3_backup.sh │ │ │ ├── 4_restore.sh │ │ │ ├── 5_schedule.sh │ │ │ ├── 6_stop.sh │ │ │ └── environment.sh │ │ ├── secrets │ │ │ ├── backup-repo.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── mariadb-pass.yaml │ │ │ └── minio-credentials.yaml │ │ └── wordpress │ │ │ ├── deployment.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── pvc.yaml │ │ │ └── service.yaml │ └── usage │ │ ├── k8up.txt │ │ ├── operator.txt │ │ └── restic.txt │ ├── nav.adoc │ └── pages │ ├── .vale.ini │ ├── about │ ├── code_of_conduct.adoc │ ├── community.adoc │ ├── contribution_guide.adoc │ ├── roadmap.adoc │ ├── visual_design.adoc │ └── vulnerability_reporting.adoc │ ├── explanations │ ├── architecture.adoc │ ├── backup.adoc │ ├── ide.adoc │ ├── missing-docs.adoc │ ├── release.adoc │ ├── rwo.adoc │ ├── system-requirements.adoc │ ├── what-has-changed-in-v1.adoc │ └── what-has-changed-in-v2.adoc │ ├── how-tos │ ├── application-aware-backups.adoc │ ├── archive.adoc │ ├── backup.adoc │ ├── check-status.adoc │ 
├── generic-env-vars.adoc │ ├── installation.adoc │ ├── manage-pod-resources.adoc │ ├── optimize-schedules.adoc │ ├── prebackuppod.adoc │ ├── restore.adoc │ ├── schedules.adoc │ └── upgrade.adoc │ ├── index.adoc │ ├── references │ ├── annotations.adoc │ ├── api-reference.adoc │ ├── object-specifications.adoc │ ├── operator-config-reference.adoc │ ├── restic-config-reference.adoc │ ├── schedule-specification.adoc │ └── status.adoc │ └── tutorials │ ├── presentations.adoc │ └── tutorial.adoc ├── e2e ├── .gitignore ├── Makefile ├── definitions │ ├── annotated-subject │ │ ├── deployment-error.yaml │ │ ├── deployment.yaml │ │ └── pod.yaml │ ├── archive │ │ ├── config-mtls-env.yaml │ │ ├── s3-mtls-archive-mtls-env.yaml │ │ ├── s3-mtls-archive-mtls.yaml │ │ ├── s3-mtls-archive-tls.yaml │ │ ├── s3-tls-archive-mtls.yaml │ │ └── s3-tls-archive-tls.yaml │ ├── backup │ │ ├── backup-mtls-env.yaml │ │ ├── backup-mtls.yaml │ │ ├── backup-selectors.yaml │ │ ├── backup-tls.yaml │ │ ├── backup.yaml │ │ ├── config-mtls-env.yaml │ │ └── podconfig.yaml │ ├── cert │ │ ├── issure.yaml │ │ ├── minio-ca.yaml │ │ ├── minio-mtls.yaml │ │ └── minio-tls.yaml │ ├── check │ │ ├── check-mtls-env.yaml │ │ ├── check-mtls.yaml │ │ ├── check-tls.yaml │ │ └── config-mtls-env.yaml │ ├── kind │ │ └── config.yaml │ ├── minio │ │ ├── helm.yaml │ │ └── pvc.yaml │ ├── operator │ │ └── values.yaml │ ├── prebackup │ │ ├── prebackup-match-labels.yaml │ │ └── prebackup-no-labels.yaml │ ├── proxy │ │ ├── config.yaml │ │ ├── deployment.yaml │ │ └── service.yaml │ ├── pv │ │ ├── pvc.yaml │ │ └── pvcs-matching-labels.yaml │ ├── pvc-rwo-subject │ │ ├── controlplane.yaml │ │ └── worker.yaml │ ├── restore │ │ ├── config-mtls-env.yaml │ │ ├── restore-backupcommand.yaml │ │ ├── restore-mtls.yaml │ │ ├── restore-tls.yaml │ │ ├── restore.yaml │ │ ├── s3-mtls-restore-mtls-env.yaml │ │ ├── s3-mtls-restore-mtls.yaml │ │ ├── s3-mtls-restore-tls.yaml │ │ ├── s3-tls-restore-mtls.yaml │ │ └── s3-tls-restore-tls.yaml │ ├── 
secrets │ │ └── secrets.yaml │ ├── subject-dl │ │ └── deployment.yaml │ └── subject │ │ ├── deployment-pvc-with-labels.yaml │ │ └── deployment.yaml ├── kind.mk ├── lib │ ├── detik.bash │ ├── k8up.bash │ ├── linter.bash │ └── utils.bash ├── package-lock.json ├── package.json ├── test-01-lint.bats ├── test-02-deployment.bats ├── test-03-backup.bats ├── test-04-restore.bats ├── test-05-annotated-backup.bats ├── test-06-pvc-rwo.bats ├── test-07-snapshots.bats.disabled ├── test-08-restore-backupcommand.bats ├── test-09-pod-backupcommand.bats ├── test-10-restore-self-signed-tls.bats ├── test-11-archive-self-signed-tls.bats ├── test-12-annotated-failure.bats ├── test-13-cleanup-empty-jobs.bats └── test-15-backup-with-selectors.bats ├── envtest ├── envsuite.go ├── integration.mk └── rootpath.go ├── exec.sh ├── fetch_restic.sh ├── go.mod ├── go.sum ├── kill.sh ├── operator ├── README.md ├── archivecontroller │ ├── controller.go │ ├── executor.go │ └── setup.go ├── backupcontroller │ ├── backup_utils.go │ ├── backup_utils_test.go │ ├── controller.go │ ├── controller_integration_test.go │ ├── controller_utils_integration_test.go │ ├── executor.go │ ├── executor_test.go │ ├── prebackup.go │ ├── prebackup_utils.go │ └── setup.go ├── cfg │ ├── config.go │ └── config_test.go ├── checkcontroller │ ├── controller.go │ ├── controller_integration_test.go │ ├── controller_utils_integration_test.go │ ├── executor.go │ └── setup.go ├── controllers.go ├── executor │ ├── cleaner │ │ ├── cleaner.go │ │ ├── cleaner_integration_test.go │ │ └── cleaner_test.go │ ├── envvarconverter.go │ ├── envvarconverter_test.go │ └── generic.go ├── job │ ├── job.go │ ├── job_test.go │ └── status.go ├── locker │ ├── locker.go │ └── locker_test.go ├── monitoring │ └── prometheus.go ├── prunecontroller │ ├── controller.go │ ├── executor.go │ └── setup.go ├── reconciler │ └── reconciler.go ├── restorecontroller │ ├── controller.go │ ├── controller_integration_test.go │ ├── executor.go │ ├── executor_test.go │ 
└── setup.go ├── schedulecontroller │ ├── controller.go │ ├── effectiveschedule.go │ ├── effectiveschedule_test.go │ ├── handler.go │ ├── handler_test.go │ ├── randomizer.go │ ├── randomizer_test.go │ ├── schedule_integration_test.go │ ├── schedule_utils_integration_test.go │ └── setup.go ├── scheduler │ ├── scheduler.go │ └── scheduler_test.go └── utils │ ├── utils.go │ └── utils_test.go ├── renovate.json ├── restic ├── README.md ├── cfg │ └── config.go ├── cli │ ├── archive.go │ ├── backup.go │ ├── check.go │ ├── command.go │ ├── flags.go │ ├── flags_test.go │ ├── init.go │ ├── interfaces.go │ ├── prune.go │ ├── restic.go │ ├── restore.go │ ├── snapshots.go │ ├── stats.go │ ├── stdinbackup.go │ ├── unlock.go │ ├── utils.go │ └── wait.go ├── dto │ └── snapshot.go ├── kubernetes │ ├── config.go │ ├── pod_exec.go │ ├── pod_list.go │ ├── snapshots.go │ └── snapshots_test.go ├── logging │ └── logging.go ├── s3 │ └── client.go └── stats │ └── handler.go └── tools.go /.dockerignore: -------------------------------------------------------------------------------- 1 | .* 2 | * 3 | !k8up 4 | !fetch_restic.sh 5 | !go.mod 6 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | charset = utf-8 7 | 8 | [Makefile] 9 | indent_style = tab 10 | 11 | [{*.go, go.mod, *.bats, *.bash}] 12 | indent_style = tab 13 | 14 | [{*.yml, *.json, *.css, *.js}] 15 | indent_style = space 16 | indent_size = 2 17 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: 🐛 Bug report 2 | description: Create a report to help us improve 🎉 3 | labels: 4 | - bug 5 | 6 | body: 7 | - type: textarea 8 | id: description 9 | attributes: 10 | label: 
Description 11 | description: A clear and concise description of what the bug is. 12 | validations: 13 | required: true 14 | - type: textarea 15 | id: context 16 | attributes: 17 | label: Additional Context 18 | description: Add any other context about the problem here. 19 | validations: 20 | required: false 21 | - type: textarea 22 | id: logs 23 | attributes: 24 | label: Logs 25 | description: If applicable, add logs to help explain the bug. 26 | render: shell 27 | validations: 28 | required: false 29 | - type: textarea 30 | id: expected_behavior 31 | attributes: 32 | label: Expected Behavior 33 | description: A clear and concise description of what you expected to happen. 34 | validations: 35 | required: true 36 | - type: textarea 37 | id: reproduction_steps 38 | attributes: 39 | label: Steps To Reproduce 40 | description: Describe steps to reproduce the behavior 41 | placeholder: | 42 | 1. Specs 43 | ```yaml 44 | 45 | ``` 46 | 2. Commands 47 | ```bash 48 | 49 | ``` 50 | validations: 51 | required: false 52 | - type: input 53 | id: k8up_version 54 | attributes: 55 | label: Version of K8up 56 | placeholder: e.g. v2.0.0 57 | validations: 58 | required: true 59 | - type: input 60 | id: k8s_version 61 | attributes: 62 | label: Version of Kubernetes 63 | placeholder: e.g. v1.22 64 | validations: 65 | required: true 66 | - type: input 67 | id: k8s_distribution 68 | attributes: 69 | label: Distribution of Kubernetes 70 | placeholder: e.g. 
OpenShift, Rancher, … 71 | validations: 72 | required: true 73 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: ❓ Question 4 | url: https://github.com/k8up-io/k8up/discussions 5 | about: Ask or discuss with us, we're happy to help 🙋 6 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Summary 2 | 3 | * Short summary of what's included in the PR 4 | * Give special note to breaking changes: List the exact changes or provide links to documentation. 5 | 6 | ## Checklist 7 | 8 | ### For Code changes 9 | 10 | - [ ] Categorize the PR by setting a good title and adding one of the labels: 11 | `bug`, `enhancement`, `documentation`, `change`, `breaking`, `dependency` 12 | as they show up in the changelog 13 | - [ ] PR contains the label `area:operator` 14 | - [ ] Commits are [signed off](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits) 15 | - [ ] Link this PR to related issues 16 | - [ ] I have not made _any_ changes in the `charts/` directory. 17 | 18 | ### For Helm Chart changes 19 | 20 | - [ ] Categorize the PR by setting a good title and adding one of the labels: 21 | `bug`, `enhancement`, `documentation`, `change`, `breaking`, `dependency` 22 | as they show up in the changelog 23 | - [ ] PR contains the label `area:chart` 24 | - [ ] PR contains the chart label, e.g. 
`chart:k8up` 25 | - [ ] Commits are [signed off](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits) 26 | - [ ] Variables are documented in the values.yaml using the format required by [Helm-Docs](https://github.com/norwoodj/helm-docs#valuesyaml-metadata). 27 | - [ ] Chart Version bumped if immediate release after merging is planned 28 | - [ ] I have run `make chart-docs` 29 | - [ ] Link this PR to related code release or other issues. 30 | 31 | 40 | -------------------------------------------------------------------------------- /.github/boilerplate.go.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/.github/boilerplate.go.txt -------------------------------------------------------------------------------- /.github/changelog-charts.json: -------------------------------------------------------------------------------- 1 | { 2 | "pr_template": "- ${{TITLE}} (#${{NUMBER}})", 3 | "categories": [ 4 | { 5 | "title": "## 🚀 Features", 6 | "labels": [ 7 | "enhancement", 8 | "area:chart" 9 | ], 10 | "exhaustive": true 11 | }, 12 | { 13 | "title": "## 🛠️ Minor Changes", 14 | "labels": [ 15 | "change", 16 | "area:chart" 17 | ], 18 | "exhaustive": true 19 | }, 20 | { 21 | "title": "## 🔎 Breaking Changes", 22 | "labels": [ 23 | "breaking", 24 | "area:chart" 25 | ], 26 | "exhaustive": true 27 | }, 28 | { 29 | "title": "## 🐛 Fixes", 30 | "labels": [ 31 | "bug", 32 | "area:chart" 33 | ], 34 | "exhaustive": true 35 | }, 36 | { 37 | "title": "## 📄 Documentation", 38 | "labels": [ 39 | "documentation", 40 | "area:chart" 41 | ], 42 | "exhaustive": true 43 | }, 44 | { 45 | "title": "## 🔗 Dependency Updates", 46 | "labels": [ 47 | "dependency", 48 | "area:chart" 49 | ], 50 | "exhaustive": true 51 | } 52 | ], 53 | "template": "This release contains _only_ Helm chart changes\n\n${{CATEGORIZED_COUNT}} changes since 
${{FROM_TAG}}\n\n${{CHANGELOG}}" 54 | } 55 | -------------------------------------------------------------------------------- /.github/changelog-charts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | chart="${1}" 6 | 7 | tagPattern="${chart}-(.+)" 8 | chartLabel="chart:${chart}" 9 | 10 | echo ::group::Configuring changelog generator 11 | jq '.tag_resolver.filter.pattern="'$tagPattern'" | .tag_resolver.transformer.pattern="'$tagPattern'" | .categories[].labels += ["'$chartLabel'"]' \ 12 | .github/changelog-charts.json | tee .github/configuration.json 13 | echo ::endgroup:: 14 | -------------------------------------------------------------------------------- /.github/changelog-configuration.json: -------------------------------------------------------------------------------- 1 | { 2 | "pr_template": "- ${{TITLE}} (#${{NUMBER}})", 3 | "categories": [ 4 | { 5 | "title": "## 🚀 Features", 6 | "labels": [ 7 | "enhancement", 8 | "area:operator" 9 | ], 10 | "exhaustive": true 11 | }, 12 | { 13 | "title": "## 🛠️ Minor Changes", 14 | "labels": [ 15 | "change", 16 | "area:operator" 17 | ], 18 | "exhaustive": true 19 | }, 20 | { 21 | "title": "## 🔎 Breaking Changes", 22 | "labels": [ 23 | "breaking", 24 | "area:operator" 25 | ], 26 | "exhaustive": true 27 | }, 28 | { 29 | "title": "## 🐛 Fixes", 30 | "labels": [ 31 | "bug", 32 | "area:operator" 33 | ], 34 | "exhaustive": true 35 | }, 36 | { 37 | "title": "## 📄 Documentation", 38 | "labels": [ 39 | "documentation", 40 | "area:operator" 41 | ], 42 | "exhaustive": true 43 | }, 44 | { 45 | "title": "## 🔗 Dependency Updates", 46 | "labels": [ 47 | "dependency", 48 | "area:operator" 49 | ], 50 | "exhaustive": true 51 | } 52 | ], 53 | "template": "${{CATEGORIZED_COUNT}} changes since ${{FROM_TAG}}\n\n${{CHANGELOG}}" 54 | } 55 | -------------------------------------------------------------------------------- /.github/helm-docs-footer.gotmpl.md: 
-------------------------------------------------------------------------------- 1 | 2 | {{ template "chart.sourcesSection" . }} 3 | 4 | {{ template "chart.requirementsSection" . }} 5 | 6 | 9 | [resource-units]: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes 10 | [prometheus-operator]: https://github.com/coreos/prometheus-operator 11 | -------------------------------------------------------------------------------- /.github/helm-docs-header.gotmpl.md: -------------------------------------------------------------------------------- 1 | {{ template "chart.header" . }} 2 | {{ template "chart.deprecationWarning" . }} 3 | 4 | {{ template "chart.badgesSection" . }} 5 | 6 | {{ template "chart.description" . }} 7 | 8 | {{ template "chart.homepageLine" . }} 9 | 10 | ## Installation 11 | 12 | ```bash 13 | helm repo add k8up-io https://k8up-io.github.io/k8up 14 | helm install {{ template "chart.name" . }} k8up-io/{{ template "chart.name" . 
}} 15 | ``` 16 | -------------------------------------------------------------------------------- /.github/render-charts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | chartYaml="${1}" 6 | chartName=$(dirname "${chartYaml}") 7 | 8 | echo "::group::Render chart ${chartName}" 9 | helm template "${chartName}" 10 | echo "::endgroup::" 11 | -------------------------------------------------------------------------------- /.github/workflows/chart-lint.yml: -------------------------------------------------------------------------------- 1 | name: ChartLint 2 | 3 | on: 4 | pull_request: 5 | # only run when there are chart changes 6 | paths: 7 | - 'charts/**' 8 | - '!charts/charts.mk' 9 | - '!charts/go*' 10 | 11 | jobs: 12 | lint: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | with: 17 | fetch-depth: '0' 18 | 19 | - name: Determine Go version from go.mod 20 | run: echo "GO_VERSION=$(go mod edit -json | jq -r .Go)" >> $GITHUB_ENV 21 | working-directory: ./charts 22 | 23 | - uses: actions/setup-go@v5 24 | with: 25 | go-version: ${{ env.GO_VERSION }} 26 | 27 | - uses: actions/cache@v4 28 | with: 29 | path: ~/go/pkg/mod 30 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 31 | restore-keys: | 32 | ${{ runner.os }}-go- 33 | - name: Verify charts are up-to-date 34 | run: make chart-lint 35 | 36 | template: 37 | runs-on: ubuntu-latest 38 | steps: 39 | - uses: actions/checkout@v4 40 | 41 | - name: Render Helm charts 42 | run: find charts -type f -name Chart.yaml -exec .github/render-charts.sh {} \; 43 | -------------------------------------------------------------------------------- /.github/workflows/chart-test.yml: -------------------------------------------------------------------------------- 1 | name: ChartTest 2 | 3 | on: 4 | pull_request: 5 | # only run when there are chart changes 6 | paths: 7 | - 'charts/**' 8 | 9 | jobs: 10 | test: 11 | runs-on: 
ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | with: 15 | fetch-depth: '0' 16 | 17 | - name: Determine Go version from go.mod 18 | run: echo "GO_VERSION=$(go mod edit -json | jq -r .Go)" >> $GITHUB_ENV 19 | working-directory: ./charts 20 | 21 | - uses: actions/setup-go@v5 22 | with: 23 | go-version: ${{ env.GO_VERSION }} 24 | 25 | - uses: actions/cache@v4 26 | with: 27 | path: ~/go/pkg/mod 28 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 29 | restore-keys: | 30 | ${{ runner.os }}-go- 31 | 32 | - name: Run chart tests 33 | run: make chart-test 34 | -------------------------------------------------------------------------------- /.github/workflows/docs-lint.yml: -------------------------------------------------------------------------------- 1 | name: Docs Lint 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'docs/**' 7 | push: 8 | branches: 9 | - master 10 | paths: 11 | - 'docs/**' 12 | 13 | 14 | jobs: 15 | check: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Determine Go version from go.mod 21 | run: echo "GO_VERSION=$(go mod edit -json | jq -r .Go)" >> $GITHUB_ENV 22 | 23 | - uses: actions/setup-go@v5 24 | with: 25 | go-version: ${{ env.GO_VERSION }} 26 | 27 | - uses: actions/cache@v4 28 | with: 29 | path: ~/go/pkg/mod 30 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 31 | restore-keys: | 32 | ${{ runner.os }}-go- 33 | 34 | - name: Run linters 35 | run: make docs-check 36 | -------------------------------------------------------------------------------- /.github/workflows/e2e.yml: -------------------------------------------------------------------------------- 1 | name: E2E 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | paths-ignore: 8 | - 'docs/**' 9 | 10 | jobs: 11 | e2e-test: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | 16 | - name: Determine Go version from go.mod 17 | run: echo "GO_VERSION=$(go mod edit -json | jq -r .Go)" >> 
$GITHUB_ENV 18 | 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version: ${{ env.GO_VERSION }} 22 | 23 | - uses: actions/cache@v4 24 | with: 25 | path: ~/go/pkg/mod 26 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 27 | restore-keys: | 28 | ${{ runner.os }}-go- 29 | 30 | - name: Run e2e tests 31 | run: make crd e2e-test -e KIND_KUBECTL_ARGS=--validate=false -e bats_args="--report-formatter junit" 32 | 33 | - name: Publish Test Report 34 | uses: mikepenz/action-junit-report@v3 35 | if: success() || failure() 36 | with: 37 | report_paths: '**/e2e/report.xml' 38 | github_token: ${{ secrets.GITHUB_TOKEN }} 39 | check_name: e2e-report 40 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | pull_request: 5 | paths-ignore: 6 | - 'docs/**' 7 | push: 8 | branches: 9 | - master 10 | paths-ignore: 11 | - 'docs/**' 12 | 13 | 14 | jobs: 15 | lint: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Determine Go version from go.mod 21 | run: echo "GO_VERSION=$(go mod edit -json | jq -r .Go)" >> $GITHUB_ENV 22 | 23 | - uses: actions/setup-go@v5 24 | with: 25 | go-version: ${{ env.GO_VERSION }} 26 | 27 | - uses: actions/cache@v4 28 | with: 29 | path: ~/go/pkg/mod 30 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 31 | restore-keys: | 32 | ${{ runner.os }}-go- 33 | 34 | - name: Run linters 35 | run: make lint 36 | -------------------------------------------------------------------------------- /.github/workflows/master.yml: -------------------------------------------------------------------------------- 1 | name: Master 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | dist: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | 14 | - name: Determine Go version from go.mod 15 | run: echo "GO_VERSION=$(go mod 
edit -json | jq -r .Go)" >> $GITHUB_ENV 16 | 17 | - uses: actions/setup-go@v5 18 | with: 19 | go-version: ${{ env.GO_VERSION }} 20 | 21 | - name: Set up QEMU 22 | uses: docker/setup-qemu-action@v2 23 | - name: Set up Docker Buildx 24 | uses: docker/setup-buildx-action@v2 25 | 26 | - uses: actions/cache@v4 27 | with: 28 | path: ~/go/pkg/mod 29 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 30 | restore-keys: | 31 | ${{ runner.os }}-go- 32 | 33 | - name: Build docker images 34 | run: make docker-build -e IMG_TAG=${GITHUB_REF#refs/heads/} 35 | 36 | - name: Login to quay.io 37 | uses: docker/login-action@v3 38 | with: 39 | registry: quay.io 40 | username: ${{ secrets.QUAY_IO_USERNAME }} 41 | password: ${{ secrets.QUAY_IO_PASSWORD }} 42 | 43 | - name: Login to GHCR 44 | uses: docker/login-action@v3 45 | with: 46 | registry: ghcr.io 47 | username: ${{ github.repository_owner }} 48 | password: ${{ secrets.GITHUB_TOKEN }} 49 | 50 | - name: Push docker images 51 | run: make docker-push -e IMG_TAG=${GITHUB_REF#refs/heads/} 52 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | paths-ignore: 8 | - charts/** 9 | - docs/** 10 | push: 11 | branches: 12 | - master 13 | paths-ignore: 14 | - charts/** 15 | - docs/** 16 | 17 | jobs: 18 | test: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v4 22 | 23 | - name: Determine Go version from go.mod 24 | run: echo "GO_VERSION=$(go mod edit -json | jq -r .Go)" >> $GITHUB_ENV 25 | 26 | - uses: actions/setup-go@v5 27 | with: 28 | go-version: ${{ env.GO_VERSION }} 29 | 30 | - uses: actions/cache@v4 31 | with: 32 | path: ~/go/pkg/mod 33 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 34 | restore-keys: | 35 | ${{ runner.os }}-go- 36 | 37 | - name: Run tests 38 | run: make test 39 | 40 | - 
name: Run integration tests 41 | run: make integration-test 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IDEs 2 | .idea 3 | .vscode 4 | 5 | # Binaries for programs and plugins 6 | *.exe 7 | *.dll 8 | *.so 9 | *.dylib 10 | __debug_bin 11 | 12 | # Test binary, build with `go test -c` 13 | *.test 14 | 15 | # Output of the go coverage tool, specifically when used with LiteIDE 16 | *.out 17 | coverage/ 18 | 19 | # Antora related 20 | .asciidoctor 21 | .cache 22 | _public/ 23 | _archive/ 24 | tmp/ 25 | 26 | .github/release-notes.md 27 | 28 | k8up-crd*.yaml 29 | 30 | /k8up 31 | 32 | # Go releaser 33 | dist/ 34 | 35 | # Test related 36 | node_modules/ 37 | e2e/debug 38 | .integration-test/ 39 | .e2e-test/ 40 | /.work/ 41 | 42 | # Charts 43 | .cr-release-packages/ 44 | .cr-index/ 45 | 46 | # Vagrant 47 | .vagrant/ 48 | 49 | # Container volumes mount 50 | .config/ 51 | .kube/ 52 | .npm/ 53 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | exclusions: 4 | generated: lax 5 | presets: 6 | - comments 7 | - common-false-positives 8 | - legacy 9 | - std-error-handling 10 | rules: 11 | - linters: 12 | - staticcheck 13 | text: 'k8upv1.LegacyScheduleFinalizerName is deprecated: Migrate to ScheduleFinalizerName' 14 | paths: 15 | - third_party$ 16 | - builtin$ 17 | - examples$ 18 | formatters: 19 | exclusions: 20 | generated: lax 21 | paths: 22 | - third_party$ 23 | - builtin$ 24 | - examples$ 25 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @k8up-io/maintainer 2 | -------------------------------------------------------------------------------- 
/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Community Code of Conduct 2 | 3 | K8up observes the [CNCF Community Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). 4 | 5 | The code of conduct is overseen by the K8up project maintainers. 6 | Possible code of conduct violations should be emailed to the project maintainers cncf-k8up-maintainers@lists.cncf.io. 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/library/alpine:3.20 as restic 2 | 3 | RUN apk add --update --no-cache \ 4 | bash \ 5 | ca-certificates \ 6 | curl 7 | 8 | COPY go.mod fetch_restic.sh ./ 9 | RUN ./fetch_restic.sh /usr/local/bin/restic \ 10 | && /usr/local/bin/restic version 11 | 12 | FROM docker.io/library/alpine:3.20 as k8up 13 | 14 | ENTRYPOINT ["k8up"] 15 | 16 | RUN mkdir /.cache && chmod -R g=u /.cache 17 | 18 | RUN apk add --update --no-cache \ 19 | bash \ 20 | ca-certificates \ 21 | curl \ 22 | fuse \ 23 | openssh-client \ 24 | tzdata 25 | 26 | ENV RESTIC_BINARY=/usr/local/bin/restic 27 | 28 | COPY --from=restic /usr/local/bin/restic $RESTIC_BINARY 29 | COPY k8up /usr/local/bin/ 30 | 31 | RUN chmod a+x /usr/local/bin/k8up 32 | RUN $RESTIC_BINARY version 33 | 34 | USER 65532 35 | -------------------------------------------------------------------------------- /Makefile.restic-integration.vars.mk: -------------------------------------------------------------------------------- 1 | arch ?= amd64 2 | 3 | ifeq ("$(shell uname -s)", "Darwin") 4 | os ?= darwin 5 | else 6 | os ?= linux 7 | endif 8 | 9 | curl_args ?= --location --fail --silent --show-error 10 | 11 | backup_dir ?= $(integrationtest_dir)/backup 12 | restore_dir ?= $(integrationtest_dir)/restore 13 | 14 | stats_url ?= http://localhost:8091 15 | 16 | restic_version ?= $(shell $(GO_EXEC) mod edit 
-json | jq -r '.Require[] | select(.Path == "github.com/restic/restic").Version' | sed "s/v//") 17 | restic_path ?= $(go_bin)/restic 18 | restic_pid ?= $(integrationtest_dir)/restic.pid 19 | restic_url ?= https://github.com/restic/restic/releases/download/v$(restic_version)/restic_$(restic_version)_$(os)_$(arch).bz2 20 | restic_password ?= repopw 21 | 22 | minio_port ?= 9000 23 | minio_host ?= localhost 24 | minio_address = $(minio_host):$(minio_port) 25 | minio_path ?= $(go_bin)/minio 26 | minio_data ?= $(integrationtest_dir)/minio.d/data 27 | minio_config ?= $(integrationtest_dir)/minio.d/config 28 | minio_root_user ?= accesskey 29 | minio_root_password ?= secretkey 30 | minio_pid ?= $(integrationtest_dir)/minio.pid 31 | minio_url ?= https://dl.min.io/server/minio/release/$(os)-$(arch)/minio 32 | -------------------------------------------------------------------------------- /Makefile.vars.mk: -------------------------------------------------------------------------------- 1 | IMG_TAG ?= latest 2 | 3 | GO_EXEC ?= go 4 | K8UP_MAIN_GO ?= cmd/k8up/main.go 5 | K8UP_GOOS ?= linux 6 | K8UP_GOARCH ?= amd64 7 | 8 | CURDIR ?= $(shell pwd) 9 | BIN_FILENAME ?= $(CURDIR)/$(PROJECT_ROOT_DIR)/k8up 10 | WORK_DIR = $(CURDIR)/.work 11 | 12 | integrationtest_dir ?= $(CURDIR)/$(PROJECT_ROOT_DIR)/.integration-test 13 | e2etest_dir ?= $(CURDIR)/$(PROJECT_ROOT_DIR)/.e2e-test 14 | 15 | go_bin ?= $(PWD)/.work/bin 16 | $(go_bin): 17 | @mkdir -p $@ 18 | 19 | golangci_bin = $(go_bin)/golangci-lint 20 | 21 | CRD_FILE ?= k8up-crd.yaml 22 | CRD_ROOT_DIR ?= config/crd/apiextensions.k8s.io 23 | CRD_DOCS_REF_PATH ?= docs/modules/ROOT/pages/references/api-reference.adoc 24 | 25 | SAMPLES_ROOT_DIR ?= config/samples 26 | minio_sentinel = $(e2etest_dir)/minio_sentinel 27 | 28 | KIND_NODE_VERSION ?= v1.26.6 29 | KIND_KUBECONFIG ?= $(e2etest_dir)/kind-kubeconfig-$(KIND_NODE_VERSION) 30 | KIND_CLUSTER ?= k8up-$(KIND_NODE_VERSION) 31 | KIND_KUBECTL_ARGS ?= --validate=true 32 | 33 | 
ENABLE_LEADER_ELECTION ?= false 34 | 35 | E2E_TAG ?= e2e 36 | E2E_REGISTRY = local.dev 37 | E2E_REPO ?= k8up-io/k8up 38 | K8UP_E2E_IMG = $(E2E_REGISTRY)/$(E2E_REPO):$(E2E_TAG) 39 | 40 | BATS_FILES ?= . 41 | 42 | # Image URL to use all building/pushing image targets 43 | K8UP_GHCR_IMG ?= ghcr.io/k8up-io/k8up:$(IMG_TAG) 44 | K8UP_QUAY_IMG ?= quay.io/k8up-io/k8up:$(IMG_TAG) 45 | 46 | # Operator Integration Test 47 | ENVTEST_ADDITIONAL_FLAGS ?= --bin-dir "$(go_bin)" 48 | INTEGRATION_TEST_DEBUG_OUTPUT ?= false 49 | # See https://storage.googleapis.com/kubebuilder-tools/ for list of supported K8s versions 50 | ENVTEST_K8S_VERSION = 1.26.x 51 | -------------------------------------------------------------------------------- /OWNERS.md: -------------------------------------------------------------------------------- 1 | # K8up Maintainers 2 | 3 | This page lists all active members of the steering committee, as well as maintainers and reviewers for **this** repository. Each repository in the 4 | [K8up organization](https://github.com/k8up-io/) will list their repository maintainers and reviewers in their own `OWNERS.md` file. 5 | 6 | Please see [GOVERNANCE.md](GOVERNANCE.md) for governance guidelines and responsibilities for the steering committee, maintainers, and reviewers. 7 | 8 | See [CODEOWNERS](CODEOWNERS) for automatic PR assignment. 9 | 10 | ## Steering Committee 11 | 12 | This will be built up when time comes. 13 | 14 | ## Maintainers 15 | 16 | * Tobias Brunner ([tobru](https://github.com/tobru)) 17 | * Simon Beck ([Kidswiss](https://github.com/Kidswiss)) 18 | * Nicolas Bigler ([TheBigLee](https://github.com/TheBigLee)) 19 | * Łukasz Widera ([wejdross](https://github.com/wejdross)) 20 | * Gabriel Saratura ([zugao](https://github.com/zugao)) 21 | 22 | We also document the list of maintainers in the [GitHub team "Maintainer"](https://github.com/orgs/k8up-io/teams/maintainer/members). 
We currently do not have nominated reviewers; we'll build them up when the time comes.
package v1

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// +kubebuilder:rbac:groups=k8up.io,resources=podconfigs,verbs=get;list;watch

// PodConfigSpec contains the podTemplate definition.
type PodConfigSpec struct {
	Template corev1.PodTemplateSpec `json:"template,omitempty"`
}

// PodConfigStatus defines the observed state of PodConfig. It is currently empty.
type PodConfigStatus struct {
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// PodConfig is the Schema for the PodConfig API
// Any annotations and labels set on this object will also be set on
// the final pod.
type PodConfig struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   PodConfigSpec   `json:"spec,omitempty"`
	Status PodConfigStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// PodConfigList contains a list of PodConfig
type PodConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []PodConfig `json:"items"`
}

// NewPodConfig fetches the PodConfig with the given name in the given namespace.
// It returns (nil, nil) when the object does not exist, so callers can treat a
// missing PodConfig as "no extra pod configuration" instead of an error.
// Any other lookup failure is returned as an error.
func NewPodConfig(ctx context.Context, name, namespace string, c client.Client) (*PodConfig, error) {
	config := &PodConfig{}
	err := c.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, config)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil, nil
		}
		return nil, err
	}
	return config, nil
}

// init registers the PodConfig types with the scheme builder.
func init() {
	SchemeBuilder.Register(&PodConfig{}, &PodConfigList{})
}
package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:rbac:groups=k8up.io,resources=snapshots,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=k8up.io,resources=snapshots/status;snapshots/finalizers,verbs=get;update;patch

// SnapshotSpec contains all information needed about a restic snapshot so it
// can be restored.
type SnapshotSpec struct {
	// ID is the ID of the restic snapshot.
	ID *string `json:"id,omitempty"`
	// Date is the timestamp when the snapshot was taken.
	Date *metav1.Time `json:"date,omitempty"`
	// Paths lists the snapshot's paths.
	Paths *[]string `json:"paths,omitempty"`
	// Repository is the repository URL the snapshot is stored in.
	Repository *string `json:"repository,omitempty"`
}

// SnapshotStatus defines the observed state of Snapshot. It is currently empty.
type SnapshotStatus struct {
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Date taken",type="string",JSONPath=`.spec.date`,description="Date when snapshot was taken"
// +kubebuilder:printcolumn:name="Paths",type="string",JSONPath=`.spec.paths[*]`,description="Snapshot's paths"
// +kubebuilder:printcolumn:name="Repository",type="string",JSONPath=`.spec.repository`,description="Repository Url"

// Snapshot is the Schema for the snapshots API
type Snapshot struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   SnapshotSpec   `json:"spec,omitempty"`
	Status SnapshotStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// SnapshotList contains a list of Snapshot
type SnapshotList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Snapshot `json:"items"`
}

// init registers the Snapshot types with the scheme builder.
func init() {
	SchemeBuilder.Register(&Snapshot{}, &SnapshotList{})
}
5 | $(helm_docs_bin): export GOOS = $(shell go env GOOS) 6 | $(helm_docs_bin): export GOARCH = $(shell go env GOARCH) 7 | $(helm_docs_bin): 8 | @mkdir -p $(WORK_DIR) 9 | cd charts && go build -o $@ github.com/norwoodj/helm-docs/cmd/helm-docs 10 | 11 | # This executes `make clean prepare` for every dir found in charts/ that has a Makefile. 12 | .PHONY: chart-prepare 13 | chart-prepare: release-prepare ## Prepare the Helm charts 14 | @find charts -type f -name Makefile | sed 's|/[^/]*$$||' | xargs -I '%' make -C '%' clean prepare 15 | 16 | .PHONY: chart-docs 17 | chart-docs: $(helm_docs_bin) ## Creates the Chart READMEs from template and values.yaml files 18 | @$(helm_docs_bin) \ 19 | --template-files ./.github/helm-docs-header.gotmpl.md \ 20 | --template-files README.gotmpl.md \ 21 | --template-files ./.github/helm-docs-footer.gotmpl.md 22 | 23 | .PHONY: chart-lint 24 | chart-lint: chart-prepare chart-docs ## Lint charts 25 | @echo 'Check for uncommitted changes ...' 26 | git diff --exit-code 27 | 28 | .PHONY: chart-test 29 | chart-test: ## Run unit tests for charts 30 | cd charts && go test ./... 31 | -------------------------------------------------------------------------------- /charts/k8up/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | *kustomize*/ 24 | Makefile 25 | *gotmpl* 26 | test/ 27 | -------------------------------------------------------------------------------- /charts/k8up/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: Kubernetes and OpenShift Backup Operator based on restic 3 | name: k8up 4 | home: https://k8up.io/ 5 | keywords: 6 | - backup 7 | - operator 8 | - restic 9 | version: 4.8.4 10 | sources: 11 | - https://github.com/k8up-io/k8up 12 | maintainers: 13 | - name: K8up Authors 14 | email: info@appuio.ch 15 | -------------------------------------------------------------------------------- /charts/k8up/Makefile: -------------------------------------------------------------------------------- 1 | MAKEFLAGS += --warn-undefined-variables 2 | SHELL := bash 3 | .SHELLFLAGS := -eu -o pipefail -c 4 | .DEFAULT_GOAL := help 5 | .DELETE_ON_ERROR: 6 | .SUFFIXES: 7 | 8 | rbac_gen_src = ../../config/rbac/role.yaml 9 | rbac_gen_tgt = templates/operator-clusterrole.yaml 10 | 11 | ifeq ($(shell uname -s),Darwin) 12 | sed := gsed -i 13 | else 14 | sed := sed -i 15 | endif 16 | 17 | $(rbac_gen_tgt): 18 | @cp $(rbac_gen_src) $@ 19 | @yq -i e '.metadata.name="{{ include \"k8up.fullname\" . }}-manager", del(.metadata.creationTimestamp)' $@ 20 | @yq -i e '.metadata.labels.replace="LABELS"' $@ 21 | @$(sed) -e 's/replace: LABELS/{{- include "k8up.labels" . 
| nindent 4 }}/g' $@ 22 | @$(sed) -e '1s/^/{{- if .Values.rbac.create -}}\n/' $@ 23 | @$(sed) -e '$$a{{- end -}}' $@ 24 | 25 | .PHONY: prepare 26 | prepare: $(rbac_gen_tgt) ## Helmify generated artifacts 27 | 28 | .PHONY: clean 29 | clean: ## Clean generated artifacts 30 | rm -rf $(rbac_gen_tgt) 31 | -------------------------------------------------------------------------------- /charts/k8up/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | ##################### 2 | ! Attention ! 3 | ##################### 4 | 5 | This Helm chart does not include CRDs. 6 | Please make sure you have installed or upgraded the necessary CRDs as instructed in the Chart README. 7 | 8 | ##################### 9 | -------------------------------------------------------------------------------- /charts/k8up/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.serviceAccount.create .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ include "k8up.fullname" . }} 6 | labels: 7 | {{- include "k8up.labels" . | nindent 4 }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: {{ include "k8up.fullname" . }}-manager 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ include "k8up.serviceAccountName" . }} 15 | namespace: {{ .Release.Namespace }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /charts/k8up/templates/executor-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create -}} 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: 'k8up-executor' 7 | labels: 8 | {{- include "k8up.labels" . 
| nindent 4 }} 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - pods 14 | verbs: 15 | - get 16 | - list 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - pods/exec 21 | verbs: 22 | - create 23 | - apiGroups: 24 | - k8up.io 25 | resources: 26 | - snapshots 27 | verbs: 28 | - create 29 | - delete 30 | - get 31 | - list 32 | - patch 33 | - update 34 | - watch 35 | {{- end -}} 36 | -------------------------------------------------------------------------------- /charts/k8up/templates/prometheus/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.metrics.serviceMonitor.enabled -}} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ include "k8up.fullname" . }}-monitor 6 | namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace }} 7 | labels: 8 | {{- include "k8up.labels" . | nindent 4 }} 9 | {{- with .Values.metrics.serviceMonitor.additionalLabels }} 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | spec: 13 | endpoints: 14 | - port: http 15 | interval: {{ .Values.metrics.serviceMonitor.scrapeInterval }} 16 | selector: 17 | matchLabels: 18 | {{- include "k8up.selectorLabels" . | nindent 6 }} 19 | {{- if .Values.metrics.serviceMonitor.namespace }} 20 | namespaceSelector: 21 | matchNames: 22 | - {{ .Release.Namespace }} 23 | {{- end }} 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /charts/k8up/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "k8up.fullname" . }}-metrics 5 | labels: 6 | {{- include "k8up.labels" . | nindent 4 }} 7 | {{- with .Values.metrics.service.annotations }} 8 | annotations: 9 | {{- toYaml . 
| nindent 4 }} 10 | {{- end }} 11 | spec: 12 | type: {{ .Values.metrics.service.type }} 13 | {{- if .Values.metrics.service.ipFamilyPolicy }} 14 | ipFamilyPolicy: {{ .Values.metrics.service.ipFamilyPolicy }} 15 | {{- end }} 16 | {{- if .Values.metrics.service.ipFamilies }} 17 | ipFamilies: {{ .Values.metrics.service.ipFamilies | toYaml | nindent 2 }} 18 | {{- end }} 19 | ports: 20 | - name: http 21 | port: {{ .Values.metrics.service.port }} 22 | targetPort: http 23 | {{- if eq .Values.metrics.service.type "NodePort" }} 24 | nodePort: {{ .Values.metrics.service.nodePort }} 25 | {{- end }} 26 | selector: 27 | {{- include "k8up.selectorLabels" . | nindent 4 }} 28 | -------------------------------------------------------------------------------- /charts/k8up/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "k8up.serviceAccountName" . }} 6 | labels: 7 | {{ include "k8up.labels" . | indent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | 13 | {{- end -}} 14 | -------------------------------------------------------------------------------- /charts/k8up/templates/user-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create -}} 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | labels: 7 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 8 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 9 | {{- include "k8up.staticLabels" . | nindent 4 }} 10 | name: {{ include "k8up.fullname" . 
}}-edit 11 | rules: 12 | - apiGroups: 13 | - k8up.io 14 | resources: 15 | - '*' 16 | verbs: 17 | - create 18 | - delete 19 | - get 20 | - list 21 | - patch 22 | - update 23 | - watch 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRole 27 | metadata: 28 | labels: 29 | rbac.authorization.k8s.io/aggregate-to-view: "true" 30 | {{- include "k8up.staticLabels" . | nindent 4 }} 31 | name: {{ include "k8up.fullname" . }}-view 32 | rules: 33 | - apiGroups: 34 | - k8up.io 35 | resources: 36 | - '*' 37 | verbs: 38 | - get 39 | - list 40 | - watch 41 | {{- end -}} 42 | -------------------------------------------------------------------------------- /charts/k8up/test/main_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | var ( 4 | helmChartPath = ".." 5 | releaseName = "test-release" 6 | chartName = "k8up" 7 | ) 8 | -------------------------------------------------------------------------------- /charts/k8up/test/operator-clusterrole_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | "github.com/gruntwork-io/terratest/modules/helm" 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 11 | ) 12 | 13 | var ( 14 | tplRbac = []string{"templates/operator-clusterrole.yaml"} 15 | ) 16 | 17 | func Test_RBAC_GivenDefaultSetting_WhenRenderTemplate_ThenRenderRbacWithReplacedValues(t *testing.T) { 18 | options := &helm.Options{ 19 | //Logger: logger.Discard, 20 | } 21 | 22 | output := helm.RenderTemplate(t, options, helmChartPath, releaseName, tplRbac) 23 | 24 | docs := strings.Split(output, "\n---\n") 25 | assert.Len(t, docs, 1, "resources in file") 26 | 27 | for _, doc := range docs { 28 | obj := unstructured.Unstructured{} 29 | helm.UnmarshalK8SYaml(t, doc, &obj.Object) 30 | labels := obj.GetLabels() 31 | 
assert.Contains(t, labels, "app.kubernetes.io/name") 32 | assert.Contains(t, labels, "app.kubernetes.io/instance") 33 | assert.Contains(t, labels, "app.kubernetes.io/managed-by") 34 | assert.NotContains(t, labels, "app.kubernetes.io/version") 35 | 36 | name := obj.GetName() 37 | assert.Contains(t, name, strings.Join([]string{releaseName, chartName}, "-")) 38 | 39 | } 40 | } 41 | 42 | func Test_RBAC_GivenRbacDisabled_WhenRenderTemplate_ThenDontRenderRbacRules(t *testing.T) { 43 | options := &helm.Options{ 44 | SetValues: map[string]string{ 45 | "rbac.create": "false", 46 | }, 47 | } 48 | 49 | _, err := helm.RenderTemplateE(t, options, helmChartPath, releaseName, tplRbac) 50 | require.Error(t, err) 51 | } 52 | -------------------------------------------------------------------------------- /charts/k8up/test/prometheus/prometheus_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | var ( 4 | helmChartPath = "../.." 5 | releaseName = "test-release" 6 | ) 7 | -------------------------------------------------------------------------------- /charts/k8up/test/prometheus/testdata/custom_rules.yaml: -------------------------------------------------------------------------------- 1 | metrics: 2 | prometheusRule: 3 | enabled: true 4 | additionalRules: 5 | - alert: MyCustomRule 6 | expr: metric > 0 7 | for: 1m 8 | labels: 9 | severity: critical 10 | annotations: 11 | summary: Summary 12 | description: Description 13 | -------------------------------------------------------------------------------- /charts/k8up/test/prometheus/testdata/labels.yaml: -------------------------------------------------------------------------------- 1 | metrics: 2 | serviceMonitor: 3 | enabled: true 4 | additionalLabels: 5 | my-custom-label: my-value 6 | prometheusRule: 7 | enabled: true 8 | additionalLabels: 9 | my-custom-label: my-value 10 | -------------------------------------------------------------------------------- 
/charts/k8up/test/serviceaccount_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/require" 8 | corev1 "k8s.io/api/core/v1" 9 | 10 | "github.com/gruntwork-io/terratest/modules/helm" 11 | ) 12 | 13 | var ( 14 | tplServiceAccount = []string{"templates/serviceaccount.yaml"} 15 | ) 16 | 17 | func Test_ServiceAccount_ShouldNotRender_IfDisabled(t *testing.T) { 18 | options := &helm.Options{ 19 | SetValues: map[string]string{ 20 | "serviceAccount.create": "false", 21 | }, 22 | } 23 | 24 | renderServiceAccount(t, options, true) 25 | 26 | } 27 | 28 | func Test_ServiceAccount_ShouldRender_ByDefault(t *testing.T) { 29 | want := releaseName + "-k8up" 30 | options := &helm.Options{} 31 | 32 | sa := renderServiceAccount(t, options, false) 33 | assert.Equal(t, want, sa.Name, "ServiceAccount does use configured name") 34 | } 35 | 36 | func Test_ServiceAccount_ShouldRender_CustomName(t *testing.T) { 37 | want := "test" 38 | options := &helm.Options{ 39 | SetValues: map[string]string{ 40 | "serviceAccount.name": want, 41 | }, 42 | } 43 | 44 | sa := renderServiceAccount(t, options, false) 45 | 46 | assert.Equal(t, want, sa.Name, "ServiceAccount does use configured name") 47 | } 48 | 49 | func renderServiceAccount(t *testing.T, options *helm.Options, wantErr bool) *corev1.ServiceAccount { 50 | output, err := helm.RenderTemplateE(t, options, helmChartPath, releaseName, tplServiceAccount) 51 | if wantErr { 52 | require.Error(t, err) 53 | return nil 54 | } 55 | require.NoError(t, err) 56 | sa := corev1.ServiceAccount{} 57 | helm.UnmarshalK8SYaml(t, output, &sa) 58 | return &sa 59 | } 60 | -------------------------------------------------------------------------------- /charts/k8up/test/testdata/deployment_1.yaml: -------------------------------------------------------------------------------- 1 | k8up: 2 | envVars: 3 | - 
name: VARIABLE 4 | value: VALUE 5 | 6 | affinity: 7 | nodeAffinity: 8 | requiredDuringSchedulingIgnoredDuringExecution: 9 | nodeSelectorTerms: 10 | - matchExpressions: 11 | - key: kubernetes.io/hostname 12 | operator: In 13 | values: 14 | - host 15 | -------------------------------------------------------------------------------- /charts/tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | 3 | // Package tools is a place to put any tooling dependencies as imports. 4 | // Go modules will be forced to download and install them. 5 | package tools 6 | 7 | import ( 8 | // helm-docs 9 | _ "github.com/norwoodj/helm-docs/cmd/helm-docs" 10 | ) 11 | -------------------------------------------------------------------------------- /clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # checks whether the PID in the given file exists 4 | 5 | 6 | pidfile_exists() { 7 | test -f "${1}" 8 | return $? 9 | } 10 | 11 | pid_alive() { 12 | if ps --help 2>&1 | grep -q BusyBox; then 13 | xargs ps p >/dev/null < "${1}" 14 | else 15 | xargs ps -p >/dev/null < "${1}" 16 | fi 17 | 18 | return $? 19 | } 20 | 21 | if ! pidfile_exists "${1}"; then 22 | exit 0 23 | fi 24 | 25 | if ! 
// RandomStringGenerator returns a random string of length n consisting of
// lowercase ASCII letters and digits.
//
// NOTE: math/rand is not cryptographically secure; do not use the result as a
// secret. Use crypto/rand if unpredictability is required.
func RandomStringGenerator(n int) string {
	const characters = "abcdefghijklmnopqrstuvwxyz1234567890"
	// The previous implementation created a seeded *rand.Rand and immediately
	// discarded it, then drew from the package-level generator instead. Keep
	// the local generator and actually use it.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	b := make([]byte, n)
	for i := range b {
		b[i] = characters[rng.Intn(len(characters))]
	}
	return string(b)
}
package cmd

import (
	"github.com/go-logr/logr"
	"github.com/urfave/cli/v2"
)

// loggerMetadataKeyName is the key under which the logger is stored in the
// cli.App's Metadata map.
const loggerMetadataKeyName = "logger"

// AppLogger retrieves the application-wide logger instance from the cli.Context's Metadata.
// NOTE: the type assertion is unchecked, so this panics if SetAppLogger was
// not called before this function is called (the Metadata entry is then
// missing and the assertion fails); it does not return a nil logger.
func AppLogger(c *cli.Context) logr.Logger {
	return c.App.Metadata[loggerMetadataKeyName].(logr.Logger)
}

// SetAppLogger stores the application-wide logger instance to the cli.Context's Metadata,
// so that it can later be retrieved by AppLogger.
func SetAppLogger(c *cli.Context, logger logr.Logger) {
	c.App.Metadata[loggerMetadataKeyName] = logger
}
20 | func NewTarGzipWriter(w io.Writer) *TarGzipWriter { 21 | gzipWriter := gzip.NewWriter(w) 22 | tarWriter := tar.NewWriter(gzipWriter) 23 | 24 | return &TarGzipWriter{ 25 | tarWriter: tarWriter, 26 | gzipWriter: gzipWriter, 27 | } 28 | } 29 | 30 | // WriteHeader starts a new file in the tar archive; see tar.Writer. 31 | func (t *TarGzipWriter) WriteHeader(hdr *tar.Header) error { 32 | return t.tarWriter.WriteHeader(hdr) 33 | } 34 | 35 | // Write adds content to the current file in the tar gzip archive; see tar.Writer. 36 | func (t *TarGzipWriter) Write(p []byte) (int, error) { 37 | return t.tarWriter.Write(p) 38 | } 39 | 40 | // Close closes the inner tar.Writer and then subsequently the outer gzip.Writer. 41 | // 42 | // It returns the error of either call after both writers have been closed. 43 | // If both calls to each writer.Close() error, then the error of closing the gzip.Writer is returned. 44 | // 45 | // The downstream writer is left as it is, i.e. it must be closed independently. 
46 | func (t *TarGzipWriter) Close() error { 47 | tarErr := t.tarWriter.Close() 48 | gzipErr := t.gzipWriter.Close() 49 | if gzipErr != nil { 50 | return gzipErr 51 | } 52 | return tarErr 53 | } 54 | -------------------------------------------------------------------------------- /config/samples/deployments/mariadb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: mariadb 6 | name: mariadb 7 | spec: 8 | progressDeadlineSeconds: 600 9 | replicas: 1 10 | revisionHistoryLimit: 2 11 | selector: 12 | matchLabels: 13 | app: mariadb 14 | strategy: 15 | rollingUpdate: 16 | maxSurge: 25% 17 | maxUnavailable: 25% 18 | type: RollingUpdate 19 | template: 20 | metadata: 21 | labels: 22 | app: mariadb 23 | annotations: 24 | k8up.io/backupcommand: mysqldump -uroot -psecure --all-databases 25 | spec: 26 | containers: 27 | - env: 28 | - name: MYSQL_ROOT_PASSWORD 29 | value: secure 30 | image: mariadb 31 | imagePullPolicy: Always 32 | name: mariadb 33 | ports: 34 | - containerPort: 3306 35 | protocol: TCP 36 | resources: {} 37 | terminationMessagePath: /dev/termination-log 38 | terminationMessagePolicy: File 39 | volumeMounts: 40 | - mountPath: /var/lib/mysql 41 | name: mariadb 42 | dnsPolicy: ClusterFirst 43 | restartPolicy: Always 44 | schedulerName: default-scheduler 45 | securityContext: {} 46 | terminationGracePeriodSeconds: 30 47 | volumes: 48 | - name: mariadb 49 | persistentVolumeClaim: 50 | claimName: mariadb 51 | 52 | --- 53 | 54 | kind: PersistentVolumeClaim 55 | apiVersion: v1 56 | metadata: 57 | name: mariadb 58 | spec: 59 | accessModes: 60 | - ReadWriteOnce 61 | resources: 62 | requests: 63 | storage: 1Gi 64 | -------------------------------------------------------------------------------- /config/samples/deployments/minio.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | 
metadata: 4 | name: minio 5 | 6 | --- 7 | 8 | apiVersion: apps/v1 # for k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1 9 | kind: Deployment 10 | metadata: 11 | # This name uniquely identifies the Deployment 12 | name: minio 13 | namespace: minio 14 | spec: 15 | selector: 16 | matchLabels: 17 | app: minio 18 | strategy: 19 | type: Recreate 20 | template: 21 | metadata: 22 | labels: 23 | app: minio 24 | spec: 25 | containers: 26 | - name: minio 27 | image: docker.io/minio/minio:latest 28 | resources: {} 29 | args: 30 | - server 31 | - /storage 32 | - --console-address 33 | - ":9001" 34 | env: 35 | - name: MINIO_ROOT_PASSWORD 36 | value: minioadmin 37 | - name: MINIO_ROOT_USER 38 | value: "minioadmin" 39 | ports: 40 | - containerPort: 9000 41 | hostPort: 9000 42 | - containerPort: 9001 43 | hostPort: 9001 44 | volumeMounts: 45 | - name: storage 46 | mountPath: "/storage" 47 | volumes: 48 | - name: storage 49 | persistentVolumeClaim: 50 | claimName: minio 51 | 52 | --- 53 | 54 | kind: PersistentVolumeClaim 55 | apiVersion: v1 56 | metadata: 57 | name: minio 58 | namespace: minio 59 | spec: 60 | accessModes: 61 | - ReadWriteOnce 62 | resources: 63 | requests: 64 | storage: 1Gi 65 | 66 | --- 67 | 68 | apiVersion: v1 69 | kind: Service 70 | metadata: 71 | labels: 72 | app: minio 73 | name: minio 74 | namespace: minio 75 | spec: 76 | ports: 77 | - name: "9000" 78 | port: 9000 79 | targetPort: 9000 80 | - name: gui 81 | port: 9001 82 | targetPort: 9001 83 | selector: 84 | app: minio 85 | status: 86 | loadBalancer: {} 87 | -------------------------------------------------------------------------------- /config/samples/deployments/pv-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv0003 5 | spec: 6 | capacity: 7 | storage: 5Gi 8 | volumeMode: Filesystem 9 | accessModes: 10 | - ReadWriteOnce 11 | 
persistentVolumeReclaimPolicy: Delete 12 | storageClassName: slow 13 | hostPath: 14 | path: /tmp 15 | -------------------------------------------------------------------------------- /config/samples/deployments/pvc-example.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | annotations: 5 | k8up.io/backup: 'true' 6 | name: myclaim 7 | spec: 8 | accessModes: 9 | # So it works in KIND 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 1Gi 14 | -------------------------------------------------------------------------------- /config/samples/k8up_v1_archive.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Archive 3 | metadata: 4 | name: archive-test 5 | spec: 6 | repoPasswordSecretRef: 7 | name: backup-repo 8 | key: password 9 | restoreMethod: 10 | s3: 11 | endpoint: http://minio.minio:9000 12 | bucket: restoremini 13 | accessKeyIDSecretRef: 14 | name: backup-credentials 15 | key: username 16 | secretAccessKeySecretRef: 17 | name: backup-credentials 18 | key: password 19 | backend: 20 | s3: 21 | endpoint: http://minio.minio:9000 22 | bucket: k8up 23 | accessKeyIDSecretRef: 24 | name: backup-credentials 25 | key: username 26 | secretAccessKeySecretRef: 27 | name: backup-credentials 28 | key: password 29 | -------------------------------------------------------------------------------- /config/samples/k8up_v1_backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Backup 3 | metadata: 4 | name: k8up-test 5 | spec: 6 | failedJobsHistoryLimit: 2 7 | successfulJobsHistoryLimit: 2 8 | tags: 9 | - test 10 | - dump 11 | - mariadb 12 | backend: 13 | repoPasswordSecretRef: 14 | name: backup-repo 15 | key: password 16 | s3: 17 | endpoint: http://minio.minio:9000 18 | bucket: k8up 19 | accessKeyIDSecretRef: 20 | name: 
backup-credentials 21 | key: username 22 | secretAccessKeySecretRef: 23 | name: backup-credentials 24 | key: password 25 | -------------------------------------------------------------------------------- /config/samples/k8up_v1_check.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Check 3 | metadata: 4 | name: check-test 5 | spec: 6 | resources: 7 | requests: 8 | memory: "64Mi" 9 | cpu: "250m" 10 | backend: 11 | repoPasswordSecretRef: 12 | name: backup-repo 13 | key: password 14 | s3: 15 | endpoint: http://minio.minio:9000 16 | bucket: k8up 17 | accessKeyIDSecretRef: 18 | name: backup-credentials 19 | key: username 20 | secretAccessKeySecretRef: 21 | name: backup-credentials 22 | key: password 23 | -------------------------------------------------------------------------------- /config/samples/k8up_v1_prebackuppod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PreBackupPod 3 | metadata: 4 | name: mysqldump 5 | spec: 6 | backupCommand: mysqldump -u$USER -p$PW -h $DB_HOST --all-databases 7 | pod: 8 | spec: 9 | initContainers: 10 | - name: init 11 | image: mariadb 12 | imagePullPolicy: IfNotPresent 13 | command: 14 | - sleep 15 | - '20s' 16 | containers: 17 | - env: 18 | - name: USER 19 | value: dumper 20 | - name: PW 21 | value: topsecret 22 | - name: DB_HOST 23 | value: mariadb.example.com 24 | image: mariadb 25 | command: 26 | - sleep 27 | - infinity 28 | imagePullPolicy: IfNotPresent 29 | name: mysqldump 30 | -------------------------------------------------------------------------------- /config/samples/k8up_v1_prune.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Prune 3 | metadata: 4 | name: prune-test 5 | spec: 6 | retention: 7 | keepLast: 5 8 | keepDaily: 14 9 | backend: 10 | repoPasswordSecretRef: 11 | name: backup-repo 12 | key: 
password 13 | s3: 14 | endpoint: http://minio.minio:9000 15 | bucket: k8up 16 | accessKeyIDSecretRef: 17 | name: backup-credentials 18 | key: username 19 | secretAccessKeySecretRef: 20 | name: backup-credentials 21 | key: password 22 | -------------------------------------------------------------------------------- /config/samples/k8up_v1_restore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: restore-disk 5 | spec: 6 | restoreMethod: 7 | folder: 8 | claimName: restore 9 | 10 | backend: 11 | repoPasswordSecretRef: 12 | name: backup-repo 13 | key: password 14 | s3: 15 | endpoint: http://minio.minio:9000 16 | bucket: k8up 17 | accessKeyIDSecretRef: 18 | name: backup-credentials 19 | key: username 20 | secretAccessKeySecretRef: 21 | name: backup-credentials 22 | key: password 23 | --- 24 | apiVersion: k8up.io/v1 25 | kind: Restore 26 | metadata: 27 | name: restore-s3 28 | spec: 29 | restoreMethod: 30 | s3: 31 | endpoint: http://minio.minio:9000 32 | bucket: restoremini 33 | accessKeyIDSecretRef: 34 | name: backup-credentials 35 | key: username 36 | secretAccessKeySecretRef: 37 | name: backup-credentials 38 | key: password 39 | backend: 40 | repoPasswordSecretRef: 41 | name: backup-repo 42 | key: password 43 | s3: 44 | endpoint: http://minio.minio:9000 45 | bucket: k8up 46 | accessKeyIDSecretRef: 47 | name: backup-credentials 48 | key: username 49 | secretAccessKeySecretRef: 50 | name: backup-credentials 51 | key: password 52 | --- 53 | apiVersion: k8up.io/v1 54 | kind: Restore 55 | metadata: 56 | name: restore-s3-global 57 | spec: 58 | restoreMethod: 59 | s3: {} 60 | backend: 61 | s3: {} 62 | --- 63 | # Here we only override the restic password and the bucket from the global 64 | # settings. 
65 | apiVersion: k8up.io/v1 66 | kind: Restore 67 | metadata: 68 | name: restore-s3-override 69 | spec: 70 | restoreMethod: 71 | s3: 72 | bucket: k8up-restore 73 | backend: 74 | repoPasswordSecretRef: 75 | key: repopw 76 | name: repopw 77 | s3: 78 | bucket: k8up-namespace 79 | -------------------------------------------------------------------------------- /config/samples/k8up_v1_schedule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: Schedule 4 | metadata: 5 | name: schedule-test 6 | spec: 7 | resourceRequirementsTemplate: 8 | requests: 9 | memory: "64Mi" 10 | cpu: "250m" 11 | limits: 12 | memory: "128Mi" 13 | cpu: "500m" 14 | backend: 15 | repoPasswordSecretRef: 16 | name: backup-repo 17 | key: password 18 | s3: 19 | endpoint: http://minio.minio:9000 20 | bucket: k8up 21 | accessKeyIDSecretRef: 22 | name: backup-credentials 23 | key: username 24 | secretAccessKeySecretRef: 25 | name: backup-credentials 26 | key: password 27 | archive: 28 | schedule: '0 * * * *' 29 | restoreMethod: 30 | s3: 31 | endpoint: http://minio.minio:9000 32 | bucket: restoremini 33 | accessKeyIDSecretRef: 34 | name: backup-credentials 35 | key: username 36 | secretAccessKeySecretRef: 37 | name: backup-credentials 38 | key: password 39 | backup: 40 | schedule: '*/2 * * * *' 41 | failedJobsHistoryLimit: 2 42 | successfulJobsHistoryLimit: 2 43 | promURL: http://minio.minio:9000 44 | clusterName: default 45 | resources: 46 | requests: 47 | memory: "64Mi" 48 | cpu: "250m" 49 | limits: 50 | memory: "256Mi" 51 | cpu: "1000m" 52 | check: 53 | resources: 54 | requests: 55 | memory: "64Mi" 56 | cpu: "250m" 57 | schedule: '@hourly-random' 58 | promURL: http://minio.minio:9000 59 | clusterName: default 60 | prune: 61 | schedule: '*/4 * * * *' 62 | retention: 63 | keepLast: 5 64 | keepDaily: 14 65 | -------------------------------------------------------------------------------- /config/samples/k8up_v1_snapshot.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Snapshot 3 | metadata: 4 | name: snapshot-sample 5 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples you want in your CSV to this file as resources ## 2 | resources: 3 | - k8up_v1_archive.yaml 4 | - k8up_v1_backup.yaml 5 | - k8up_v1_check.yaml 6 | - k8up_v1_prebackuppod.yaml 7 | - k8up_v1_prune.yaml 8 | - k8up_v1_restore.yaml 9 | - k8up_v1_schedule.yaml 10 | - k8up_v1_snapshot.yaml 11 | # +kubebuilder:scaffold:manifestskustomizesamples 12 | - secrets.yaml 13 | -------------------------------------------------------------------------------- /config/samples/prometheus/prometheus.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s 3 | scrape_timeout: 10s 4 | evaluation_interval: 15s 5 | rule_files: 6 | - /etc/prometheus/rules.yml 7 | alerting: 8 | alertmanagers: 9 | - static_configs: 10 | - targets: [] 11 | scheme: http 12 | timeout: 10s 13 | scrape_configs: 14 | - job_name: prometheus 15 | honor_timestamps: true 16 | scrape_interval: 15s 17 | scrape_timeout: 10s 18 | metrics_path: /metrics 19 | scheme: http 20 | static_configs: 21 | - targets: 22 | - localhost:9090 23 | # For running operators in debug mode outside kubernetes 24 | - job_name: operator-debug 25 | honor_timestamps: true 26 | scrape_interval: 15s 27 | scrape_timeout: 10s 28 | metrics_path: /metrics 29 | scheme: http 30 | static_configs: 31 | - targets: 32 | - 10.144.1.175:8081 33 | # For running the operator within kubernetes in a prod env 34 | - job_name: operator-kubernetes 35 | scrape_interval: 60s 36 | kubernetes_sd_configs: 37 | - role: pod 38 | relabel_configs: 39 | - source_labels: [__meta_kubernetes_namespace] 40 | regex: 
appuio-k8up-operator 41 | action: keep 42 | - source_labels: [__meta_kubernetes_pod_label_app] 43 | regex: k8up-operator 44 | action: keep 45 | - source_labels: [__meta_kubernetes_pod_container_port_number] 46 | regex: 47 | action: drop 48 | - source_labels: [__meta_kubernetes_pod_label_pod_template_hash] 49 | regex: 50 | action: drop 51 | -------------------------------------------------------------------------------- /config/samples/prometheus/rules.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: K8up 3 | rules: 4 | - alert: K8upBackupLastErrors 5 | expr: k8up_backup_restic_last_errors > 0 6 | for: 1m 7 | labels: 8 | severity: critical 9 | annotations: 10 | summary: Amount of errors of last restic backup 11 | description: This alert is fired when error number is > 0 12 | - alert: K8upBackupFailed 13 | expr: rate(k8up_jobs_failed_counter[1d]) > 0 14 | for: 1m 15 | labels: 16 | severity: critical 17 | annotations: 18 | summary: "Job in {{ $labels.namespace }} of type {{ $labels.jobType }} failed" 19 | - alert: K8upBackupNotRunning 20 | expr: sum(rate(k8up_jobs_total[25h])) == 0 and on(namespace) k8up_schedules_gauge > 0 21 | for: 1m 22 | labels: 23 | severity: critical 24 | annotations: 25 | summary: "No K8up jobs were run in {{ $labels.namespace }} within the last 24 hours. 
Check the operator, there might be a deadlock" 26 | -------------------------------------------------------------------------------- /config/samples/secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: backup-credentials 5 | type: Opaque 6 | stringData: 7 | username: minioadmin 8 | password: minioadmin 9 | --- 10 | apiVersion: v1 11 | kind: Secret 12 | metadata: 13 | name: backup-repo 14 | type: Opaque 15 | stringData: 16 | password: asdf 17 | -------------------------------------------------------------------------------- /deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: test-deployment 6 | labels: 7 | app: test-app 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: test-app 13 | template: 14 | metadata: 15 | labels: 16 | app: test-app 17 | spec: 18 | containers: 19 | - name: test 20 | image: busybox 21 | command: ["sleep", "100000"] 22 | volumeMounts: 23 | - name: test 24 | mountPath: /test 25 | volumes: 26 | - name: test 27 | persistentVolumeClaim: 28 | claimName: test-pvc 29 | -------------------------------------------------------------------------------- /docs/antora.yml: -------------------------------------------------------------------------------- 1 | name: k8up 2 | title: K8up Documentation 3 | version: master 4 | prerelease: -master 5 | display_version: master 6 | start_page: ROOT:index.adoc 7 | nav: 8 | - modules/ROOT/nav.adoc 9 | asciidoc: 10 | attributes: 11 | releaseVersion: latest 12 | -------------------------------------------------------------------------------- /docs/api-gen-config.yaml: -------------------------------------------------------------------------------- 1 | processor: 2 | # RE2 regular expressions describing types that should be excluded from the generated documentation. 
3 | ignoreTypes: 4 | - "Associa(ted|tor|tionStatus|tionConf)$" 5 | # RE2 regular expressions describing type fields that should be excluded from the generated documentation. 6 | ignoreFields: 7 | - "status$" 8 | - "TypeMeta$" 9 | 10 | render: 11 | # Version of Kubernetes to use when generating links to Kubernetes API documentation. 12 | kubernetesVersion: "1.20" 13 | -------------------------------------------------------------------------------- /docs/api-templates/gv-details.tpl: -------------------------------------------------------------------------------- 1 | {{- define "gvDetails" -}} 2 | {{- $gv := . -}} 3 | [id="{{ asciidocGroupVersionID $gv | asciidocRenderAnchorID }}"] 4 | == {{ $gv.GroupVersionString }} 5 | 6 | {{ $gv.Doc }} 7 | 8 | {{- if $gv.Kinds }} 9 | .Resource Types 10 | {{- range $gv.SortedKinds }} 11 | - {{ $gv.TypeForKind . | asciidocRenderTypeLink }} 12 | {{- end }} 13 | {{ end }} 14 | 15 | {{ range $gv.SortedTypes }} 16 | {{ template "type" . }} 17 | {{ end }} 18 | 19 | {{- end -}} 20 | -------------------------------------------------------------------------------- /docs/api-templates/gv-list.tpl: -------------------------------------------------------------------------------- 1 | {{- define "gvList" -}} 2 | {{- $groupVersions := . -}} 3 | 4 | // Generated documentation. Please do not edit. 5 | :anchor_prefix: k8s-api 6 | 7 | [id="api-reference"] 8 | = API Reference 9 | 10 | This is a https://github.com/elastic/crd-ref-docs[generated] API documentation. 11 | 12 | TIP: A more sophisticated documentation is available under https://doc.crds.dev/github.com/k8up-io/k8up. 13 | 14 | .Packages 15 | {{- range $groupVersions }} 16 | - {{ asciidocRenderGVLink . }} 17 | {{- end }} 18 | 19 | {{ range $groupVersions }} 20 | {{ template "gvDetails" . 
}} 21 | {{ end }} 22 | 23 | {{- end -}} 24 | -------------------------------------------------------------------------------- /docs/api-templates/type-members.tpl: -------------------------------------------------------------------------------- 1 | {{- define "type_members" -}} 2 | {{- $field := . -}} 3 | {{- if eq $field.Name "metadata" -}} 4 | Refer to Kubernetes API documentation for fields of `metadata`. 5 | {{ else -}} 6 | {{ $field.Doc }} 7 | {{- end -}} 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /docs/api-templates/type.tpl: -------------------------------------------------------------------------------- 1 | {{- define "type" -}} 2 | {{- $type := . -}} 3 | {{- if asciidocShouldRenderType $type -}} 4 | 5 | [id="{{ asciidocTypeID $type | asciidocRenderAnchorID }}"] 6 | === {{ $type.Name }} {{ if $type.IsAlias }}({{ asciidocRenderTypeLink $type.UnderlyingType }}) {{ end }} 7 | 8 | {{ $type.Doc }} 9 | 10 | {{ if $type.References -}} 11 | .Appears In: 12 | **** 13 | {{- range $type.SortedReferences }} 14 | - {{ asciidocRenderTypeLink . }} 15 | {{- end }} 16 | **** 17 | {{- end }} 18 | 19 | {{ if $type.Members -}} 20 | [cols="25a,75a", options="header"] 21 | |=== 22 | | Field | Description 23 | {{ if $type.GVK -}} 24 | | *`apiVersion`* __string__ | `{{ $type.GVK.Group }}/{{ $type.GVK.Version }}` 25 | | *`kind`* __string__ | `{{ $type.GVK.Kind }}` 26 | {{ end -}} 27 | 28 | {{ range $type.Members -}} 29 | | *`{{ .Name }}`* __{{ asciidocRenderType .Type }}__ | {{ template "type_members" . 
}} 30 | {{ end -}} 31 | |=== 32 | {{ end -}} 33 | 34 | {{- end -}} 35 | {{- end -}} 36 | -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/attachments/slides.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/attachments/slides.pdf -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/k8up-logo-square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/k8up-logo-square.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/k8up-logo-square.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/k8up-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/k8up-logo.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/minio_browser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/minio_browser.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/tutorial/k9s-delete.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/tutorial/k9s-delete.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/tutorial/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/tutorial/logo.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/tutorial/minio-browser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/tutorial/minio-browser.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/tutorial/wordpress-db-error.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/tutorial/wordpress-db-error.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/tutorial/wordpress-defaced.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/tutorial/wordpress-defaced.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/tutorial/wordpress-install.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/tutorial/wordpress-install.png -------------------------------------------------------------------------------- /docs/modules/ROOT/assets/images/tutorial/wordpress-restored.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8up-io/k8up/56163bc155fccd93729233f5824ca7b249bccbcd/docs/modules/ROOT/assets/images/tutorial/wordpress-restored.png -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/archive.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Archive 3 | metadata: 4 | name: archive-test 5 | spec: 6 | repoPasswordSecretRef: 7 | name: backup-repo 8 | key: password 9 | restoreMethod: 10 | s3: 11 | endpoint: http://10.144.1.224:9000 12 | bucket: restoremini 13 | accessKeyIDSecretRef: 14 | name: backup-credentials 15 | key: username 16 | secretAccessKeySecretRef: 17 | name: backup-credentials 18 | key: password 19 | backend: 20 | s3: 21 | endpoint: http://10.144.1.224:9000 22 | bucket: k8up 23 | accessKeyIDSecretRef: 24 | name: backup-credentials 25 | key: username 26 | secretAccessKeySecretRef: 27 | name: backup-credentials 28 | key: password 29 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: backup-credentials 5 | namespace: default 6 | type: Opaque 7 | stringData: 8 | username: minio 9 | password: minio123 10 | 11 | --- 12 | 13 | apiVersion: v1 14 | kind: Secret 15 | metadata: 16 | name: backup-repo 17 | namespace: default 18 | type: Opaque 19 | stringData: 20 | password: p@ssw0rd 21 | 22 | 
-------------------------------------------------------------------------------- /docs/modules/ROOT/examples/minio-standalone-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | # This name uniquely identifies the PVC. This is used in deployment. 5 | name: minio-pv-claim 6 | spec: 7 | # Read more about access modes here: http://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes 8 | accessModes: 9 | # The volume is mounted as read-write by a single node 10 | - ReadWriteOnce 11 | resources: 12 | # This is the request for storage. Should be available in the cluster. 13 | requests: 14 | storage: 10Gi 15 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/minio-standalone-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | # This name uniquely identifies the service 5 | name: minio-service 6 | spec: 7 | type: LoadBalancer 8 | ports: 9 | - port: 9000 10 | targetPort: 9000 11 | protocol: TCP 12 | selector: 13 | # Looks for labels `app:minio` in the namespace and applies the spec 14 | app: minio 15 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/open-stack-swift-auth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: "open-stack-secret" 5 | stringData: 6 | OS_AUTH_URL: "https://your-provider.auth.com/v3" 7 | OS_IDENTITY_API_VERSION: "3" 8 | OS_USER_DOMAIN_NAME: "Default" 9 | OS_PROJECT_DOMAIN_NAME: "Default" 10 | OS_TENANT_ID: "Open Stack Tenant Id" 11 | OS_TENANT_NAME: "Open Stack Tenant Name" 12 | OS_USERNAME: "Username" 13 | OS_PASSWORD: "Password" 14 | OS_REGION_NAME: "US" 15 | 16 | --- 17 | apiVersion: v1 18 | kind: Secret 19 | metadata: 20 
| name: "restic-repository-password" 21 | stringData: 22 | password: "secret_pass" 23 | 24 | --- 25 | apiVersion: k8up.io/v1 26 | kind: Backup 27 | metadata: 28 | name: k8up-test-swift 29 | spec: 30 | tags: 31 | - prod 32 | - archive 33 | - important 34 | failedJobsHistoryLimit: 4 35 | successfulJobsHistoryLimit: 0 36 | backend: 37 | envFrom: 38 | - secretRef: 39 | name: "open-stack-secret" 40 | repoPasswordSecretRef: 41 | name: "restic-repository-password" 42 | key: "password" 43 | swift: 44 | path: "/container-path" 45 | container: "my-backup-container" 46 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: apvc 5 | spec: 6 | accessModes: 7 | - ReadWriteMany 8 | resources: 9 | requests: 10 | storage: 5Gi 11 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/references/effective-schedule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: List 3 | items: 4 | - apiVersion: k8up.io/v1 5 | kind: EffectiveSchedule 6 | metadata: 7 | name: backup-dll789k5qql624wx 8 | namespace: k8up-system 9 | spec: 10 | effectiveSchedules: 11 | - name: schedule-test 12 | namespace: default 13 | generatedSchedule: 4 * * * * 14 | jobType: backup 15 | - apiVersion: k8up.io/v1 16 | kind: EffectiveSchedule 17 | metadata: 18 | name: check-qhmm4xmgsrwmj4dh 19 | namespace: k8up-system 20 | spec: 21 | effectiveSchedules: 22 | - name: schedule-test 23 | namespace: default 24 | generatedSchedule: 44 * * * * 25 | jobType: check 26 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/schedule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
k8up.io/v1 2 | kind: Schedule 3 | metadata: 4 | name: schedule-test 5 | spec: 6 | backend: 7 | s3: 8 | endpoint: http://minio:9000 9 | bucket: backups 10 | accessKeyIDSecretRef: 11 | name: minio-credentials 12 | key: username 13 | secretAccessKeySecretRef: 14 | name: minio-credentials 15 | key: password 16 | repoPasswordSecretRef: 17 | name: backup-repo 18 | key: password 19 | backup: 20 | schedule: '*/5 * * * *' 21 | failedJobsHistoryLimit: 2 22 | successfulJobsHistoryLimit: 2 23 | # optional 24 | #promURL: https://prometheus-io-instance:8443 25 | check: 26 | schedule: '0 1 * * 1' 27 | # optional 28 | #promURL: https://prometheus-io-instance:8443 29 | prune: 30 | schedule: '0 1 * * 0' 31 | retention: 32 | keepLast: 5 33 | keepDaily: 14 34 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Backup 3 | metadata: 4 | name: backup-test 5 | spec: 6 | failedJobsHistoryLimit: 2 7 | successfulJobsHistoryLimit: 2 8 | backend: 9 | repoPasswordSecretRef: 10 | name: backup-repo 11 | key: password 12 | s3: 13 | endpoint: http://minio:9000 14 | bucket: backups 15 | accessKeyIDSecretRef: 16 | name: minio-credentials 17 | key: username 18 | secretAccessKeySecretRef: 19 | name: minio-credentials 20 | key: password 21 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/mariadb/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mariadb 5 | labels: 6 | app: wordpress 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: wordpress 11 | tier: mariadb 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: wordpress 18 | tier: mariadb 19 | annotations: 20 | k8up.io/backupcommand: 
/bin/bash -c 'mysqldump -uroot -p"${MARIADB_ROOT_PASSWORD}" --all-databases' 21 | spec: 22 | containers: 23 | - image: mariadb:10.4 24 | name: mariadb 25 | readinessProbe: 26 | timeoutSeconds: 1 27 | initialDelaySeconds: 5 28 | exec: 29 | command: 30 | - "/bin/sh" 31 | - "-i" 32 | - "-c" 33 | - mysql -h 127.0.0.1 -uroot -p"${MARIADB_ROOT_PASSWORD}" -D mysql -e 'SELECT 1' 34 | livenessProbe: 35 | timeoutSeconds: 1 36 | initialDelaySeconds: 30 37 | tcpSocket: 38 | port: 3306 39 | env: 40 | - name: MARIADB_ROOT_PASSWORD 41 | valueFrom: 42 | secretKeyRef: 43 | name: mariadb-pass 44 | key: password 45 | ports: 46 | - containerPort: 3306 47 | name: mariadb 48 | volumeMounts: 49 | - name: mariadb-persistent-storage 50 | mountPath: /var/lib/mysql 51 | volumes: 52 | - name: mariadb-persistent-storage 53 | persistentVolumeClaim: 54 | claimName: mariadb-pvc 55 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/mariadb/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - deployment.yaml 3 | - pvc.yaml 4 | - service.yaml 5 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/mariadb/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mariadb-pvc 5 | labels: 6 | app: wordpress 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/mariadb/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mariadb 5 | labels: 6 | app: wordpress 7 | spec: 8 | ports: 9 | - port: 3306 10 | selector: 11 | app: wordpress 12 
| tier: mariadb 13 | clusterIP: None 14 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/minio/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: minio 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: minio 9 | strategy: 10 | type: Recreate 11 | template: 12 | metadata: 13 | labels: 14 | app: minio 15 | spec: 16 | volumes: 17 | - name: data 18 | persistentVolumeClaim: 19 | claimName: minio-pvc 20 | containers: 21 | - name: minio 22 | volumeMounts: 23 | - name: data 24 | mountPath: "/data" 25 | image: minio/minio 26 | args: 27 | - server 28 | - /data 29 | - '--console-address=:9001' 30 | env: 31 | - name: MINIO_ROOT_USER 32 | valueFrom: 33 | secretKeyRef: 34 | name: minio-credentials 35 | key: username 36 | - name: MINIO_ROOT_PASSWORD 37 | valueFrom: 38 | secretKeyRef: 39 | name: minio-credentials 40 | key: password 41 | ports: 42 | - containerPort: 9000 43 | - containerPort: 9001 44 | readinessProbe: 45 | httpGet: 46 | path: /minio/health/ready 47 | port: 9000 48 | initialDelaySeconds: 120 49 | periodSeconds: 20 50 | livenessProbe: 51 | httpGet: 52 | path: /minio/health/live 53 | port: 9000 54 | initialDelaySeconds: 120 55 | periodSeconds: 20 56 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/minio/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - pvc.yaml 3 | - deployment.yaml 4 | - service.yaml 5 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/minio/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: minio-pvc 5 | spec: 6 | accessModes: 7 | - 
ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 20Gi 11 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/minio/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: minio 6 | spec: 7 | type: NodePort 8 | ports: 9 | - port: 9000 10 | name: api 11 | targetPort: 9000 12 | protocol: TCP 13 | - port: 9001 14 | name: webui 15 | targetPort: 9001 16 | protocol: TCP 17 | selector: 18 | app: minio 19 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/restore/wordpress-bucket-to-bucket.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: restore-wordpress 5 | spec: 6 | snapshot: f0dd5684 7 | podSecurityContext: 8 | runAsUser: 0 9 | restoreMethod: 10 | s3: 11 | endpoint: http://minio:9000 12 | bucket: backup2 13 | accessKeyIDSecretRef: 14 | name: minio-credentials 15 | key: username 16 | secretAccessKeySecretRef: 17 | name: minio-credentials 18 | key: password 19 | backend: 20 | repoPasswordSecretRef: 21 | name: backup-repo 22 | key: password 23 | s3: 24 | endpoint: http://minio:9000 25 | bucket: backups 26 | accessKeyIDSecretRef: 27 | name: minio-credentials 28 | key: username 29 | secretAccessKeySecretRef: 30 | name: minio-credentials 31 | key: password 32 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/restore/wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: restore-wordpress 5 | spec: 6 | snapshot: f0dd5684 7 | podSecurityContext: 8 | runAsUser: 0 9 | restoreMethod: 10 | folder: 11 | claimName: wordpress-pvc 12 | backend: 13 | 
repoPasswordSecretRef: 14 | name: backup-repo 15 | key: password 16 | s3: 17 | endpoint: http://minio:9000 18 | bucket: backups 19 | accessKeyIDSecretRef: 20 | name: minio-credentials 21 | key: username 22 | secretAccessKeySecretRef: 23 | name: minio-credentials 24 | key: password 25 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/scripts/1_setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script rebuilds the complete minikube cluster in one shot, 4 | # creating a ready-to-use WordPress + MariaDB + Minio environment. 5 | 6 | echo "" 7 | echo "••• Launching Minikube •••" 8 | minikube start --memory 4096 --disk-size 60g --cpus 4 9 | kubectl config use-context minikube 10 | 11 | echo "" 12 | echo "••• Installing Secrets •••" 13 | kubectl apply -k secrets 14 | 15 | echo "" 16 | echo "••• Installing Minio •••" 17 | kubectl apply -k minio 18 | 19 | echo "" 20 | echo "••• Installing MariaDB •••" 21 | kubectl apply -k mariadb 22 | 23 | echo "" 24 | echo "••• Installing WordPress •••" 25 | kubectl apply -k wordpress 26 | 27 | echo "" 28 | echo "••• Installing CRDs for K8up •••" 29 | kubectl apply -f https://github.com/k8up-io/k8up/releases/download/v2.2.0/k8up-crd.yaml 30 | 31 | echo "" 32 | echo "••• Installing K8up •••" 33 | helm repo add k8up-io https://k8up-io.github.io/k8up 34 | helm repo update 35 | helm install k8up-io/k8up --generate-name 36 | 37 | echo "" 38 | echo "••• Watch pods •••" 39 | k9s 40 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/scripts/2_browser.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Open browser showing Minio 4 | minikube service minio 5 | 6 | # Open browser showing WordPress 7 | minikube service wordpress 8 | 
-------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/scripts/3_backup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script triggers a backup job 4 | 5 | # Set Minikube context 6 | kubectl config use-context minikube 7 | 8 | # Trigger backup 9 | kubectl apply -f backup.yaml 10 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/scripts/4_restore.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script restores the contents of a backup to its rightful PVCs. 4 | # After the pods that perform the restore operation are "Completed", 5 | # you can delete the restore pods 6 | # and continue with the '5_schedule.sh' script. 7 | 8 | source scripts/environment.sh 9 | 10 | # Set Minikube context 11 | kubectl config use-context minikube 12 | 13 | # Restore WordPress PVC 14 | SNAPSHOT_ID=$(restic snapshots --json --last --path /data/wordpress-pvc | jq -r '.[0].id') 15 | yq e '.spec.snapshot="'${SNAPSHOT_ID}'"' restore/wordpress.yaml | kubectl apply -f - 16 | 17 | # Look up the latest MariaDB snapshot in Restic 18 | SNAPSHOT_ID=$(restic snapshots --json --last --path /default-mariadb | jq -r '.[0].id') 19 | 20 | # Restore MariaDB data 21 | MARIADB_POD=$(kubectl get pods -o custom-columns="NAME:.metadata.name" --no-headers -l "app=wordpress,tier=mariadb") 22 | # the environment variable should come from the pod - not from the local shell. 
23 | # shellcheck disable=SC2016 24 | restic dump "${SNAPSHOT_ID}" /default-mariadb | kubectl exec -i "$MARIADB_POD" -- /bin/bash -c 'mysql -uroot --password="${MARIADB_ROOT_PASSWORD}"' 25 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/scripts/5_schedule.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source scripts/environment.sh 4 | 5 | # Set Minikube context 6 | kubectl config use-context minikube 7 | 8 | # Set the schedule 9 | kubectl apply -f ../schedule.yaml 10 | 11 | # Watch how the number of snapshots grow 12 | watch restic snapshots 13 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/scripts/6_stop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | kill $MINIO_PORT 4 | echo $MINIO_PORT 5 | minikube stop 6 | minikube delete 7 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/scripts/environment.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | kubectl port-forward svc/minio 9000:9000 & 4 | export MINIO_PORT=$! 
5 | export KUBECONFIG="" 6 | export RESTIC_REPOSITORY=s3:http://localhost:9000/backups/ 7 | export RESTIC_PASSWORD=p@ssw0rd 8 | export AWS_ACCESS_KEY_ID=minio 9 | export AWS_SECRET_ACCESS_KEY=minio123 10 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/secrets/backup-repo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: backup-repo 5 | namespace: default 6 | type: Opaque 7 | stringData: 8 | password: p@ssw0rd 9 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/secrets/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - minio-credentials.yaml 3 | - backup-repo.yaml 4 | - mariadb-pass.yaml 5 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/secrets/mariadb-pass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mariadb-pass 5 | namespace: default 6 | type: Opaque 7 | stringData: 8 | password: ola0thai0eixieCie6Yahcooz3doojee 9 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/secrets/minio-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: minio-credentials 5 | namespace: default 6 | type: Opaque 7 | stringData: 8 | username: minio 9 | password: minio123 10 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/wordpress/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 
| metadata: 4 | name: wordpress 5 | labels: 6 | app: wordpress 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: wordpress 11 | tier: frontend 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: wordpress 18 | tier: frontend 19 | spec: 20 | containers: 21 | - image: wordpress:5.4-apache 22 | name: wordpress 23 | env: 24 | - name: WORDPRESS_DB_HOST 25 | value: mariadb 26 | - name: WORDPRESS_DB_PASSWORD 27 | valueFrom: 28 | secretKeyRef: 29 | name: mariadb-pass 30 | key: password 31 | ports: 32 | - containerPort: 80 33 | name: wordpress 34 | volumeMounts: 35 | - name: wordpress-persistent-storage 36 | mountPath: /var/www/html 37 | volumes: 38 | - name: wordpress-persistent-storage 39 | persistentVolumeClaim: 40 | claimName: wordpress-pvc 41 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/wordpress/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - deployment.yaml 3 | - pvc.yaml 4 | - service.yaml 5 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/tutorial/wordpress/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: wordpress-pvc 6 | labels: 7 | app: wordpress 8 | annotations: 9 | k8up.io/backup: "true" 10 | spec: 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 10Gi 16 | --- 17 | apiVersion: v1 18 | kind: PersistentVolumeClaim 19 | metadata: 20 | name: wordpress-restore-pvc 21 | labels: 22 | app: wordpress 23 | annotations: 24 | k8up.io/backup: "true" 25 | spec: 26 | accessModes: 27 | - ReadWriteOnce 28 | resources: 29 | requests: 30 | storage: 10Gi 31 | -------------------------------------------------------------------------------- 
/docs/modules/ROOT/examples/tutorial/wordpress/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: wordpress 5 | labels: 6 | app: wordpress 7 | spec: 8 | ports: 9 | - port: 80 10 | selector: 11 | app: wordpress 12 | tier: frontend 13 | type: LoadBalancer 14 | -------------------------------------------------------------------------------- /docs/modules/ROOT/examples/usage/k8up.txt: -------------------------------------------------------------------------------- 1 | NAME: 2 | k8up - A new cli application 3 | 4 | USAGE: 5 | k8up [global options] command [command options] [arguments...] 6 | 7 | VERSION: 8 | snapshot 9 | 10 | COMMANDS: 11 | operator 12 | restic 13 | cli 14 | help, h Shows a list of commands or help for one command 15 | 16 | GLOBAL OPTIONS: 17 | --debug, --verbose, -d sets the log level to debug (default: false) [$K8UP_DEBUG] 18 | --help, -h show help (default: false) 19 | --version, -v print the version (default: false) 20 | 21 | COPYRIGHT: 22 | (c) 2021 VSHN AG 23 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/.vale.ini: -------------------------------------------------------------------------------- 1 | StylesPath = /styles 2 | MinAlertLevel = warning # suggestion, warning or error 3 | 4 | # Only check Asciidoc files 5 | [*.adoc] 6 | BasedOnStyles = Microsoft 7 | 8 | Microsoft.GenderBias = warning 9 | Microsoft.Contractions = warning 10 | Microsoft.Quotes = warning 11 | Microsoft.RangeFormat = warning 12 | Microsoft.Avoid = warning 13 | 14 | # Ignore auto-generated references 15 | [references/api-reference.adoc] 16 | BasedOnStyles = "" 17 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/about/code_of_conduct.adoc: -------------------------------------------------------------------------------- 1 | 2 | = Code of Conduct 3 | 4 | 
Please consult the https://github.com/k8up-io/k8up/blob/master/CODE_OF_CONDUCT.md[CODE_OF_CONDUCT.md] file. 5 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/about/community.adoc: -------------------------------------------------------------------------------- 1 | = Community 2 | 3 | Connect with the K8up community: 4 | 5 | https://github.com/k8up-io/k8up/discussions[Discussions]:: The project has GitHub discussions enabled where we discuss everything around K8up. Feel free to hop in there. 6 | https://app.slack.com/client/T08PSQ7BQ/C06GP0D5FEF[Chat]:: We use the https://slack.cncf.io/[CNCF Slack Workspace] and are reachable in the #k8up channel. 7 | https://github.com/k8up-io/k8up[GitHub]:: The source code and issue tracking is available on _GitHub_ under https://github.com/k8up-io/k8up[k8up-io/k8up]. 8 | <<Monthly community meeting>>:: Join us in our monthly recurring community meeting. 9 | 10 | == Monthly community meeting 11 | 12 | The K8up community hosts a monthly community meeting in Zoom: 13 | 14 | Time and Date:: 15 | Every first Monday of the month at 5:00 PM Europe/Zurich timezone. 16 | You can use https://www.timeanddate.com/worldclock/converter.html?p1=268[timeanddate.com^] to convert into your timezone. 17 | 18 | Zoom link:: 19 | https://vshn.zoom.us/j/82326347868?pwd=0ImBHdwZH98raNP78fFlVI9zci7w6w.1[Zoom^] 20 | 21 | The agenda:: 22 | We collect inputs for the agenda in a https://docs.google.com/document/d/1O687amnDyQIXReeeKkylzxWgq1BZ7tZdIQN0-x3ca5g/edit?usp=sharing[Google Docs^] document. 23 | + 24 | The general agenda is: 25 | 26 | * Ask the maintainer 27 | * Discuss roadmap 28 | 29 | Currently, we don't do any recordings of the meeting, until the community wishes to do so. 30 | 31 | == Maintainer 32 | 33 | Current maintainers are documented in the https://github.com/k8up-io/k8up/blob/master/OWNERS.md[OWNERS.md] file. 34 | 35 | == Project sponsor 36 | 37 | The project is sponsored by https://vshn.ch/[VSHN AG]. 
38 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/about/roadmap.adoc: -------------------------------------------------------------------------------- 1 | = K8up Roadmap 2 | 3 | > It's done when it's done 4 | 5 | We're tracking the roadmap on https://github.com/k8up-io/k8up/milestones[GitHub by using Milestones]. 6 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/about/visual_design.adoc: -------------------------------------------------------------------------------- 1 | = K8up Visual Design 2 | 3 | == Logo 4 | 5 | The logos are available under https://github.com/k8up-io/k8up/tree/master/docs/modules/ROOT/assets/images[assets/images]. 6 | 7 | === Wide 8 | 9 | link:/k8up/_images/k8up-logo.png[PNG], link:/k8up/_images/k8up-logo.svg[SVG] 10 | 11 | image::k8up-logo.png[link="/k8up/_images/k8up-logo.png"] 12 | 13 | === Square 14 | 15 | link:/k8up/_images/k8up-logo-square.png[PNG], link:/k8up/_images/k8up-logo-square.svg[SVG] 16 | 17 | image::k8up-logo-square.png[link="/k8up/_images/k8up-logo-square.png"] 18 | 19 | == Colors 20 | 21 | The color palette is as follows: 22 | 23 | [cols=","] 24 | |=== 25 | 26 | |{set:cellbgcolor:#1A5CE6} 27 | |{set:cellbgcolor:#white}#1A5CE6 28 | 29 | |{set:cellbgcolor:#a08794} 30 | |{set:cellbgcolor:#white}#a08794 31 | 32 | |{set:cellbgcolor:#bb7e8c} 33 | |{set:cellbgcolor:#white}#bb7e8c 34 | 35 | |{set:cellbgcolor:#c9b6be} 36 | |{set:cellbgcolor:#white}#c9b6be 37 | 38 | |{set:cellbgcolor:#d1becf} 39 | |{set:cellbgcolor:#white}#d1becf 40 | 41 | |=== 42 | 43 | https://coolors.co/1a5ce6-a08794-bb7e8c-c9b6be-d1becf[https://coolors.co/]. 
44 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/about/vulnerability_reporting.adoc: -------------------------------------------------------------------------------- 1 | = Vulnerability Reporting 2 | 3 | To report a vulnerability, please report it on GitHub directly. 4 | 5 | You can follow the procedure described https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability[here]: 6 | 7 | . Navigate to the https://github.com/k8up-io/k8up/security[security tab] on the repository 8 | . Click on 'Advisories' 9 | . Click on 'Report a vulnerability' 10 | . Detail the issue 11 | 12 | The reporter(s) can typically expect a response within 24 hours acknowledging the issue was received. 13 | 14 | If a response is not received within 24 hours, please reach out to any https://github.com/orgs/k8up-io/teams/maintainer/members[maintainer] directly to confirm receipt of the issue. -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/explanations/missing-docs.adoc: -------------------------------------------------------------------------------- 1 | = Missing Documentation 2 | 3 | For the following keywords documentation is missing and will be added over time. 4 | This should give you an idea what is included, but currently not documented. 
5 | 6 | * Describe prune and check jobs and their implications (for example locking) 7 | * Webhooks with backup information 8 | * Prometheus Metrics of Operator 9 | * Prometheus Pushgateway metrics of `k8up restic` 10 | * K8up HA (leader election) 11 | * Index of possible annotations 12 | * More detailed description of backupcommand annotation 13 | * Tag support https://github.com/k8up-io/k8up/pull/94 14 | * document how/that the operator picks up running jobs on restart 15 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/explanations/rwo.adoc: -------------------------------------------------------------------------------- 1 | = How RWO Backups are implemented 2 | 3 | K8s does not prevent mounting a RWO PVC to multiple pods, if they are scheduled on the same host. 4 | K8up uses this fact to provide the ability to back up RWO PVCs. 5 | 6 | For a given backup in a namespace K8up will list all the PVCs. 7 | The PVCs are then grouped depending on their type: 8 | 9 | * all RWX PVCs are grouped together 10 | * RWO PVCs are grouped by k8s node where they are currently mounted 11 | 12 | K8up will then deploy backup jobs according to the grouping, a single job for all RWX PVCs and a job for each K8s node. 13 | The jobs themselves work as before, they loop over the mounted PVCs and do a file backup via restic. 14 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/explanations/system-requirements.adoc: -------------------------------------------------------------------------------- 1 | = System Requirements 2 | 3 | == Supported Kubernetes Versions 4 | 5 | K8up (v2 or later) officially only supports recent stable Kubernetes versions. 6 | 7 | K8up v1 (not maintained anymore) supports legacy Kubernetes clusters such as OpenShift `3.11` (Kubernetes 1.11). 8 | 9 | See the {page-origin-url}[Repository,window=_blank] which upstream Kubernetes version is supported. 
10 | 11 | == Supported CPU architectures 12 | 13 | K8up is only built, tested and supported on Linux. 14 | The only supported CPU architecture is currently _x64_ (otherwise known as _AMD64_ and _x86-64_). 15 | 16 | In v2.x, we added Docker images for the _AArch64_ CPU architecture (otherwise known as _arm64_). 17 | These builds are currently provided on a best-effort basis without further testing or support. 18 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/explanations/what-has-changed-in-v1.adoc: -------------------------------------------------------------------------------- 1 | = Changes in K8up v1.0 2 | 3 | TIP: See xref:how-tos/upgrade.adoc#upgrade_0_to_1[the upgrade instructions] for detailed instructions about how to upgrade from version `0.x` to `1.x`. 4 | 5 | https://github.com/k8up-io/k8up/releases/tag/v1.0.0[K8up v1.0] is a big milestone for K8up. 6 | Although K8up `1.x` itself is backwards compatible with `0.x` resources, the installation method has changed greatly. 7 | 8 | K8up sprang to life before Operators "exploded" in the Kubernetes ecosystem. 9 | There weren't many frameworks available then. 10 | It was initially built with https://github.com/spotahome/kooper[Kooper], but for v1.0 it was rewritten and migrated the Operator to https://github.com/operator-framework/operator-sdk[Operator SDK] and https://github.com/kubernetes-sigs/kubebuilder[Kubebuilder]. 11 | 12 | The resources should stay the same and are backwards compatible. 13 | But the CRDs feature new properties and thus should be upgraded. 14 | 15 | Additionally, K8up comes with a new CRD `EffectiveSchedule`. 16 | This resource is needed for the xref:references/schedule-specification.adoc[K8up specific schedules]. 17 | Related to this, a new environment variable `BACKUP_OPERATOR_NAMESPACE` is required. 18 | However this defaults to the installation namespace when using Kustomize or Helm. 
19 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/explanations/what-has-changed-in-v2.adoc: -------------------------------------------------------------------------------- 1 | = Changes in K8up v2.0 2 | 3 | TIP: See xref:how-tos/upgrade.adoc#upgrade_1_to_2[the upgrade instructions] for detailed instructions about how to upgrade from version `1.x` to `2.x`. 4 | 5 | https://github.com/k8up-io/k8up/releases/tag/v2.0.0[K8up v2.0] is another milestone for K8up. 6 | Because of the move to its own GitHub organization, some resources have been renamed. 7 | This makes K8up `2.x` incompatible with previous resources. 8 | 9 | Previously, the code that invoked the `restic` binary was in its own project, which was called `wrestic`. 10 | Over time, we realised that the two components are tightly coupled. 11 | For users, it was not clear, whether a bug was in the _wrestic_ code or in the _k8up_ code. 12 | Neither was it clear to us, all the time. 13 | Therefore, we decided that it would be best to merge the two repositories into one. 14 | 15 | We expect this to have additional benefits. 16 | For one, there is only one Docker image going forward, `ghcr.io/k8up-io/k8up` (or `quay.io/k8up-io/k8up`, respectively). 17 | It contains all the code for both parts, the operator and the restic adapter, and they can be released together as one. 18 | Because often the release of one component required a release of the other component anyway. 19 | 20 | We also believe that the merge made K8up easier to comprehend. 21 | We hope that in the future both parts can also share more code. 22 | For now, their CLI interfaces have been aligned, which already helps during development. 23 | 24 | We have big plans for future versions of K8up, but they all require a solid foundation. 25 | We believe that K8up v2 is that solid foundation. 26 | 27 | Going forward, K8up v2 drops support for very old Kubernetes versions, for example OpenShift 3.11. 
28 | If you are using Prometheus for alerting, note that metrics names changed their prefix from `baas` to `k8up`. 29 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/how-tos/generic-env-vars.adoc: -------------------------------------------------------------------------------- 1 | = Generic Restic Environment Variables 2 | 3 | Most of the supported backup backends allow you to specify authentication details as secret values. 4 | 5 | One such example is the S3 backend spec; it has `accessKeyIDSecretRef` and `secretAccessKeySecretRef` fields. 6 | 7 | It is however sometimes useful to pass additional environment variables to the container that runs the backup. 8 | This can be achieved with the help of the `envFrom` field that references a config map or a secret at the backend level. 9 | 10 | Please be aware that you could potentially add conflicting or duplicate environment variables. 11 | 12 | Check the api reference for the xref:references/api-reference.adoc#{anchor_prefix}-github-com-k8up-io-k8up-api-v1-backend[Backend]. 13 | 14 | You can read link:https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables[the kubernetes documentation] for more information on how to use `envFrom`. 15 | 16 | == Open Stack Swift Authentication Example 17 | 18 | The following example shows you how to configure Open Stack Swift authentication using a Kubernetes secret and `envFrom` configuration for your backend. 19 | 20 | You can read more on link:https://wiki.openstack.org/wiki/OpenStackClient/Authentication[Open Stack Client Authentication]. 
21 | 22 | [source,yaml] 23 | ---- 24 | include::example$open-stack-swift-auth.yaml[] 25 | ---- 26 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/how-tos/installation.adoc: -------------------------------------------------------------------------------- 1 | = How to Install K8up 2 | 3 | == Helm 4 | 5 | The most convenient way to install K8up on your Kubernetes cluster is by using https://helm.sh/[helm]. 6 | 7 | Please refer to the separate installation instructions in the https://github.com/k8up-io/k8up/tree/master/charts/k8up[Helm chart]. 8 | 9 | == Command-Line Tool (CLI) 10 | 11 | The command-line tool can be downloaded from the https://github.com/k8up-io/k8up/releases["Releases" page on GitHub], and installed in your `$PATH`. 12 | 13 | After installation, run the `k8up --version` command to make sure it is properly installed. 14 | 15 | == Samples 16 | 17 | You can find some examples of use of K8up in the `config/samples/` folder of the K8up repository: 18 | 19 | [source,bash] 20 | ---- 21 | kubectl apply -k config/samples/ 22 | ---- 23 | 24 | Please be aware that these manifests are intended for testing, development, and as examples. 25 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/how-tos/optimize-schedules.adoc: -------------------------------------------------------------------------------- 1 | = How to Optimize schedules across the cluster 2 | 3 | K8up features advanced scheduling mechanisms that allow you to optimize backup and other schedules. 4 | 5 | When you start having hundreds of backup, check, prune or archive schedules it can become a bad idea to run them all at the same time. 6 | However, manually trying to balance the schedules to different times is no solution either. 7 | 8 | Enter smart schedules. 9 | In addition to the standard Cron syntax (for example `* */12 * * *`) K8up features stable randomization of schedules. 
10 | Enter a special `-random`-suffixed schedule to your spec in order to let K8up generate a schedule for you. 11 | 12 | A schedule of `@weekly-random` generates an effective schedule like `52 4 * * 4`. 13 | 14 | TIP: The full specification of the schedules can be found in the xref:references/schedule-specification.adoc[Schedule Specifications] 15 | 16 | The generated schedules are randomized with a stable seed based on the namespace and name of the schedule object. 17 | That means that every schedule object will have different schedules, but deleting and recreating the same schedule will have the same effective schedules. 18 | This behaviour allows portability between clusters. 19 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/references/annotations.adoc: -------------------------------------------------------------------------------- 1 | = Annotations 2 | 3 | Users can influence some aspects of K8up by defining annotations on certain resources. 4 | 5 | [NOTE] 6 | ==== 7 | These annotations can have different names in your cluster, depending on how K8up is configured. 8 | See xref:references/operator-config-reference.adoc[Operator Configuration reference] 9 | ==== 10 | 11 | |=== 12 | |Annotation |Description |Accepted Values |Applicable Resources |xref:references/operator-config-reference.adoc[Configuration Option] 13 | 14 | |`k8up.io/backup` 15 | |If defined, this influences whether this Pod/PVC shall be backed up (`true`) or not (`false`). If omitted, K8up will default to `true`, unless `$BACKUP_SKIP_WITHOUT_ANNOTATION` is set to `true`. 16 | |Either `'true'` or `'false'` 17 | |`Pod`, `PersistentVolumeClaim` 18 | |`BACKUP_ANNOTATION` 19 | 20 | |`k8up.io/backupcommand` 21 | |If defined, this command is invoked in the context of this `Pod` on the beginning of a backup. 22 | |A string that represents a command (and its arguments) to execute, for example `mysqldump -uroot -psecure --all-databases`. 
23 | See xref:how-tos/application-aware-backups.adoc[Application Aware Backups] for more information and an example. 24 | |`Pod` 25 | |`BACKUP_BACKUPCOMMANDANNOTATION` 26 | 27 | |`k8up.io/file-extension` 28 | |The output of the `k8up.io/backupcommand` annotation is written to a file in order for it to be backed up. 29 | This annotation defines the file extension of that file. 30 | |A string which is a valid file extension on the source system, for example `.sql`. 31 | |`Pod` 32 | |`BACKUP_FILEEXTENSIONANNOTATION` 33 | 34 | |`k8up.io/backupcommand-container` 35 | |Specifies in which container inside the pod the backup command should be executed. 36 | |A string which is a valid container name. 37 | |`Pod` 38 | |`BACKUP_CONTAINERANNOTATION` 39 | |=== 40 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/references/operator-config-reference.adoc: -------------------------------------------------------------------------------- 1 | = Operator Configuration Reference 2 | 3 | The `k8up operator` can be configured in two ways: 4 | 5 | . Per namespace backups. Optimal for shared clusters. 6 | . Global settings with namespaced schedules. Optimal for private clusters. 7 | 8 | == Arguments and Environment Variables 9 | 10 | You need to define `BACKUP_OPERATOR_NAMESPACE` (or `--operator-namespace` respectively), but everything else can be left to their default values. 11 | 12 | See `k8up operator --help` for all the options: 13 | 14 | [source,txt] 15 | ---- 16 | include::example$usage/operator.txt[] 17 | ---- 18 | 19 | == Global Settings 20 | 21 | Each variable starting with `BACKUP_GLOBAL*` can be used to declare a global default for all namespaces. 22 | For example, if you configure the S3 bucket and credentials here, you won’t have to specify them in the Schedule or Backup resource definitions. 23 | 24 | NOTE: It is always possible to overwrite the global settings. 
Simply declare the specific setting in the relevant resource definition and it will be applied instead of the global default. 25 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/references/restic-config-reference.adoc: -------------------------------------------------------------------------------- 1 | = Restic Configuration Reference 2 | 3 | For users, `k8up restic` can only be configured through the respective xref:references/object-specifications.adoc[Custom Resources `Schedule`, `Backup`, `Check`, `Prune`, `Archive` and `Restore`] and through xref:references/operator-config-reference.adoc[the _global_ properties of the `k8up operator`]. 4 | 5 | [NOTE] 6 | The `k8up restic` module used to be a component called _wrestic_. 7 | Their repositories were merged in 2021. 8 | 9 | == Arguments and Environment Variables 10 | 11 | The required arguments depend on the actions that should be performed. 12 | 13 | * The environment variable `RESTIC_PASSWORD` must always be defined. 14 | It is not used by `k8up restic` directly, but it is used when the actual `restic` binary is invoked. 15 | * The argument `--resticRepository` must always be defined. 16 | * If `--prune` is set, then all the `--keepWithin*` arguments need to be valid and positive durations, https://pkg.go.dev/time#ParseDuration[see the respective Go documentation]. 17 | * If `--prune` is set, then all the other `--keep*` arguments (except `--keepTags`) need to be positive numbers (integers). 18 | * If `--restore` is set, then `--restoreType` must be defined as well. 19 | * If `--restore` is set and `--restoreType` is set to `s3`, then all the `--restoreS3*` arguments have to be defined as well. 
20 | 21 | See `k8up restic --help` for all the options: 22 | 23 | [source,txt] 24 | ---- 25 | include::example$usage/restic.txt[] 26 | ---- 27 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/tutorials/presentations.adoc: -------------------------------------------------------------------------------- 1 | = K8up Presentations 2 | 3 | The slides below were presented at the https://www.meetup.com/cloud-native-computing-switzerland/events/285960277/[Cloud Native Computing Meetup], on Thursday February 23rd, 2023. 4 | 5 | Click on them and use the arrow keys to navigate from left to right. 6 | 7 | ++++ 8 | 9 | ++++ 10 | -------------------------------------------------------------------------------- /e2e/.gitignore: -------------------------------------------------------------------------------- 1 | report.xml 2 | debug/ 3 | node_modules/ 4 | -------------------------------------------------------------------------------- /e2e/Makefile: -------------------------------------------------------------------------------- 1 | clean_targets += e2e-clean 2 | 3 | uname_s := $(shell uname -s) 4 | ifeq ($(uname_s),Linux) 5 | xargs := xargs --no-run-if-empty 6 | else 7 | xargs := xargs 8 | endif 9 | 10 | bats := node_modules/bats/bin/bats 11 | bats_args ?= 12 | 13 | .PHONY: e2e-test 14 | e2e-test: export KUBECONFIG = $(KIND_KUBECONFIG) 15 | e2e-test: export E2E_IMAGE = $(K8UP_E2E_IMG) 16 | e2e-test: export IMG_REGISTRY = $(E2E_REGISTRY) 17 | e2e-test: export IMG_TAG = $(E2E_TAG) 18 | e2e-test: export IMG_REPO = $(E2E_REPO) 19 | e2e-test: e2e-setup install kind-load-image ## Run the E2E tests 20 | @cd ./e2e && $(bats) $(bats_args) $(BATS_FILES) 21 | 22 | .PHONY: kind-load-image 23 | kind-load-image: kind-setup docker-build ## Load the e2e container image onto e2e cluster 24 | @$(KIND) load docker-image --name $(KIND_CLUSTER) $(K8UP_E2E_IMG) 25 | 26 | .PHONY: e2e-setup 27 | e2e-setup: export KUBECONFIG = $(KIND_KUBECONFIG) 
28 | e2e-setup: chart-prepare 29 | e2e-setup: e2e-cmctl 30 | e2e-setup: e2e/node_modules kind-setup | $(e2etest_dir) ## Run the e2e setup 31 | 32 | .PHONY: e2e-clean 33 | e2e-clean: kind-clean ## Remove all e2e-related resources (incl. all e2e Docker images) 34 | docker images --filter "reference=$(E2E_REGISTRY)/$(E2E_REPO)" --format "{{ .Repository }}:{{ .Tag }}" | $(xargs) docker rmi || true 35 | rm -rf e2e/debug e2e/node_modules $(e2etest_dir) 36 | 37 | ### 38 | ### Artifacts 39 | ### 40 | 41 | $(e2etest_dir): 42 | mkdir -p $(e2etest_dir) 43 | 44 | e2e/node_modules: 45 | @npm --prefix ./e2e install 46 | 47 | e2e-cmctl: 48 | @command -v cmctl > /dev/null || $(GO_EXEC) install github.com/cert-manager/cmctl/v2@latest 49 | -------------------------------------------------------------------------------- /e2e/definitions/annotated-subject/deployment-error.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: annotated-subject-deployment 6 | namespace: k8up-e2e-subject 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: subject 12 | template: 13 | metadata: 14 | labels: 15 | app: subject 16 | annotations: 17 | k8up.io/backupcommand: 'invalid' 18 | k8up.io/backupcommand-container: subject-container 19 | spec: 20 | containers: 21 | - image: busybox 22 | imagePullPolicy: IfNotPresent 23 | name: dummy-container-blocking-first-position 24 | command: 25 | - "/bin/sh" 26 | - "-c" 27 | - "sleep infinity" 28 | - name: subject-container 29 | image: quay.io/prometheus/busybox:latest 30 | imagePullPolicy: IfNotPresent 31 | args: 32 | - sh 33 | - -c 34 | - | 35 | sleep infinity 36 | securityContext: 37 | runAsUser: $ID 38 | volumeMounts: 39 | - name: volume 40 | mountPath: /data 41 | env: 42 | - name: BACKUP_FILE_CONTENT 43 | value: "" 44 | - name: BACKUP_FILE_NAME 45 | value: "" 46 | volumes: 47 | - name: volume 48 | persistentVolumeClaim: 49 | claimName: 
subject-pvc 50 | -------------------------------------------------------------------------------- /e2e/definitions/annotated-subject/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: annotated-subject-deployment 6 | namespace: k8up-e2e-subject 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: subject 12 | template: 13 | metadata: 14 | labels: 15 | app: subject 16 | annotations: 17 | k8up.io/backupcommand: '/tmp/test.sh' 18 | k8up.io/file-extension: '.txt' 19 | k8up.io/backupcommand-container: subject-container 20 | spec: 21 | containers: 22 | - image: busybox 23 | imagePullPolicy: IfNotPresent 24 | name: dummy-container-blocking-first-position 25 | command: 26 | - "/bin/sh" 27 | - "-c" 28 | - "sleep infinity" 29 | - name: subject-container 30 | image: quay.io/prometheus/busybox:latest 31 | imagePullPolicy: IfNotPresent 32 | args: 33 | - sh 34 | - -c 35 | - | 36 | printf "$BACKUP_FILE_CONTENT" | tee "/data/$BACKUP_FILE_NAME" && \ 37 | printf '#!/bin/sh\nsleep 30s\necho %s\n' "$BACKUP_FILE_CONTENT" | tee /tmp/test.sh && chmod a+x /tmp/test.sh && \ 38 | echo && \ 39 | ls -la /data && \ 40 | echo "test file /data/$BACKUP_FILE_NAME written, sleeping now" && \ 41 | sleep infinity 42 | securityContext: 43 | runAsUser: $ID 44 | volumeMounts: 45 | - name: volume 46 | mountPath: /data 47 | env: 48 | - name: BACKUP_FILE_CONTENT 49 | value: "" 50 | - name: BACKUP_FILE_NAME 51 | value: "" 52 | volumes: 53 | - name: volume 54 | persistentVolumeClaim: 55 | claimName: subject-pvc 56 | -------------------------------------------------------------------------------- /e2e/definitions/annotated-subject/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: subject-pod 5 | namespace: k8up-e2e-subject 6 | annotations: 7 | k8up.io/backupcommand: 
'/tmp/test.sh' 8 | k8up.io/file-extension: '.txt' 9 | k8up.io/backupcommand-container: subject-container 10 | spec: 11 | containers: 12 | - image: busybox 13 | imagePullPolicy: IfNotPresent 14 | name: dummy-container-blocking-first-position 15 | command: 16 | - "/bin/sh" 17 | - "-c" 18 | - "sleep infinity" 19 | - name: subject-container 20 | image: quay.io/prometheus/busybox:latest 21 | imagePullPolicy: IfNotPresent 22 | args: 23 | - sh 24 | - -c 25 | - | 26 | printf '#!/bin/sh\nsleep 30s\necho %s\n' "$BACKUP_FILE_CONTENT" | tee /tmp/test.sh && chmod a+x /tmp/test.sh && \ 27 | echo && \ 28 | echo "sleeping now" && \ 29 | sleep infinity 30 | securityContext: 31 | runAsUser: $ID 32 | env: 33 | - name: BACKUP_FILE_CONTENT 34 | value: "" 35 | - name: BACKUP_FILE_NAME 36 | value: "" 37 | -------------------------------------------------------------------------------- /e2e/definitions/archive/config-mtls-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: k8up-s3-mtls-archive-mtls-env 5 | namespace: k8up-e2e-subject 6 | data: 7 | CA_CERT_FILE: /mnt/tls/ca.crt 8 | CLIENT_CERT_FILE: /mnt/tls/tls.crt 9 | CLIENT_KEY_FILE: /mnt/tls/tls.key 10 | RESTORE_CA_CERT_FILE: /mnt/tls/ca.crt 11 | RESTORE_CLIENT_CERT_FILE: /mnt/tls/tls.crt 12 | RESTORE_CLIENT_KEY_FILE: /mnt/tls/tls.key -------------------------------------------------------------------------------- /e2e/definitions/archive/s3-mtls-archive-mtls-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Archive 3 | metadata: 4 | name: k8up-s3-mtls-archive-mtls-env 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | s3: 11 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 12 | bucket: archive 13 | accessKeyIDSecretRef: 14 | name: backup-credentials 15 | key: username 16 
| secretAccessKeySecretRef: 17 | name: backup-credentials 18 | key: password 19 | backend: 20 | repoPasswordSecretRef: 21 | name: backup-repo 22 | key: password 23 | envFrom: 24 | - configMapRef: 25 | name: k8up-s3-mtls-archive-mtls-env 26 | s3: 27 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 28 | bucket: backup 29 | accessKeyIDSecretRef: 30 | name: backup-credentials 31 | key: username 32 | secretAccessKeySecretRef: 33 | name: backup-credentials 34 | key: password 35 | volumeMounts: 36 | - name: minio-client-mtls 37 | mountPath: /mnt/tls/ 38 | podSecurityContext: 39 | fsGroup: $ID 40 | runAsUser: $ID 41 | volumes: 42 | - name: minio-client-mtls 43 | secret: 44 | secretName: minio-client-mtls 45 | defaultMode: 420 46 | -------------------------------------------------------------------------------- /e2e/definitions/archive/s3-mtls-archive-mtls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Archive 3 | metadata: 4 | name: k8up-s3-mtls-archive-mtls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | tlsOptions: 11 | caCert: /mnt/tls/ca.crt 12 | clientCert: /mnt/tls/tls.crt 13 | clientKey: /mnt/tls/tls.key 14 | s3: 15 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 16 | bucket: archive 17 | accessKeyIDSecretRef: 18 | name: backup-credentials 19 | key: username 20 | secretAccessKeySecretRef: 21 | name: backup-credentials 22 | key: password 23 | backend: 24 | repoPasswordSecretRef: 25 | name: backup-repo 26 | key: password 27 | tlsOptions: 28 | caCert: /mnt/tls/ca.crt 29 | clientCert: /mnt/tls/tls.crt 30 | clientKey: /mnt/tls/tls.key 31 | s3: 32 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 33 | bucket: backup 34 | accessKeyIDSecretRef: 35 | name: backup-credentials 36 | key: username 37 | secretAccessKeySecretRef: 38 | name: backup-credentials 39 | key: password 40 | 
volumeMounts: 41 | - name: minio-client-mtls 42 | mountPath: /mnt/tls/ 43 | podSecurityContext: 44 | fsGroup: $ID 45 | runAsUser: $ID 46 | volumes: 47 | - name: minio-client-mtls 48 | secret: 49 | secretName: minio-client-mtls 50 | defaultMode: 420 51 | -------------------------------------------------------------------------------- /e2e/definitions/archive/s3-mtls-archive-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Archive 3 | metadata: 4 | name: k8up-s3-mtls-archive-tls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | tlsOptions: 11 | caCert: /mnt/tls/ca.crt 12 | clientCert: /mnt/tls/tls.crt 13 | clientKey: /mnt/tls/tls.key 14 | s3: 15 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 16 | bucket: archive 17 | accessKeyIDSecretRef: 18 | name: backup-credentials 19 | key: username 20 | secretAccessKeySecretRef: 21 | name: backup-credentials 22 | key: password 23 | volumeMounts: 24 | - name: minio-client-mtls 25 | mountPath: /mnt/tls/ 26 | backend: 27 | repoPasswordSecretRef: 28 | name: backup-repo 29 | key: password 30 | tlsOptions: 31 | caCert: /mnt/ca/ca.crt 32 | s3: 33 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 34 | bucket: backup 35 | accessKeyIDSecretRef: 36 | name: backup-credentials 37 | key: username 38 | secretAccessKeySecretRef: 39 | name: backup-credentials 40 | key: password 41 | volumeMounts: 42 | - name: minio-ca-tls 43 | mountPath: /mnt/ca/ 44 | podSecurityContext: 45 | fsGroup: $ID 46 | runAsUser: $ID 47 | volumes: 48 | - name: minio-ca-tls 49 | secret: 50 | secretName: minio-ca-tls 51 | defaultMode: 420 52 | - name: minio-client-mtls 53 | secret: 54 | secretName: minio-client-mtls 55 | defaultMode: 420 56 | -------------------------------------------------------------------------------- /e2e/definitions/archive/s3-tls-archive-mtls.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Archive 3 | metadata: 4 | name: k8up-s3-tls-archive-mtls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | tlsOptions: 11 | caCert: /mnt/ca/ca.crt 12 | s3: 13 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 14 | bucket: archive 15 | accessKeyIDSecretRef: 16 | name: backup-credentials 17 | key: username 18 | secretAccessKeySecretRef: 19 | name: backup-credentials 20 | key: password 21 | volumeMounts: 22 | - name: minio-ca-tls 23 | mountPath: /mnt/ca/ 24 | backend: 25 | repoPasswordSecretRef: 26 | name: backup-repo 27 | key: password 28 | tlsOptions: 29 | caCert: /mnt/tls/ca.crt 30 | clientCert: /mnt/tls/tls.crt 31 | clientKey: /mnt/tls/tls.key 32 | s3: 33 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 34 | bucket: backup 35 | accessKeyIDSecretRef: 36 | name: backup-credentials 37 | key: username 38 | secretAccessKeySecretRef: 39 | name: backup-credentials 40 | key: password 41 | volumeMounts: 42 | - name: minio-client-mtls 43 | mountPath: /mnt/tls/ 44 | podSecurityContext: 45 | fsGroup: $ID 46 | runAsUser: $ID 47 | volumes: 48 | - name: minio-ca-tls 49 | secret: 50 | secretName: minio-ca-tls 51 | defaultMode: 420 52 | - name: minio-client-mtls 53 | secret: 54 | secretName: minio-client-mtls 55 | defaultMode: 420 56 | -------------------------------------------------------------------------------- /e2e/definitions/archive/s3-tls-archive-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Archive 3 | metadata: 4 | name: k8up-s3-tls-archive-tls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | tlsOptions: 11 | caCert: /mnt/ca/ca.crt 12 | s3: 13 | endpoint: 
https://minio-tls.minio-e2e.svc.cluster.local 14 | bucket: archive 15 | accessKeyIDSecretRef: 16 | name: backup-credentials 17 | key: username 18 | secretAccessKeySecretRef: 19 | name: backup-credentials 20 | key: password 21 | backend: 22 | repoPasswordSecretRef: 23 | name: backup-repo 24 | key: password 25 | tlsOptions: 26 | caCert: /mnt/ca/ca.crt 27 | s3: 28 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 29 | bucket: backup 30 | accessKeyIDSecretRef: 31 | name: backup-credentials 32 | key: username 33 | secretAccessKeySecretRef: 34 | name: backup-credentials 35 | key: password 36 | volumeMounts: 37 | - name: minio-ca-tls 38 | mountPath: /mnt/ca/ 39 | podSecurityContext: 40 | fsGroup: $ID 41 | runAsUser: $ID 42 | volumes: 43 | - name: minio-ca-tls 44 | secret: 45 | secretName: minio-ca-tls 46 | defaultMode: 420 47 | -------------------------------------------------------------------------------- /e2e/definitions/backup/backup-mtls-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Backup 3 | metadata: 4 | name: k8up-backup-mtls-env 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | backend: 10 | repoPasswordSecretRef: 11 | name: backup-repo 12 | key: password 13 | envFrom: 14 | - configMapRef: 15 | name: k8up-backup-mtls-env 16 | s3: 17 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 18 | bucket: backup 19 | accessKeyIDSecretRef: 20 | name: backup-credentials 21 | key: username 22 | secretAccessKeySecretRef: 23 | name: backup-credentials 24 | key: password 25 | volumeMounts: 26 | - name: minio-client-mtls 27 | mountPath: /mnt/tls/ 28 | podSecurityContext: 29 | fsGroup: $ID 30 | runAsUser: $ID 31 | volumes: 32 | - name: minio-client-mtls 33 | secret: 34 | secretName: minio-client-mtls 35 | defaultMode: 420 36 | -------------------------------------------------------------------------------- 
/e2e/definitions/backup/backup-mtls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Backup 3 | metadata: 4 | name: k8up-backup-mtls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | backend: 10 | repoPasswordSecretRef: 11 | name: backup-repo 12 | key: password 13 | tlsOptions: 14 | caCert: /mnt/tls/ca.crt 15 | clientCert: /mnt/tls/tls.crt 16 | clientKey: /mnt/tls/tls.key 17 | s3: 18 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 19 | bucket: backup 20 | accessKeyIDSecretRef: 21 | name: backup-credentials 22 | key: username 23 | secretAccessKeySecretRef: 24 | name: backup-credentials 25 | key: password 26 | volumeMounts: 27 | - name: minio-client-mtls 28 | mountPath: /mnt/tls/ 29 | podSecurityContext: 30 | fsGroup: $ID 31 | runAsUser: $ID 32 | volumes: 33 | - name: minio-client-mtls 34 | secret: 35 | secretName: minio-client-mtls 36 | defaultMode: 420 37 | -------------------------------------------------------------------------------- /e2e/definitions/backup/backup-selectors.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: Backup 4 | metadata: 5 | name: k8up-backup-selectors 6 | namespace: k8up-e2e-subject 7 | spec: 8 | labelSelectors: 9 | - matchExpressions: 10 | - key: exists 11 | operator: Exists 12 | - matchExpressions: 13 | - key: specific-values 14 | operator: In 15 | values: 16 | - specific-value-1 17 | - specific-value-2 18 | failedJobsHistoryLimit: 1 19 | successfulJobsHistoryLimit: 1 20 | backend: 21 | repoPasswordSecretRef: 22 | name: backup-repo 23 | key: password 24 | s3: 25 | endpoint: http://minio.minio-e2e.svc.cluster.local:9000 26 | bucket: backup 27 | accessKeyIDSecretRef: 28 | name: backup-credentials 29 | key: username 30 | secretAccessKeySecretRef: 31 | name: backup-credentials 32 | key: password 33 | 
podSecurityContext: 34 | runAsUser: $ID 35 | -------------------------------------------------------------------------------- /e2e/definitions/backup/backup-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Backup 3 | metadata: 4 | name: k8up-backup-tls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | backend: 10 | repoPasswordSecretRef: 11 | name: backup-repo 12 | key: password 13 | tlsOptions: 14 | caCert: /mnt/ca/ca.crt 15 | s3: 16 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 17 | bucket: backup 18 | accessKeyIDSecretRef: 19 | name: backup-credentials 20 | key: username 21 | secretAccessKeySecretRef: 22 | name: backup-credentials 23 | key: password 24 | volumeMounts: 25 | - name: minio-ca-tls 26 | mountPath: /mnt/ca/ 27 | podSecurityContext: 28 | fsGroup: $ID 29 | runAsUser: $ID 30 | volumes: 31 | - name: minio-ca-tls 32 | secret: 33 | secretName: minio-ca-tls 34 | defaultMode: 420 35 | -------------------------------------------------------------------------------- /e2e/definitions/backup/backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Backup 3 | metadata: 4 | name: k8up-backup 5 | namespace: k8up-e2e-subject 6 | labels: 7 | e2e: 'true' 8 | spec: 9 | failedJobsHistoryLimit: 1 10 | successfulJobsHistoryLimit: 1 11 | backend: 12 | repoPasswordSecretRef: 13 | name: backup-repo 14 | key: password 15 | s3: 16 | endpoint: http://minio.minio-e2e.svc.cluster.local:9000 17 | bucket: backup 18 | accessKeyIDSecretRef: 19 | name: backup-credentials 20 | key: username 21 | secretAccessKeySecretRef: 22 | name: backup-credentials 23 | key: password 24 | podSecurityContext: 25 | runAsUser: $ID 26 | -------------------------------------------------------------------------------- /e2e/definitions/backup/config-mtls-env.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: k8up-backup-mtls-env 5 | namespace: k8up-e2e-subject 6 | data: 7 | CA_CERT_FILE: /mnt/tls/ca.crt 8 | CLIENT_CERT_FILE: /mnt/tls/tls.crt 9 | CLIENT_KEY_FILE: /mnt/tls/tls.key -------------------------------------------------------------------------------- /e2e/definitions/backup/podconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: podconfig 5 | namespace: k8up-e2e-subject 6 | annotations: 7 | test: test 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: foo # Should not be in the final container 13 | command: # Should not be in the final container 14 | - more 15 | - foo 16 | env: 17 | - name: FOO 18 | value: bar 19 | securityContext: 20 | allowPrivilegeEscalation: true 21 | -------------------------------------------------------------------------------- /e2e/definitions/cert/issure.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Issuer 3 | metadata: 4 | name: selfsigned-issuer 5 | namespace: minio-e2e 6 | spec: 7 | selfSigned: { } 8 | -------------------------------------------------------------------------------- /e2e/definitions/cert/minio-ca.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: minio-root-ca 5 | namespace: minio-e2e 6 | spec: 7 | isCA: true 8 | commonName: minio-root-ca 9 | subject: 10 | organizations: 11 | - Minio 12 | secretName: minio-root-ca 13 | duration: 17520h0m0s 14 | renewBefore: 2190h0m0s 15 | privateKey: 16 | algorithm: ECDSA 17 | size: 256 18 | issuerRef: 19 | name: selfsigned-issuer 20 | kind: Issuer 21 | group: cert-manager.io 22 | --- 23 | apiVersion: cert-manager.io/v1 
24 | kind: Issuer 25 | metadata: 26 | name: minio-intermediate-ca 27 | namespace: minio-e2e 28 | spec: 29 | ca: 30 | secretName: minio-root-ca 31 | -------------------------------------------------------------------------------- /e2e/definitions/cert/minio-mtls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: minio-server-mtls 5 | namespace: minio-e2e 6 | spec: 7 | isCA: false 8 | secretName: minio-server-mtls 9 | dnsNames: 10 | - minio-mtls.minio-e2e.svc.cluster.local 11 | - minio-mtls.minio-e2e 12 | - minio-mtls 13 | issuerRef: 14 | name: minio-intermediate-ca 15 | duration: 8760h 16 | renewBefore: 2190h 17 | usages: 18 | - server auth 19 | - client auth 20 | --- 21 | apiVersion: cert-manager.io/v1 22 | kind: Certificate 23 | metadata: 24 | name: minio-client-mtls 25 | namespace: minio-e2e 26 | spec: 27 | secretName: minio-client-mtls 28 | isCA: false 29 | duration: 2160h 30 | renewBefore: 720m 31 | usages: 32 | - server auth 33 | - client auth 34 | commonName: "minio-mtls" 35 | issuerRef: 36 | name: minio-intermediate-ca 37 | -------------------------------------------------------------------------------- /e2e/definitions/cert/minio-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: minio-server-tls 5 | namespace: minio-e2e 6 | spec: 7 | isCA: false 8 | secretName: minio-server-tls 9 | dnsNames: 10 | - minio-tls.minio-e2e.svc.cluster.local 11 | - minio-tls.minio-e2e 12 | - minio-tls 13 | issuerRef: 14 | name: minio-intermediate-ca 15 | duration: 8760h 16 | renewBefore: 2190h 17 | usages: 18 | - server auth 19 | -------------------------------------------------------------------------------- /e2e/definitions/check/check-mtls-env.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: k8up.io/v1 2 | kind: Check 3 | metadata: 4 | name: k8up-check-mtls-env 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | backend: 10 | repoPasswordSecretRef: 11 | name: backup-repo 12 | key: password 13 | envFrom: 14 | - configMapRef: 15 | name: k8up-check-mtls-env 16 | s3: 17 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 18 | bucket: backup 19 | accessKeyIDSecretRef: 20 | name: backup-credentials 21 | key: username 22 | secretAccessKeySecretRef: 23 | name: backup-credentials 24 | key: password 25 | volumeMounts: 26 | - name: minio-client-mtls 27 | mountPath: /mnt/tls/ 28 | podSecurityContext: 29 | fsGroup: $ID 30 | runAsUser: $ID 31 | volumes: 32 | - name: minio-client-mtls 33 | secret: 34 | secretName: minio-client-mtls 35 | defaultMode: 420 36 | -------------------------------------------------------------------------------- /e2e/definitions/check/check-mtls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Check 3 | metadata: 4 | name: k8up-check-mtls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | backend: 10 | repoPasswordSecretRef: 11 | name: backup-repo 12 | key: password 13 | tlsOptions: 14 | caCert: /mnt/tls/ca.crt 15 | clientCert: /mnt/tls/tls.crt 16 | clientKey: /mnt/tls/tls.key 17 | s3: 18 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 19 | bucket: backup 20 | accessKeyIDSecretRef: 21 | name: backup-credentials 22 | key: username 23 | secretAccessKeySecretRef: 24 | name: backup-credentials 25 | key: password 26 | volumeMounts: 27 | - name: minio-client-mtls 28 | mountPath: /mnt/tls/ 29 | podSecurityContext: 30 | fsGroup: $ID 31 | runAsUser: $ID 32 | volumes: 33 | - name: minio-client-mtls 34 | secret: 35 | secretName: minio-client-mtls 36 | defaultMode: 420 37 | 
-------------------------------------------------------------------------------- /e2e/definitions/check/check-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Check 3 | metadata: 4 | name: k8up-check-tls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | backend: 10 | repoPasswordSecretRef: 11 | name: backup-repo 12 | key: password 13 | tlsOptions: 14 | caCert: /mnt/ca/ca.crt 15 | s3: 16 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 17 | bucket: backup 18 | accessKeyIDSecretRef: 19 | name: backup-credentials 20 | key: username 21 | secretAccessKeySecretRef: 22 | name: backup-credentials 23 | key: password 24 | volumeMounts: 25 | - name: minio-ca-tls 26 | mountPath: /mnt/ca/ 27 | podSecurityContext: 28 | fsGroup: $ID 29 | runAsUser: $ID 30 | volumes: 31 | - name: minio-ca-tls 32 | secret: 33 | secretName: minio-ca-tls 34 | defaultMode: 420 35 | -------------------------------------------------------------------------------- /e2e/definitions/check/config-mtls-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: k8up-check-mtls-env 5 | namespace: k8up-e2e-subject 6 | data: 7 | CA_CERT_FILE: /mnt/tls/ca.crt 8 | CLIENT_CERT_FILE: /mnt/tls/tls.crt 9 | CLIENT_KEY_FILE: /mnt/tls/tls.key -------------------------------------------------------------------------------- /e2e/definitions/kind/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kind.x-k8s.io/v1alpha4 2 | kind: Cluster 3 | nodes: 4 | - role: control-plane 5 | - role: worker 6 | labels: 7 | worker: true 8 | -------------------------------------------------------------------------------- /e2e/definitions/minio/helm.yaml: -------------------------------------------------------------------------------- 1 | 
replicas: 1 2 | mode: standalone 3 | resources: 4 | requests: 5 | memory: 250M 6 | persistence: 7 | size: 1Gi 8 | buckets: 9 | - name: backup 10 | policy: none 11 | purge: false 12 | mode: standalone 13 | rootUser: minioadmin 14 | rootPassword: minioadmin 15 | -------------------------------------------------------------------------------- /e2e/definitions/minio/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: minio 5 | namespace: minio 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /e2e/definitions/operator/values.yaml: -------------------------------------------------------------------------------- 1 | podAnnotations: 2 | imagesha: sha256:72859c783009c5d7cba63d94891bb06f0de93a8a6e2e658a48d8d3c93e3f0603 3 | image: 4 | pullPolicy: IfNotPresent 5 | registry: $E2E_REGISTRY 6 | repository: $E2E_REPO 7 | tag: $E2E_TAG 8 | k8up: 9 | envVars: 10 | - name: K8UP_DEBUG 11 | value: "true" 12 | - name: BACKUP_GLOBAL_BACKOFF_LIMIT 13 | value: "2" 14 | -------------------------------------------------------------------------------- /e2e/definitions/prebackup/prebackup-match-labels.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: PreBackupPod 4 | metadata: 5 | name: prebackup-label-specific-value-1 6 | namespace: k8up-e2e-subject 7 | labels: 8 | specific-values: specific-value-1 9 | spec: 10 | backupCommand: sh -c 'echo hello there' 11 | pod: 12 | spec: 13 | containers: 14 | - image: busybox 15 | command: 16 | - 'sleep' 17 | - 'infinity' 18 | imagePullPolicy: Always 19 | name: specific-label-value-1 20 | --- 21 | apiVersion: k8up.io/v1 22 | kind: PreBackupPod 23 | metadata: 24 | name: prebackup-label-exists 25 | namespace: k8up-e2e-subject 26 | 
labels: 27 | exists: arbitrary 28 | spec: 29 | backupCommand: sh -c 'echo whatup' 30 | pod: 31 | spec: 32 | containers: 33 | - image: busybox 34 | command: 35 | - 'sleep' 36 | - 'infinity' 37 | imagePullPolicy: Always 38 | name: arbitrary-label-value 39 | -------------------------------------------------------------------------------- /e2e/definitions/prebackup/prebackup-no-labels.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: PreBackupPod 4 | metadata: 5 | name: prebackup 6 | namespace: k8up-e2e-subject 7 | spec: 8 | backupCommand: sh -c 'echo hello there' 9 | pod: 10 | spec: 11 | containers: 12 | - image: busybox 13 | command: 14 | - 'sleep' 15 | - 'infinity' 16 | imagePullPolicy: Always 17 | name: busybox 18 | 19 | -------------------------------------------------------------------------------- /e2e/definitions/proxy/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | namespace: minio-e2e 6 | labels: 7 | app: nginx 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | replicas: 1 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: nginx:1.24.0 21 | ports: 22 | - containerPort: 80 23 | - containerPort: 443 24 | volumeMounts: 25 | - name: nginx-config 26 | mountPath: /etc/nginx/nginx.conf 27 | subPath: nginx.conf 28 | - name: minio-tls 29 | mountPath: /mnt/tls/ 30 | - name: minio-mtls 31 | mountPath: /mnt/mtls/ 32 | volumes: 33 | - name: nginx-config 34 | configMap: 35 | name: nginx-conf 36 | - name: minio-tls 37 | secret: 38 | secretName: minio-server-tls 39 | defaultMode: 420 40 | - name: minio-mtls 41 | secret: 42 | secretName: minio-server-mtls 43 | defaultMode: 420 44 | -------------------------------------------------------------------------------- 
/e2e/definitions/proxy/service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: minio-tls 5 | namespace: minio-e2e 6 | spec: 7 | selector: 8 | app: nginx 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 13 | name: nginx-tls-80 14 | - protocol: TCP 15 | port: 443 16 | targetPort: 443 17 | name: nginx-tls-443 18 | --- 19 | kind: Service 20 | apiVersion: v1 21 | metadata: 22 | name: minio-mtls 23 | namespace: minio-e2e 24 | spec: 25 | selector: 26 | app: nginx 27 | ports: 28 | - protocol: TCP 29 | port: 80 30 | targetPort: 80 31 | name: nginx-mtls-80 32 | - protocol: TCP 33 | port: 443 34 | targetPort: 443 35 | name: nginx-mtls-443 36 | -------------------------------------------------------------------------------- /e2e/definitions/pv/pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: subject-pvc 5 | namespace: k8up-e2e-subject 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | storageClassName: standard 13 | volumeMode: Filesystem 14 | -------------------------------------------------------------------------------- /e2e/definitions/pv/pvcs-matching-labels.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # used to test a backup with labelSelectors - the below match and should be picked up 3 | # The other, non-labeled PVC is expected to be ignored 4 | kind: PersistentVolumeClaim 5 | apiVersion: v1 6 | metadata: 7 | name: subject-pvc-label-exists 8 | namespace: k8up-e2e-subject 9 | labels: 10 | exists: value 11 | spec: 12 | accessModes: 13 | - ReadWriteMany 14 | resources: 15 | requests: 16 | storage: 100Mi 17 | storageClassName: standard 18 | volumeMode: Filesystem 19 | --- 20 | kind: PersistentVolumeClaim 21 | apiVersion: v1 22 | metadata: 23 
| name: subject-pvc-specific-value-1 24 | namespace: k8up-e2e-subject 25 | labels: 26 | specific-values: specific-value-1 27 | spec: 28 | accessModes: 29 | - ReadWriteMany 30 | resources: 31 | requests: 32 | storage: 100Mi 33 | storageClassName: standard 34 | volumeMode: Filesystem 35 | --- 36 | kind: PersistentVolumeClaim 37 | apiVersion: v1 38 | metadata: 39 | name: subject-pvc-specific-value-2 40 | namespace: k8up-e2e-subject 41 | labels: 42 | specific-values: specific-value-2 43 | spec: 44 | accessModes: 45 | - ReadWriteMany 46 | resources: 47 | requests: 48 | storage: 100Mi 49 | storageClassName: standard 50 | volumeMode: Filesystem 51 | 52 | -------------------------------------------------------------------------------- /e2e/definitions/pvc-rwo-subject/controlplane.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: pvc-rwo-subject-pvc-controlplane 5 | namespace: k8up-e2e-subject 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 10Mi 12 | storageClassName: standard 13 | volumeMode: Filesystem 14 | --- 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | name: pvc-rwo-subject-controlplane 19 | namespace: k8up-e2e-subject 20 | spec: 21 | replicas: 1 22 | selector: 23 | matchLabels: 24 | app: subject-controlplane 25 | template: 26 | metadata: 27 | labels: 28 | app: subject-controlplane 29 | spec: 30 | containers: 31 | - name: pvc-rwo-subject-container 32 | image: quay.io/prometheus/busybox:latest 33 | imagePullPolicy: IfNotPresent 34 | args: 35 | - sh 36 | - -c 37 | - | 38 | printf "$BACKUP_FILE_CONTENT" | tee "/data/$BACKUP_FILE_NAME" && \ 39 | echo && \ 40 | ls -la /data && \ 41 | echo "test file /data/$BACKUP_FILE_NAME written, sleeping now" && \ 42 | sleep infinity 43 | securityContext: 44 | runAsUser: $ID 45 | volumeMounts: 46 | - name: volume 47 | mountPath: /data 48 | env: 49 | - name: 
BACKUP_FILE_CONTENT 50 | value: "" 51 | - name: BACKUP_FILE_NAME 52 | value: "" 53 | volumes: 54 | - name: volume 55 | persistentVolumeClaim: 56 | claimName: pvc-rwo-subject-pvc-controlplane 57 | tolerations: 58 | - effect: NoSchedule 59 | key: node-role.kubernetes.io/master 60 | - effect: NoSchedule 61 | key: node-role.kubernetes.io/control-plane 62 | nodeSelector: 63 | node-role.kubernetes.io/control-plane: "" 64 | -------------------------------------------------------------------------------- /e2e/definitions/pvc-rwo-subject/worker.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: pvc-rwo-subject-pvc-worker 5 | namespace: k8up-e2e-subject 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 10Mi 12 | storageClassName: standard 13 | volumeMode: Filesystem 14 | --- 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | name: pvc-rwo-subject-worker 19 | namespace: k8up-e2e-subject 20 | spec: 21 | replicas: 1 22 | selector: 23 | matchLabels: 24 | app: subject-worker 25 | template: 26 | metadata: 27 | labels: 28 | app: subject-worker 29 | spec: 30 | containers: 31 | - name: pvc-rwo-subject-container 32 | image: quay.io/prometheus/busybox:latest 33 | imagePullPolicy: IfNotPresent 34 | args: 35 | - sh 36 | - -c 37 | - | 38 | printf "$BACKUP_FILE_CONTENT" | tee "/data/$BACKUP_FILE_NAME" && \ 39 | echo && \ 40 | ls -la /data && \ 41 | echo "test file /data/$BACKUP_FILE_NAME written, sleeping now" && \ 42 | sleep infinity 43 | securityContext: 44 | runAsUser: $ID 45 | volumeMounts: 46 | - name: volume 47 | mountPath: /data 48 | env: 49 | - name: BACKUP_FILE_CONTENT 50 | value: "" 51 | - name: BACKUP_FILE_NAME 52 | value: "" 53 | volumes: 54 | - name: volume 55 | persistentVolumeClaim: 56 | claimName: pvc-rwo-subject-pvc-worker 57 | nodeSelector: 58 | worker: "true" 59 | 
-------------------------------------------------------------------------------- /e2e/definitions/restore/config-mtls-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: k8up-s3-mtls-restore-mtls-env 5 | namespace: k8up-e2e-subject 6 | data: 7 | CA_CERT_FILE: /mnt/tls/ca.crt 8 | CLIENT_CERT_FILE: /mnt/tls/tls.crt 9 | CLIENT_KEY_FILE: /mnt/tls/tls.key 10 | RESTORE_CA_CERT_FILE: /mnt/tls/ca.crt 11 | RESTORE_CLIENT_CERT_FILE: /mnt/tls/tls.crt 12 | RESTORE_CLIENT_KEY_FILE: /mnt/tls/tls.key -------------------------------------------------------------------------------- /e2e/definitions/restore/restore-backupcommand.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: k8up-restore-backupcommand 5 | namespace: k8up-e2e-subject 6 | spec: 7 | snapshot: $SNAPSHOT_ID 8 | failedJobsHistoryLimit: 1 9 | successfulJobsHistoryLimit: 1 10 | restoreMethod: 11 | folder: 12 | claimName: subject-pvc 13 | backend: 14 | repoPasswordSecretRef: 15 | name: backup-repo 16 | key: password 17 | s3: 18 | endpoint: http://minio.minio-e2e.svc.cluster.local:9000 19 | bucket: backup 20 | accessKeyIDSecretRef: 21 | name: backup-credentials 22 | key: username 23 | secretAccessKeySecretRef: 24 | name: backup-credentials 25 | key: password 26 | podSecurityContext: 27 | runAsUser: $ID 28 | -------------------------------------------------------------------------------- /e2e/definitions/restore/restore-mtls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: k8up-restore-mtls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | folder: 11 | claimName: subject-pvc 12 | backend: 13 | repoPasswordSecretRef: 14 | name: 
backup-repo 15 | key: password 16 | tlsOptions: 17 | caCert: /mnt/tls/ca.crt 18 | clientCert: /mnt/tls/tls.crt 19 | clientKey: /mnt/tls/tls.key 20 | s3: 21 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 22 | bucket: backup 23 | accessKeyIDSecretRef: 24 | name: backup-credentials 25 | key: username 26 | secretAccessKeySecretRef: 27 | name: backup-credentials 28 | key: password 29 | volumeMounts: 30 | - name: minio-client-mtls 31 | mountPath: /mnt/tls/ 32 | podSecurityContext: 33 | fsGroup: $ID 34 | runAsUser: $ID 35 | volumes: 36 | - name: minio-client-mtls 37 | secret: 38 | secretName: minio-client-mtls 39 | defaultMode: 420 40 | -------------------------------------------------------------------------------- /e2e/definitions/restore/restore-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: k8up-restore-tls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | folder: 11 | claimName: subject-pvc 12 | backend: 13 | repoPasswordSecretRef: 14 | name: backup-repo 15 | key: password 16 | tlsOptions: 17 | caCert: /mnt/ca/ca.crt 18 | s3: 19 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 20 | bucket: backup 21 | accessKeyIDSecretRef: 22 | name: backup-credentials 23 | key: username 24 | secretAccessKeySecretRef: 25 | name: backup-credentials 26 | key: password 27 | volumeMounts: 28 | - name: minio-ca-tls 29 | mountPath: /mnt/ca/ 30 | podSecurityContext: 31 | fsGroup: $ID 32 | runAsUser: $ID 33 | volumes: 34 | - name: minio-ca-tls 35 | secret: 36 | secretName: minio-ca-tls 37 | defaultMode: 420 38 | -------------------------------------------------------------------------------- /e2e/definitions/restore/restore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: 
k8up-restore 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | folder: 11 | claimName: subject-pvc 12 | backend: 13 | repoPasswordSecretRef: 14 | name: backup-repo 15 | key: password 16 | s3: 17 | endpoint: http://minio.minio-e2e.svc.cluster.local:9000 18 | bucket: backup 19 | accessKeyIDSecretRef: 20 | name: backup-credentials 21 | key: username 22 | secretAccessKeySecretRef: 23 | name: backup-credentials 24 | key: password 25 | podSecurityContext: 26 | runAsUser: $ID 27 | -------------------------------------------------------------------------------- /e2e/definitions/restore/s3-mtls-restore-mtls-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: k8up-s3-mtls-restore-mtls-env 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | s3: 11 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 12 | bucket: restore 13 | accessKeyIDSecretRef: 14 | name: backup-credentials 15 | key: username 16 | secretAccessKeySecretRef: 17 | name: backup-credentials 18 | key: password 19 | backend: 20 | repoPasswordSecretRef: 21 | name: backup-repo 22 | key: password 23 | envFrom: 24 | - configMapRef: 25 | name: k8up-s3-mtls-restore-mtls-env 26 | s3: 27 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 28 | bucket: backup 29 | accessKeyIDSecretRef: 30 | name: backup-credentials 31 | key: username 32 | secretAccessKeySecretRef: 33 | name: backup-credentials 34 | key: password 35 | volumeMounts: 36 | - name: minio-client-mtls 37 | mountPath: /mnt/tls/ 38 | podSecurityContext: 39 | fsGroup: $ID 40 | runAsUser: $ID 41 | volumes: 42 | - name: minio-client-mtls 43 | secret: 44 | secretName: minio-client-mtls 45 | defaultMode: 420 46 | -------------------------------------------------------------------------------- 
/e2e/definitions/restore/s3-mtls-restore-mtls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: k8up-s3-mtls-restore-mtls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | tlsOptions: 11 | caCert: /mnt/tls/ca.crt 12 | clientCert: /mnt/tls/tls.crt 13 | clientKey: /mnt/tls/tls.key 14 | s3: 15 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 16 | bucket: restore 17 | accessKeyIDSecretRef: 18 | name: backup-credentials 19 | key: username 20 | secretAccessKeySecretRef: 21 | name: backup-credentials 22 | key: password 23 | backend: 24 | repoPasswordSecretRef: 25 | name: backup-repo 26 | key: password 27 | tlsOptions: 28 | caCert: /mnt/tls/ca.crt 29 | clientCert: /mnt/tls/tls.crt 30 | clientKey: /mnt/tls/tls.key 31 | s3: 32 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 33 | bucket: backup 34 | accessKeyIDSecretRef: 35 | name: backup-credentials 36 | key: username 37 | secretAccessKeySecretRef: 38 | name: backup-credentials 39 | key: password 40 | volumeMounts: 41 | - name: minio-client-mtls 42 | mountPath: /mnt/tls/ 43 | podSecurityContext: 44 | fsGroup: $ID 45 | runAsUser: $ID 46 | volumes: 47 | - name: minio-client-mtls 48 | secret: 49 | secretName: minio-client-mtls 50 | defaultMode: 420 51 | -------------------------------------------------------------------------------- /e2e/definitions/restore/s3-mtls-restore-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: k8up-s3-mtls-restore-tls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | tlsOptions: 11 | caCert: /mnt/tls/ca.crt 12 | clientCert: /mnt/tls/tls.crt 13 | clientKey: /mnt/tls/tls.key 14 | s3: 15 | endpoint: 
https://minio-mtls.minio-e2e.svc.cluster.local 16 | bucket: restore 17 | accessKeyIDSecretRef: 18 | name: backup-credentials 19 | key: username 20 | secretAccessKeySecretRef: 21 | name: backup-credentials 22 | key: password 23 | volumeMounts: 24 | - name: minio-client-mtls 25 | mountPath: /mnt/tls/ 26 | backend: 27 | repoPasswordSecretRef: 28 | name: backup-repo 29 | key: password 30 | tlsOptions: 31 | caCert: /mnt/ca/ca.crt 32 | s3: 33 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 34 | bucket: backup 35 | accessKeyIDSecretRef: 36 | name: backup-credentials 37 | key: username 38 | secretAccessKeySecretRef: 39 | name: backup-credentials 40 | key: password 41 | volumeMounts: 42 | - name: minio-ca-tls 43 | mountPath: /mnt/ca/ 44 | podSecurityContext: 45 | fsGroup: $ID 46 | runAsUser: $ID 47 | volumes: 48 | - name: minio-ca-tls 49 | secret: 50 | secretName: minio-ca-tls 51 | defaultMode: 420 52 | - name: minio-client-mtls 53 | secret: 54 | secretName: minio-client-mtls 55 | defaultMode: 420 56 | -------------------------------------------------------------------------------- /e2e/definitions/restore/s3-tls-restore-mtls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: k8up-s3-tls-restore-mtls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | tlsOptions: 11 | caCert: /mnt/ca/ca.crt 12 | s3: 13 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 14 | bucket: restore 15 | accessKeyIDSecretRef: 16 | name: backup-credentials 17 | key: username 18 | secretAccessKeySecretRef: 19 | name: backup-credentials 20 | key: password 21 | volumeMounts: 22 | - name: minio-ca-tls 23 | mountPath: /mnt/ca/ 24 | backend: 25 | repoPasswordSecretRef: 26 | name: backup-repo 27 | key: password 28 | tlsOptions: 29 | caCert: /mnt/tls/ca.crt 30 | clientCert: /mnt/tls/tls.crt 31 | clientKey: 
/mnt/tls/tls.key 32 | s3: 33 | endpoint: https://minio-mtls.minio-e2e.svc.cluster.local 34 | bucket: backup 35 | accessKeyIDSecretRef: 36 | name: backup-credentials 37 | key: username 38 | secretAccessKeySecretRef: 39 | name: backup-credentials 40 | key: password 41 | volumeMounts: 42 | - name: minio-client-mtls 43 | mountPath: /mnt/tls/ 44 | podSecurityContext: 45 | fsGroup: $ID 46 | runAsUser: $ID 47 | volumes: 48 | - name: minio-ca-tls 49 | secret: 50 | secretName: minio-ca-tls 51 | defaultMode: 420 52 | - name: minio-client-mtls 53 | secret: 54 | secretName: minio-client-mtls 55 | defaultMode: 420 56 | -------------------------------------------------------------------------------- /e2e/definitions/restore/s3-tls-restore-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: Restore 3 | metadata: 4 | name: k8up-s3-tls-restore-tls 5 | namespace: k8up-e2e-subject 6 | spec: 7 | failedJobsHistoryLimit: 1 8 | successfulJobsHistoryLimit: 1 9 | restoreMethod: 10 | tlsOptions: 11 | caCert: /mnt/ca/ca.crt 12 | s3: 13 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 14 | bucket: restore 15 | accessKeyIDSecretRef: 16 | name: backup-credentials 17 | key: username 18 | secretAccessKeySecretRef: 19 | name: backup-credentials 20 | key: password 21 | backend: 22 | repoPasswordSecretRef: 23 | name: backup-repo 24 | key: password 25 | tlsOptions: 26 | caCert: /mnt/ca/ca.crt 27 | s3: 28 | endpoint: https://minio-tls.minio-e2e.svc.cluster.local 29 | bucket: backup 30 | accessKeyIDSecretRef: 31 | name: backup-credentials 32 | key: username 33 | secretAccessKeySecretRef: 34 | name: backup-credentials 35 | key: password 36 | volumeMounts: 37 | - name: minio-ca-tls 38 | mountPath: /mnt/ca/ 39 | podSecurityContext: 40 | fsGroup: $ID 41 | runAsUser: $ID 42 | volumes: 43 | - name: minio-ca-tls 44 | secret: 45 | secretName: minio-ca-tls 46 | defaultMode: 420 47 | 
-------------------------------------------------------------------------------- /e2e/definitions/secrets/secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: backup-credentials 5 | namespace: k8up-e2e-subject 6 | type: Opaque 7 | stringData: 8 | username: minioadmin 9 | password: minioadmin 10 | --- 11 | apiVersion: v1 12 | kind: Secret 13 | metadata: 14 | name: backup-repo 15 | namespace: k8up-e2e-subject 16 | type: Opaque 17 | stringData: 18 | password: myreposecret 19 | -------------------------------------------------------------------------------- /e2e/definitions/subject/deployment-pvc-with-labels.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Those are used to validate 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: subject-deployment 7 | namespace: k8up-e2e-subject 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: subject 13 | template: 14 | metadata: 15 | labels: 16 | app: subject 17 | spec: 18 | containers: 19 | - name: subject-container 20 | image: quay.io/prometheus/busybox:latest 21 | imagePullPolicy: IfNotPresent 22 | args: 23 | - sh 24 | - -c 25 | - | 26 | printf "$BACKUP_FILE_CONTENT" | tee "/data/$BACKUP_FILE_NAME" && \ 27 | echo && \ 28 | ls -la /data && \ 29 | echo "test file /data/$BACKUP_FILE_NAME written, sleeping now" && \ 30 | sleep infinity 31 | securityContext: 32 | runAsUser: $ID 33 | volumeMounts: 34 | - name: volume 35 | mountPath: /data 36 | env: 37 | - name: BACKUP_FILE_CONTENT 38 | value: "" 39 | - name: BACKUP_FILE_NAME 40 | value: "" 41 | volumes: 42 | - name: volume 43 | persistentVolumeClaim: 44 | claimName: subject-pvc 45 | -------------------------------------------------------------------------------- /e2e/definitions/subject/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: subject-deployment 6 | namespace: k8up-e2e-subject 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: subject 12 | template: 13 | metadata: 14 | labels: 15 | app: subject 16 | spec: 17 | containers: 18 | - name: subject-container 19 | image: quay.io/prometheus/busybox:latest 20 | imagePullPolicy: IfNotPresent 21 | args: 22 | - sh 23 | - -c 24 | - | 25 | printf "$BACKUP_FILE_CONTENT" | tee "/data/$BACKUP_FILE_NAME" && \ 26 | echo && \ 27 | ls -la /data && \ 28 | echo "test file /data/$BACKUP_FILE_NAME written, sleeping now" && \ 29 | sleep infinity 30 | securityContext: 31 | runAsUser: $ID 32 | volumeMounts: 33 | - name: volume 34 | mountPath: /data 35 | env: 36 | - name: BACKUP_FILE_CONTENT 37 | value: "" 38 | - name: BACKUP_FILE_NAME 39 | value: "" 40 | volumes: 41 | - name: volume 42 | persistentVolumeClaim: 43 | claimName: subject-pvc 44 | -------------------------------------------------------------------------------- /e2e/kind.mk: -------------------------------------------------------------------------------- 1 | curl_args ?= --location --fail --silent --show-error 2 | 3 | KIND ?= $(go_bin)/kind 4 | 5 | .PHONY: kind-setup 6 | kind-setup: export KUBECONFIG = $(KIND_KUBECONFIG) 7 | kind-setup: $(KIND_KUBECONFIG) | $(e2etest_dir) ## Creates the kind cluster 8 | 9 | .PHONY: kind-clean 10 | kind-clean: export KUBECONFIG = $(KIND_KUBECONFIG) 11 | kind-clean: $(KIND) ## Remove the kind Cluster 12 | @$(KIND) delete cluster --name $(KIND_CLUSTER) || true 13 | @rm -rf $(KIND) $(kind_marker) $(KIND_KUBECONFIG) 14 | 15 | ### 16 | ### Artifacts 17 | ### 18 | 19 | $(KIND_KUBECONFIG): export KUBECONFIG = $(KIND_KUBECONFIG) 20 | $(KIND_KUBECONFIG): $(KIND) 21 | @mkdir -p e2e/debug/data/pvc-subject 22 | $(KIND) create cluster \ 23 | --name $(KIND_CLUSTER) \ 24 | --image kindest/node:$(KIND_NODE_VERSION) \ 25 | --config e2e/definitions/kind/config.yaml 26 | @kubectl version 27 | @kubectl 
cluster-info 28 | @kubectl config use-context kind-$(KIND_CLUSTER) 29 | # Applies local-path-config.yaml to kind cluster and forces restart of provisioner - can be simplified once https://github.com/kubernetes-sigs/kind/pull/3090 is merged. 30 | # This is necessary due to the multi node cluster. Classic k8s hostPath provisioner doesn't permit multi node and sharedFileSystemPath support is only in local-path-provisioner v0.0.23. 31 | @kubectl apply -n local-path-storage -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.23/deploy/local-path-storage.yaml 32 | @kubectl get cm -n local-path-storage local-path-config -o yaml|yq $(yq --help | grep -q eval && echo e) '.data."config.json"="{\"nodePathMap\":[],\"sharedFileSystemPath\": \"/tmp/e2e/local-path-provisioner\"}"'|kubectl apply -f - 33 | @kubectl delete po -n local-path-storage --all 34 | 35 | $(KIND): export GOBIN = $(go_bin) 36 | $(KIND): | $(go_bin) 37 | $(GO_EXEC) install sigs.k8s.io/kind@latest 38 | -------------------------------------------------------------------------------- /e2e/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "operator", 3 | "lockfileVersion": 2, 4 | "requires": true, 5 | "packages": { 6 | "": { 7 | "name": "operator", 8 | "devDependencies": { 9 | "bats": "1.11.0" 10 | } 11 | }, 12 | "node_modules/bats": { 13 | "version": "1.11.0", 14 | "resolved": "https://registry.npmjs.org/bats/-/bats-1.11.0.tgz", 15 | "integrity": "sha512-qiKdnS4ID3bJ1MaEOKuZe12R4w+t+psJF0ICj+UdkiHBBoObPMHv8xmD3w6F4a5qwUyZUHS+413lxENBNy8xcQ==", 16 | "dev": true, 17 | "bin": { 18 | "bats": "bin/bats" 19 | } 20 | } 21 | }, 22 | "dependencies": { 23 | "bats": { 24 | "version": "1.11.0", 25 | "resolved": "https://registry.npmjs.org/bats/-/bats-1.11.0.tgz", 26 | "integrity": "sha512-qiKdnS4ID3bJ1MaEOKuZe12R4w+t+psJF0ICj+UdkiHBBoObPMHv8xmD3w6F4a5qwUyZUHS+413lxENBNy8xcQ==", 27 | "dev": true 28 | } 29 | } 30 | } 31 | 
-------------------------------------------------------------------------------- /e2e/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "operator", 3 | "devDependencies": { 4 | "bats": "1.11.0" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /e2e/test-01-lint.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | load "lib/utils" 3 | load "lib/linter" 4 | 5 | @test "Lint assertions in e2e tests" { 6 | 7 | for file in test*.bats; do 8 | echo "Linting '${file}'..." 9 | run lint "${file}" 10 | 11 | if [ $status -ne 0 ]; then 12 | echo "$output"; 13 | return $status; 14 | fi 15 | done 16 | } 17 | -------------------------------------------------------------------------------- /e2e/test-02-deployment.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load "lib/utils" 4 | load "lib/detik" 5 | load "lib/k8up" 6 | 7 | DETIK_CLIENT_NAME="kubectl" 8 | DETIK_CLIENT_NAMESPACE="k8up-system" 9 | DEBUG_DETIK="true" 10 | 11 | @test "Given Operator config, When applying manifests, Then expect running pod" { 12 | # Remove traces of operator deployments from other tests 13 | kubectl delete namespace "$DETIK_CLIENT_NAMESPACE" --ignore-not-found 14 | kubectl create namespace "$DETIK_CLIENT_NAMESPACE" || true 15 | 16 | given_a_running_operator 17 | 18 | try "at most 10 times every 2s to find 1 pod named 'k8up' with '.spec.containers[*].image' being '${E2E_IMAGE}'" 19 | try "at most 20 times every 2s to find 1 pod named 'k8up' with 'status' being 'running'" 20 | } 21 | -------------------------------------------------------------------------------- /e2e/test-04-restore.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load "lib/utils" 4 | load "lib/detik" 5 | load "lib/k8up" 6 | 7 | # 
shellcheck disable=SC2034 8 | DETIK_CLIENT_NAME="kubectl" 9 | # shellcheck disable=SC2034 10 | DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" 11 | # shellcheck disable=SC2034 12 | DEBUG_DETIK="true" 13 | 14 | @test "Given an existing Restic repository, When creating a Restore, Then Restore to PVC" { 15 | # Backup 16 | expected_content="Old content: $(timestamp)" 17 | expected_filename="old_file.txt" 18 | given_a_running_operator 19 | given_a_clean_ns 20 | given_s3_storage 21 | given_an_existing_backup "${expected_filename}" "${expected_content}" 22 | 23 | # Delete and create new subject 24 | new_content="New content: $(timestamp)" 25 | new_filename="new_file.txt" 26 | given_a_clean_ns 27 | given_a_subject "${new_filename}" "${new_content}" 28 | 29 | # Restore 30 | kubectl apply -f definitions/secrets 31 | yq e '.spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/restore.yaml | kubectl apply -f - 32 | 33 | try "at most 10 times every 1s to get Restore named 'k8up-restore' and verify that '.status.started' is 'true'" 34 | try "at most 10 times every 1s to get Job named 'k8up-restore' and verify that '.status.active' is '1'" 35 | 36 | wait_until restore/k8up-restore completed 37 | verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-restore'" 38 | 39 | expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" 40 | expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${new_filename}" "${new_content}" 41 | } 42 | -------------------------------------------------------------------------------- /e2e/test-09-pod-backupcommand.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load "lib/utils" 4 | load "lib/detik" 5 | load "lib/k8up" 6 | 7 | # shellcheck disable=SC2034 8 | DETIK_CLIENT_NAME="kubectl" 9 | # shellcheck disable=SC2034 10 | 
DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" 11 | # shellcheck disable=SC2034 12 | DEBUG_DETIK="true" 13 | 14 | @test "Creating a Backup of an annotated pod" { 15 | expected_content="expected content: $(timestamp)" 16 | expected_filename="expected_filename.txt" 17 | 18 | given_a_running_operator 19 | given_a_clean_ns 20 | given_s3_storage 21 | given_an_annotated_subject_pod "${expected_filename}" "${expected_content}" 22 | 23 | kubectl apply -f definitions/secrets 24 | yq e '.spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup.yaml | kubectl apply -f - 25 | 26 | try "at most 10 times every 5s to get backup named 'k8up-backup' and verify that '.status.started' is 'true'" 27 | verify_object_value_by_label job 'k8up.io/owned-by=backup_k8up-backup' '.status.active' 1 true 28 | 29 | wait_until backup/k8up-backup completed 30 | 31 | run restic snapshots 32 | 33 | echo "---BEGIN restic snapshots output---" 34 | echo "${output}" | jq . 35 | echo "---END---" 36 | 37 | echo -n "Number of Snapshots >= 1? 
" 38 | jq -e 'length >= 1' <<< "${output}" # Ensure that there was actually a backup created 39 | 40 | run get_latest_snap_by_path /k8up-e2e-subject-subject-container.txt 41 | 42 | run restic dump --path /k8up-e2e-subject-subject-container.txt "${output}" k8up-e2e-subject-subject-container.txt 43 | 44 | echo "---BEGIN actual /k8up-e2e-subject-subject-container.txt---" 45 | echo "${output}" 46 | echo "---END---" 47 | 48 | echo "${output} = ${expected_content}" 49 | [ "${output}" = "${expected_content}" ] 50 | } 51 | -------------------------------------------------------------------------------- /e2e/test-10-restore-self-signed-tls.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load "lib/utils" 4 | load "lib/detik" 5 | load "lib/k8up" 6 | 7 | # shellcheck disable=SC2034 8 | DETIK_CLIENT_NAME="kubectl" 9 | # shellcheck disable=SC2034 10 | DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" 11 | # shellcheck disable=SC2034 12 | DEBUG_DETIK="true" 13 | 14 | @test "Given an existing Restic repository, When creating a Restore (mTLS), Then Restore to S3 (mTLS) - using self-signed issuer" { 15 | # Backup 16 | expected_content="Old content for mtls: $(timestamp)" 17 | expected_filename="old_file.txt" 18 | given_a_running_operator 19 | given_a_clean_ns 20 | given_s3_storage 21 | give_self_signed_issuer 22 | given_an_existing_mtls_backup "${expected_filename}" "${expected_content}" 23 | 24 | # Restore 25 | kubectl apply -f definitions/secrets 26 | yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-mtls-restore-mtls.yaml | kubectl apply -f - 27 | 28 | try "at most 10 times every 1s to get Restore named 'k8up-s3-mtls-restore-mtls' and verify that '.status.started' is 'true'" 29 | try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-restore-mtls' and verify that '.status.active' is '1'" 30 | 31 | wait_until 
restore/k8up-s3-mtls-restore-mtls completed 32 | verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-mtls-restore-mtls'" 33 | 34 | expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" 35 | } 36 | -------------------------------------------------------------------------------- /e2e/test-11-archive-self-signed-tls.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load "lib/utils" 4 | load "lib/detik" 5 | load "lib/k8up" 6 | 7 | # shellcheck disable=SC2034 8 | DETIK_CLIENT_NAME="kubectl" 9 | # shellcheck disable=SC2034 10 | DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" 11 | # shellcheck disable=SC2034 12 | DEBUG_DETIK="true" 13 | 14 | @test "Given an existing Restic repository, When creating a Archive (mTLS), Then Restore to S3 (mTLS) - using self-signed issuer" { 15 | # Backup 16 | expected_content="Old content for mtls: $(timestamp)" 17 | expected_filename="old_file.txt" 18 | given_a_running_operator 19 | given_a_clean_ns 20 | given_s3_storage 21 | give_self_signed_issuer 22 | given_an_existing_mtls_backup "${expected_filename}" "${expected_content}" 23 | given_a_clean_archive archive 24 | 25 | # Archive 26 | kubectl apply -f definitions/secrets 27 | yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-mtls-archive-mtls.yaml | kubectl apply -f - 28 | 29 | try "at most 10 times every 1s to get Archive named 'k8up-s3-mtls-archive-mtls' and verify that '.status.started' is 'true'" 30 | try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-archive-mtls' and verify that '.status.active' is '1'" 31 | 32 | wait_until archive/k8up-s3-mtls-archive-mtls completed 33 | verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-mtls-archive-mtls'" 34 | 35 | run restic 
list snapshots 36 | 37 | echo "---BEGIN total restic snapshots output---" 38 | total_snapshots=$(echo -e "${output}" | wc -l) 39 | echo "${total_snapshots}" 40 | echo "---END---" 41 | 42 | run mc ls minio/archive 43 | 44 | echo "---BEGIN total archives output---" 45 | total_archives=$(echo -n -e "${output}" | wc -l) 46 | echo "${total_archives}" 47 | echo "---END---" 48 | 49 | [ "$total_snapshots" -eq "$total_archives" ] 50 | } 51 | -------------------------------------------------------------------------------- /e2e/test-12-annotated-failure.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load "lib/utils" 4 | load "lib/detik" 5 | load "lib/k8up" 6 | 7 | # shellcheck disable=SC2034 8 | DETIK_CLIENT_NAME="kubectl" 9 | # shellcheck disable=SC2034 10 | DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" 11 | # shellcheck disable=SC2034 12 | DEBUG_DETIK="true" 13 | 14 | @test "Given annotated app, When creating a backup, Then expect Error" { 15 | expected_content="expected content: $(timestamp)" 16 | expected_filename="expected_filename.txt" 17 | 18 | given_a_running_operator 19 | given_a_clean_ns 20 | given_s3_storage 21 | given_a_broken_annotated_subject "${expected_filename}" "${expected_content}" 22 | 23 | kubectl apply -f definitions/secrets 24 | yq e '.spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup.yaml | kubectl apply -f - 25 | 26 | try "at most 10 times every 5s to get backup named 'k8up-backup' and verify that '.status.started' is 'true'" 27 | verify_object_value_by_label job 'k8up.io/owned-by=backup_k8up-backup' '.status.active' 1 true 28 | 29 | wait_for_until_jsonpath backup/k8up-backup 2m 'jsonpath={.status.conditions[?(@.type=="Completed")].reason}=Failed' 30 | 31 | } 32 | -------------------------------------------------------------------------------- /e2e/test-13-cleanup-empty-jobs.bats: -------------------------------------------------------------------------------- 1 
var _, b, _, _ = runtime.Caller(0)

// Root is the root folder of this project, resolved relative to the
// location of this source file at load time.
var Root = filepath.Join(filepath.Dir(b), "..")
11 | exit 0 12 | fi 13 | 14 | echo ">>>>>>>>> $(date) <<<<<<<<" | \ 15 | tee -a "${PID_FILE}.stdout" >> "${PID_FILE}.stderr" 16 | 17 | env | grep MINIO 18 | 19 | "${@}" 1>>"${PID_FILE}.stdout" 2>>"${PID_FILE}.stderr" & 20 | PID=$! 21 | 22 | echo $PID > "${PID_FILE}" 23 | 24 | echo "Running '${*}' with PID $PID" 25 | echo "Writing STDOUT to '${PID_FILE}.stdout'" 26 | echo "Writing STDERR to '${PID_FILE}.stderr'" 27 | 28 | -------------------------------------------------------------------------------- /fetch_restic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -exuo pipefail 4 | 5 | cpu_arch() { 6 | case $(uname -m) in 7 | i386) echo "386" ;; 8 | i686) echo "386" ;; 9 | x86_64) echo "amd64" ;; 10 | arm) echo "arm" ;; 11 | armv7l) echo "arm" ;; 12 | aarch64) echo "arm64" ;; 13 | *) exit 1 ;; 14 | esac 15 | } 16 | 17 | os() { 18 | case $(uname -s) in 19 | Darwin) echo "darwin" ;; 20 | Linux) echo "linux" ;; 21 | *) exit 1 ;; 22 | esac 23 | } 24 | 25 | restic_version() { 26 | grep -e 'restic/restic' < go.mod \ 27 | | grep -oe '[0-9]*\.[0-9]*\.[0-9]*' 28 | } 29 | 30 | fetch_restic() { 31 | local RESTIC_DEST="${1}" 32 | local RESTIC_VERSION="${2-$(restic_version)}" 33 | local RESTIC_OS="${3-$(os)}" 34 | local RESTIC_ARCH="${4-$(cpu_arch)}" 35 | 36 | curl \ 37 | --silent \ 38 | --location \ 39 | "https://github.com/restic/restic/releases/download/v${RESTIC_VERSION}/restic_${RESTIC_VERSION}_${RESTIC_OS}_${RESTIC_ARCH}.bz2" \ 40 | | bzip2 -d \ 41 | > "${RESTIC_DEST}" 42 | chmod a+x "${RESTIC_DEST}" 43 | } 44 | 45 | fetch_restic "${@}" 46 | -------------------------------------------------------------------------------- /kill.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "${#}" != "1" ]; then 4 | echo "Usage: ${0} " 5 | exit 1 6 | fi 7 | 8 | if [ -f "${1}" ]; then 9 | xargs kill < "${1}" 10 | rm -f "${1}" 11 | fi 12 | 
-------------------------------------------------------------------------------- /operator/README.md: -------------------------------------------------------------------------------- 1 | # Operator Support Code 2 | 3 | This Go module contains almost all of the code that supports the operator part K8up. 4 | 5 | The CLI entrypoint is in [`cmd/operator`](../cmd/operator). 6 | 7 | The rest of the operator's code follows the layout that [the _Operator SDK_](https://sdk.operatorframework.io/docs/building-operators/golang/) expects: 8 | 9 | - [`/api`](../api/v1) contains the custom resource definitions (CRDs) 10 | - [`/config`](../config) contains configuration and sample files 11 | - [`/controllers`](../controllers) contains the controllers which act on the events of the CRDs (and other events) through reconciliation loops 12 | -------------------------------------------------------------------------------- /operator/archivecontroller/setup.go: -------------------------------------------------------------------------------- 1 | package archivecontroller 2 | 3 | import ( 4 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 5 | "github.com/k8up-io/k8up/v2/operator/reconciler" 6 | batchv1 "k8s.io/api/batch/v1" 7 | controllerruntime "sigs.k8s.io/controller-runtime" 8 | "sigs.k8s.io/controller-runtime/pkg/predicate" 9 | ) 10 | 11 | // +kubebuilder:rbac:groups=k8up.io,resources=archives,verbs=get;list;watch;create;update;patch;delete 12 | // +kubebuilder:rbac:groups=k8up.io,resources=archives/status;archives/finalizers,verbs=get;update;patch 13 | // +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete 14 | 15 | // SetupWithManager configures the reconciler. 
16 | func SetupWithManager(mgr controllerruntime.Manager) error { 17 | name := "archive.k8up.io" 18 | r := reconciler.NewReconciler[*k8upv1.Archive, *k8upv1.ArchiveList](mgr.GetClient(), &ArchiveReconciler{ 19 | Kube: mgr.GetClient(), 20 | }) 21 | return controllerruntime.NewControllerManagedBy(mgr). 22 | For(&k8upv1.Archive{}). 23 | Owns(&batchv1.Job{}). 24 | Named(name). 25 | WithEventFilter(predicate.GenerationChangedPredicate{}). 26 | Complete(r) 27 | } 28 | -------------------------------------------------------------------------------- /operator/cfg/config_test.go: -------------------------------------------------------------------------------- 1 | package cfg 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func Test_Configuration_GetGlobalFailedJobsHistoryLimit(t *testing.T) { 10 | t.Run("GlobalKeepJobsIfNotSet", func(t *testing.T) { 11 | c := Configuration{ 12 | GlobalFailedJobsHistoryLimit: -5, 13 | GlobalKeepJobs: 17, 14 | } 15 | assert.Equal(t, 17, c.GetGlobalFailedJobsHistoryLimit()) 16 | }) 17 | t.Run("ReturnsGlobalFailedJobsHistoryLimitIfSet", func(t *testing.T) { 18 | c := Configuration{ 19 | GlobalFailedJobsHistoryLimit: 17, 20 | } 21 | assert.Equal(t, 17, c.GetGlobalFailedJobsHistoryLimit()) 22 | }) 23 | t.Run("LimitsNegativeValuesToZero", func(t *testing.T) { 24 | c := Configuration{ 25 | GlobalFailedJobsHistoryLimit: -5, 26 | GlobalKeepJobs: -17, 27 | } 28 | assert.Equal(t, 0, c.GetGlobalFailedJobsHistoryLimit()) 29 | }) 30 | } 31 | 32 | func Test_Configuration_GetGlobalSuccessfulJobsHistoryLimit(t *testing.T) { 33 | t.Run("GlobalKeepJobsIfNotSet", func(t *testing.T) { 34 | c := Configuration{ 35 | GlobalSuccessfulJobsHistoryLimit: -2, 36 | GlobalKeepJobs: 17, 37 | } 38 | assert.Equal(t, 17, c.GetGlobalSuccessfulJobsHistoryLimit()) 39 | }) 40 | t.Run("ReturnsGlobalSuccessfulJobsHistoryLimitIfSet", func(t *testing.T) { 41 | c := Configuration{ 42 | GlobalSuccessfulJobsHistoryLimit: 17, 43 | } 44 | 
assert.Equal(t, 17, c.GetGlobalSuccessfulJobsHistoryLimit()) 45 | }) 46 | t.Run("LimitsNegativeValuesToZero", func(t *testing.T) { 47 | c := Configuration{ 48 | GlobalSuccessfulJobsHistoryLimit: -2, 49 | GlobalKeepJobs: -17, 50 | } 51 | assert.Equal(t, 0, c.GetGlobalSuccessfulJobsHistoryLimit()) 52 | }) 53 | } 54 | -------------------------------------------------------------------------------- /operator/checkcontroller/setup.go: -------------------------------------------------------------------------------- 1 | package checkcontroller 2 | 3 | import ( 4 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 5 | "github.com/k8up-io/k8up/v2/operator/reconciler" 6 | batchv1 "k8s.io/api/batch/v1" 7 | ctrl "sigs.k8s.io/controller-runtime" 8 | "sigs.k8s.io/controller-runtime/pkg/predicate" 9 | ) 10 | 11 | // +kubebuilder:rbac:groups=k8up.io,resources=checks,verbs=get;list;watch;create;update;patch;delete 12 | // +kubebuilder:rbac:groups=k8up.io,resources=checks/status;checks/finalizers,verbs=get;update;patch 13 | // +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete 14 | 15 | // SetupWithManager configures the reconciler. 16 | func SetupWithManager(mgr ctrl.Manager) error { 17 | name := "check.k8up.io" 18 | r := reconciler.NewReconciler[*k8upv1.Check, *k8upv1.CheckList](mgr.GetClient(), &CheckReconciler{ 19 | Kube: mgr.GetClient(), 20 | }) 21 | return ctrl.NewControllerManagedBy(mgr). 22 | For(&k8upv1.Check{}). 23 | Owns(&batchv1.Job{}). 24 | Named(name). 25 | WithEventFilter(predicate.GenerationChangedPredicate{}). 
26 | Complete(r) 27 | } 28 | -------------------------------------------------------------------------------- /operator/controllers.go: -------------------------------------------------------------------------------- 1 | package operator 2 | 3 | // +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update 4 | // +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch 5 | -------------------------------------------------------------------------------- /operator/executor/cleaner/cleaner_test.go: -------------------------------------------------------------------------------- 1 | package cleaner 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/apimachinery/pkg/util/uuid" 9 | 10 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 11 | ) 12 | 13 | func TestGroupByStatus(t *testing.T) { 14 | successJob := createJob(completedStatusWithReason(k8upv1.ReasonSucceeded)) 15 | failedJob := createJob(completedStatusWithReason(k8upv1.ReasonFailed)) 16 | runningJob := createJob(k8upv1.Status{}) 17 | 18 | runningJobs, failedJobs, successfulJobs := groupByStatus([]k8upv1.JobObject{&successJob, &failedJob, &runningJob}) 19 | assert.Len(t, runningJobs, 1) 20 | assert.True(t, runningJobs[0] == &runningJob) 21 | assert.Len(t, failedJobs, 1) 22 | assert.True(t, failedJobs[0] == &failedJob) 23 | assert.Len(t, successfulJobs, 1) 24 | assert.True(t, successfulJobs[0] == &successJob) 25 | 26 | } 27 | 28 | func createJob(status k8upv1.Status) k8upv1.Restore { 29 | return k8upv1.Restore{ 30 | ObjectMeta: metav1.ObjectMeta{Name: "job-" + string(uuid.NewUUID())}, 31 | Spec: k8upv1.RestoreSpec{}, 32 | Status: status, 33 | } 34 | } 35 | 36 | func completedStatusWithReason(r k8upv1.ConditionReason) k8upv1.Status { 37 | return k8upv1.Status{ 38 | Conditions: []metav1.Condition{ 39 | { 40 | Type: k8upv1.ConditionCompleted.String(), 41 | Status: metav1.ConditionTrue, 42 | 
Reason: r.String(), 43 | }, 44 | }, 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /operator/job/job_test.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestSha256Hash(t *testing.T) { 10 | tests := map[string]struct { 11 | givenString string 12 | goldenString string 13 | }{ 14 | "EmptyString": { 15 | givenString: "", 16 | goldenString: "", 17 | }, 18 | "RepositoryS3": { 19 | givenString: "s3:endpoint/bucket", 20 | goldenString: "03ae9513ea3ba4b6d7289c427503e85cb28c11da210f442f89a07093c22af8a", 21 | }, 22 | } 23 | for name, tc := range tests { 24 | t.Run(name, func(t *testing.T) { 25 | actual := Sha256Hash(tc.givenString) 26 | assert.Equal(t, tc.goldenString, actual) 27 | assert.LessOrEqual(t, len(actual), 63) 28 | }) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /operator/prunecontroller/setup.go: -------------------------------------------------------------------------------- 1 | package prunecontroller 2 | 3 | import ( 4 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 5 | "github.com/k8up-io/k8up/v2/operator/reconciler" 6 | batchv1 "k8s.io/api/batch/v1" 7 | ctrl "sigs.k8s.io/controller-runtime" 8 | "sigs.k8s.io/controller-runtime/pkg/predicate" 9 | ) 10 | 11 | // +kubebuilder:rbac:groups=k8up.io,resources=prunes,verbs=get;list;watch;create;update;patch;delete 12 | // +kubebuilder:rbac:groups=k8up.io,resources=prunes/status;prunes/finalizers,verbs=get;update;patch 13 | // +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete 14 | 15 | // SetupWithManager configures the reconciler. 
16 | func SetupWithManager(mgr ctrl.Manager) error { 17 | name := "prune.k8up.io" 18 | r := reconciler.NewReconciler[*k8upv1.Prune, *k8upv1.PruneList](mgr.GetClient(), &PruneReconciler{ 19 | Kube: mgr.GetClient(), 20 | }) 21 | return ctrl.NewControllerManagedBy(mgr). 22 | For(&k8upv1.Prune{}). 23 | Owns(&batchv1.Job{}). 24 | Named(name). 25 | WithEventFilter(predicate.GenerationChangedPredicate{}). 26 | Complete(r) 27 | } 28 | -------------------------------------------------------------------------------- /operator/restorecontroller/setup.go: -------------------------------------------------------------------------------- 1 | package restorecontroller 2 | 3 | import ( 4 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 5 | "github.com/k8up-io/k8up/v2/operator/reconciler" 6 | batchv1 "k8s.io/api/batch/v1" 7 | "sigs.k8s.io/controller-runtime" 8 | "sigs.k8s.io/controller-runtime/pkg/predicate" 9 | ) 10 | 11 | // +kubebuilder:rbac:groups=k8up.io,resources=restores,verbs=get;list;watch;create;update;patch;delete 12 | // +kubebuilder:rbac:groups=k8up.io,resources=restores/status;restores/finalizers,verbs=get;update;patch 13 | // +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete 14 | 15 | // SetupWithManager configures the reconciler. 16 | func SetupWithManager(mgr controllerruntime.Manager) error { 17 | name := "restore.k8up.io" 18 | r := reconciler.NewReconciler[*k8upv1.Restore, *k8upv1.RestoreList](mgr.GetClient(), &RestoreReconciler{ 19 | Kube: mgr.GetClient(), 20 | }) 21 | return controllerruntime.NewControllerManagedBy(mgr). 22 | For(&k8upv1.Restore{}). 23 | Owns(&batchv1.Job{}). 24 | Named(name). 25 | WithEventFilter(predicate.GenerationChangedPredicate{}). 
26 | Complete(r) 27 | } 28 | -------------------------------------------------------------------------------- /operator/schedulecontroller/controller.go: -------------------------------------------------------------------------------- 1 | package schedulecontroller 2 | 3 | import ( 4 | "context" 5 | 6 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 7 | "github.com/k8up-io/k8up/v2/operator/cfg" 8 | "github.com/k8up-io/k8up/v2/operator/job" 9 | "github.com/k8up-io/k8up/v2/operator/scheduler" 10 | controllerruntime "sigs.k8s.io/controller-runtime" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 13 | ) 14 | 15 | // ScheduleReconciler reconciles a Schedule object 16 | type ScheduleReconciler struct { 17 | Kube client.Client 18 | } 19 | 20 | func (r *ScheduleReconciler) NewObject() *k8upv1.Schedule { 21 | return &k8upv1.Schedule{} 22 | } 23 | 24 | func (r *ScheduleReconciler) NewObjectList() *k8upv1.ScheduleList { 25 | return &k8upv1.ScheduleList{} 26 | } 27 | 28 | func (r *ScheduleReconciler) Provision(ctx context.Context, schedule *k8upv1.Schedule) (controllerruntime.Result, error) { 29 | log := controllerruntime.LoggerFrom(ctx) 30 | 31 | repository := cfg.Config.GetGlobalRepository() 32 | if schedule.Spec.Backend != nil { 33 | repository = schedule.Spec.Backend.String() 34 | } 35 | if schedule.Spec.Archive != nil && schedule.Spec.Archive.RestoreSpec == nil { 36 | schedule.Spec.Archive.RestoreSpec = &k8upv1.RestoreSpec{} 37 | } 38 | config := job.NewConfig(r.Kube, schedule, repository) 39 | 40 | return controllerruntime.Result{}, NewScheduleHandler(config, schedule, log).Handle(ctx) 41 | } 42 | 43 | func (r *ScheduleReconciler) Deprovision(ctx context.Context, obj *k8upv1.Schedule) (controllerruntime.Result, error) { 44 | for _, jobType := range []k8upv1.JobType{k8upv1.PruneType, k8upv1.ArchiveType, k8upv1.RestoreType, k8upv1.BackupType, k8upv1.CheckType} { 45 | key := keyOf(obj, jobType) 46 | 
scheduler.GetScheduler().RemoveSchedule(ctx, key) 47 | } 48 | controllerutil.RemoveFinalizer(obj, k8upv1.ScheduleFinalizerName) 49 | return controllerruntime.Result{}, r.Kube.Update(ctx, obj) 50 | } 51 | -------------------------------------------------------------------------------- /operator/schedulecontroller/schedule_utils_integration_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | 3 | package schedulecontroller 4 | 5 | import ( 6 | "k8s.io/apimachinery/pkg/api/meta" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | 9 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 10 | ) 11 | 12 | func (ts *ScheduleControllerTestSuite) givenScheduleResource(schedule k8upv1.ScheduleDefinition) { 13 | givenSchedule := ts.newScheduleSpec("test", schedule) 14 | ts.EnsureResources(givenSchedule) 15 | ts.givenSchedule = givenSchedule 16 | } 17 | 18 | func (ts *ScheduleControllerTestSuite) givenEffectiveSchedule() { 19 | ts.givenSchedule.Status.EffectiveSchedules = []k8upv1.EffectiveSchedule{ 20 | {JobType: k8upv1.BackupType, GeneratedSchedule: "somevaluetobechanged"}, 21 | } 22 | ts.UpdateStatus(ts.givenSchedule) 23 | } 24 | 25 | func (ts *ScheduleControllerTestSuite) newScheduleSpec(name string, schedule k8upv1.ScheduleDefinition) *k8upv1.Schedule { 26 | return &k8upv1.Schedule{ 27 | ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ts.NS}, 28 | Spec: k8upv1.ScheduleSpec{ 29 | Backup: &k8upv1.BackupSchedule{ 30 | ScheduleCommon: &k8upv1.ScheduleCommon{ 31 | Schedule: schedule, 32 | }, 33 | }, 34 | }, 35 | } 36 | } 37 | 38 | func (ts *ScheduleControllerTestSuite) thenAssertCondition(resultSchedule *k8upv1.Schedule, condition k8upv1.ConditionType, reason k8upv1.ConditionReason, containsMessage string) { 39 | c := meta.FindStatusCondition(resultSchedule.Status.Conditions, condition.String()) 40 | ts.Assert().NotNil(c) 41 | ts.Assert().Equal(reason.String(), c.Reason) 42 | ts.Assert().Contains(c.Message, 
containsMessage) 43 | } 44 | -------------------------------------------------------------------------------- /operator/schedulecontroller/setup.go: -------------------------------------------------------------------------------- 1 | package schedulecontroller 2 | 3 | import ( 4 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 5 | "github.com/k8up-io/k8up/v2/operator/reconciler" 6 | ctrl "sigs.k8s.io/controller-runtime" 7 | "sigs.k8s.io/controller-runtime/pkg/predicate" 8 | ) 9 | 10 | // +kubebuilder:rbac:groups=k8up.io,resources=schedules,verbs=get;list;watch;create;update;patch;delete 11 | // +kubebuilder:rbac:groups=k8up.io,resources=schedules/status;schedules/finalizers,verbs=get;update;patch 12 | // The following permissions are just for backwards compatibility. 13 | // +kubebuilder:rbac:groups=k8up.io,resources=effectiveschedules,verbs=get;list;watch;create;update;patch;delete 14 | // +kubebuilder:rbac:groups=k8up.io,resources=effectiveschedules/finalizers,verbs=update 15 | 16 | // SetupWithManager configures the reconciler. 17 | func SetupWithManager(mgr ctrl.Manager) error { 18 | name := "schedule.k8up.io" 19 | r := reconciler.NewReconciler[*k8upv1.Schedule, *k8upv1.ScheduleList](mgr.GetClient(), &ScheduleReconciler{ 20 | Kube: mgr.GetClient(), 21 | }) 22 | return ctrl.NewControllerManagedBy(mgr). 23 | For(&k8upv1.Schedule{}). 24 | Named(name). 25 | WithEventFilter(predicate.GenerationChangedPredicate{}). 
26 | Complete(r) 27 | } 28 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:base", 4 | ":gitSignOff", 5 | ":disableDependencyDashboard" 6 | ], 7 | "labels": [ 8 | "dependency" 9 | ], 10 | "postUpdateOptions": [ 11 | "gomodTidy" 12 | ], 13 | "packageRules": [ 14 | { 15 | "matchPackagePatterns": [ 16 | "k8s.io/utils", 17 | "sigs.k8s.io/controller-runtime/tools/setup-envtest", 18 | "github.com/minio/minio-go/v7" 19 | ], 20 | "schedule": [ 21 | "on the first day of the month" 22 | ], 23 | "automerge": true 24 | }, 25 | { 26 | "matchPackagePatterns": [ 27 | "github.com/urfave/cli/v2" 28 | ], 29 | "groupName": "urfave/cli/v2", 30 | "schedule": [ 31 | "on the first day of the month" 32 | ] 33 | } 34 | ], 35 | "prBodyNotes": [ 36 | "- [ ] PR contains the label that identifies the area, one of: `area:operator`, `area:chart`\n- [ ] If the PR is targeting a Helm chart, add the chart label, e.g. `chart:k8up`" 37 | ] 38 | } 39 | -------------------------------------------------------------------------------- /restic/README.md: -------------------------------------------------------------------------------- 1 | # Restic Backup Implementation 2 | 3 | This Go module contains the bulk of the implementation to support Restic backups. 4 | 5 | The CLI entrypoint is in [`cmd/restic`](../cmd/restic). 6 | -------------------------------------------------------------------------------- /restic/cli/archive.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | // Archive uploads the last version of each snapshot to S3. 
4 | func (r *Restic) Archive(options RestoreOptions, tags ArrayOpts) error { 5 | 6 | archiveLogger := r.logger.WithName("archive") 7 | 8 | err := r.LastSnapshots(tags) 9 | if err != nil { 10 | archiveLogger.Error(err, "could not list snapshots") 11 | } 12 | 13 | archiveLogger.Info("archiving latest snapshots for every host") 14 | 15 | for _, v := range r.snapshots { 16 | PVCname := r.parsePath(v.Paths) 17 | archiveLogger.Info("starting archival for", "namespace", v.Hostname, "pvc", PVCname) 18 | err := r.Restore(v.ID, options, nil) 19 | if err != nil { 20 | return err 21 | } 22 | } 23 | 24 | return nil 25 | } 26 | -------------------------------------------------------------------------------- /restic/cli/check.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "github.com/k8up-io/k8up/v2/restic/logging" 5 | ) 6 | 7 | // Check will check the repository for errors 8 | func (r *Restic) Check() error { 9 | checkLogger := r.logger.WithName("check") 10 | 11 | checkLogger.Info("checking repository") 12 | 13 | resticCheckLogger := checkLogger.WithName("restic") 14 | opts := CommandOptions{ 15 | Path: r.resticPath, 16 | Args: r.globalFlags.ApplyToCommand("check"), 17 | StdOut: logging.NewInfoWriter(resticCheckLogger), 18 | StdErr: logging.NewErrorWriter(resticCheckLogger), 19 | } 20 | 21 | cmd := NewCommand(r.ctx, checkLogger, opts) 22 | cmd.Run() 23 | 24 | return cmd.FatalError 25 | } 26 | -------------------------------------------------------------------------------- /restic/cli/flags.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | // Flags stores arguments to pass to `restic` and can return them as array, see ApplyToCommand(). 
// Flags stores arguments to pass to `restic` and can return them as an array, see ApplyToCommand().
type Flags map[string][]string

// AddFlag appends the given values to the existing flag (identified by key) if
// it exists, or adds the flag together with its values otherwise.
func (f Flags) AddFlag(key string, values ...string) {
	// append on a missing key yields a fresh slice, so both the "exists" and
	// the "new flag" cases collapse into a single statement (and the caller's
	// slice is never aliased).
	f[key] = append(f[key], values...)
}

// Combine returns a new Flags instance that contains the flags and their
// values of both inputs. For flags present in both, the values of `first`
// precede the values of `second`.
func Combine(first, second Flags) Flags {
	combined := make(Flags, len(first)+len(second))
	for firstK, firstV := range first {
		combined[firstK] = firstV
	}
	for secondK, secondV := range second {
		if firstV, foundInFirst := first[secondK]; foundInFirst {
			combined[secondK] = append(firstV, secondV...)
		} else {
			combined[secondK] = secondV
		}
	}
	return combined
}

// ApplyToCommand applies the flags to the given command and its arguments,
// returning `[command, flags..., commandArgs...]` ready to be passed to the
// `restic` process. An empty command is omitted.
//
// NOTE: flag order is map iteration order and therefore not deterministic.
func (f Flags) ApplyToCommand(command string, commandArgs ...string) []string {
	args := make([]string, 0)
	if command != "" {
		args = append(args, command)
	}

	for flag, values := range f {
		args = append(args, expand(flag, values)...)
	}
	return append(args, commandArgs...)
}

// expand renders a single flag and its values as CLI arguments: a value-less
// flag expands to just the flag itself, otherwise the flag is repeated before
// each of its values.
func expand(flag string, values []string) []string {
	if len(values) == 0 {
		return []string{flag}
	}

	args := make([]string, 0, 2*len(values))
	for _, value := range values {
		args = append(args, flag, value)
	}
	return args
}
strings.Contains(scanner.Text(), errorString) { 59 | i.exists = true 60 | return len(p), nil 61 | } 62 | } 63 | } 64 | 65 | return i.Writer.Write(p) 66 | } 67 | -------------------------------------------------------------------------------- /restic/cli/interfaces.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import "github.com/prometheus/client_golang/prometheus" 4 | 5 | type StatsHandler interface { 6 | SendPrometheus(PrometheusProvider) error 7 | SendWebhook(WebhookProvider) error 8 | } 9 | 10 | type PrometheusProvider interface { 11 | ToProm() []prometheus.Collector 12 | } 13 | 14 | type WebhookProvider interface { 15 | ToJSON() []byte 16 | } 17 | -------------------------------------------------------------------------------- /restic/cli/snapshots.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | 7 | "github.com/k8up-io/k8up/v2/restic/dto" 8 | "github.com/k8up-io/k8up/v2/restic/logging" 9 | ) 10 | 11 | // Snapshots lists all the snapshots from the repository and saves them in the 12 | // restic instance for further use. 13 | func (r *Restic) Snapshots(tags ArrayOpts) error { 14 | return r.listSnapshots(tags, false) 15 | } 16 | 17 | // LastSnapshots only returns the latests snapshots for a given set of tags. 
18 | func (r *Restic) LastSnapshots(tags ArrayOpts) error { 19 | return r.listSnapshots(tags, true) 20 | } 21 | 22 | func (r *Restic) listSnapshots(tags ArrayOpts, last bool) error { 23 | snaplogger := r.logger.WithName("snapshots") 24 | 25 | snaplogger.Info("getting list of snapshots") 26 | 27 | buf := &bytes.Buffer{} 28 | 29 | opts := CommandOptions{ 30 | Path: r.resticPath, 31 | Args: r.globalFlags.ApplyToCommand("snapshots", "--json"), 32 | StdOut: buf, 33 | StdErr: logging.NewErrorWriter(snaplogger.WithName("restic")), 34 | } 35 | 36 | if len(tags) > 0 { 37 | opts.Args = append(opts.Args, tags.BuildArgs()...) 38 | } 39 | 40 | cmd := NewCommand(r.ctx, snaplogger, opts) 41 | cmd.Run() 42 | 43 | snaps := []dto.Snapshot{} 44 | 45 | jdecoder := json.NewDecoder(buf) 46 | 47 | err := jdecoder.Decode(&snaps) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | r.snapshots = snaps 53 | 54 | return cmd.FatalError 55 | 56 | } 57 | -------------------------------------------------------------------------------- /restic/cli/stdinbackup.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/k8up-io/k8up/v2/restic/cfg" 7 | "github.com/k8up-io/k8up/v2/restic/kubernetes" 8 | "github.com/k8up-io/k8up/v2/restic/logging" 9 | ) 10 | 11 | // StdinBackup create a snapshot with the data contained in the given reader. 
12 | func (r *Restic) StdinBackup(data *kubernetes.ExecData, filename, fileExt string, tags ArrayOpts) error { 13 | 14 | stdinlogger := r.logger.WithName("stdinBackup") 15 | 16 | stdinlogger.Info("starting stdin backup", "filename", filename, "extension", fileExt) 17 | 18 | outputWriter := logging.NewStdinBackupOutputParser(stdinlogger.WithName("progress"), filename+fileExt, r.sendBackupStats) 19 | 20 | flags := Combine(r.globalFlags, Flags{ 21 | "--host": {cfg.Config.Hostname}, 22 | "--json": {}, 23 | "--stdin": {}, 24 | "--stdin-filename": {fmt.Sprintf("%s%s", filename, fileExt)}, 25 | }) 26 | 27 | opts := CommandOptions{ 28 | Path: r.resticPath, 29 | Args: flags.ApplyToCommand("backup"), 30 | StdOut: outputWriter, 31 | StdErr: outputWriter, 32 | StdIn: data.Reader, 33 | } 34 | 35 | return r.triggerBackup(stdinlogger, tags, opts, data) 36 | } 37 | -------------------------------------------------------------------------------- /restic/cli/unlock.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "github.com/k8up-io/k8up/v2/restic/logging" 5 | ) 6 | 7 | // Unlock will remove stale locks from the repository 8 | // If the all flag is set to true, even non-stale locks are removed. 
9 | func (r *Restic) Unlock(all bool) error { 10 | unlocklogger := r.logger.WithName("unlock") 11 | 12 | unlocklogger.Info("unlocking repository", "all", all) 13 | 14 | opts := CommandOptions{ 15 | Path: r.resticPath, 16 | Args: r.globalFlags.ApplyToCommand("unlock"), 17 | StdOut: logging.NewErrorWriter(unlocklogger.WithName("restic")), 18 | StdErr: logging.NewErrorWriter(unlocklogger.WithName("restic")), 19 | } 20 | 21 | if all { 22 | opts.Args = append(opts.Args, "--remove-all") 23 | } 24 | 25 | cmd := NewCommand(r.ctx, unlocklogger, opts) 26 | cmd.Run() 27 | 28 | return cmd.FatalError 29 | } 30 | -------------------------------------------------------------------------------- /restic/cli/utils.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "io" 5 | "os" 6 | ) 7 | 8 | func generatePemFile(clientCert string, clientKey string, dest string) error { 9 | certIn, err := os.Open(clientCert) 10 | if err != nil { 11 | return err 12 | } 13 | defer certIn.Close() 14 | 15 | tlsIn, err := os.Open(clientKey) 16 | if err != nil { 17 | return err 18 | } 19 | defer tlsIn.Close() 20 | 21 | out, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY, 0644) 22 | if err != nil { 23 | return err 24 | } 25 | defer out.Close() 26 | 27 | _, err = io.Copy(out, certIn) 28 | if err != nil { 29 | return err 30 | } 31 | 32 | _, err = io.Copy(out, tlsIn) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | return nil 38 | } 39 | -------------------------------------------------------------------------------- /restic/dto/snapshot.go: -------------------------------------------------------------------------------- 1 | package dto 2 | 3 | import "time" 4 | 5 | // Snapshot models a restic a single snapshot from the 6 | // snapshots --json subcommand. 
7 | type Snapshot struct { 8 | ID string `json:"id"` 9 | Time time.Time `json:"time"` 10 | Tree string `json:"tree"` 11 | Paths []string `json:"paths"` 12 | Hostname string `json:"hostname"` 13 | Username string `json:"username"` 14 | UID int `json:"uid"` 15 | Gid int `json:"gid"` 16 | Tags []string `json:"tags"` 17 | } 18 | -------------------------------------------------------------------------------- /restic/kubernetes/config.go: -------------------------------------------------------------------------------- 1 | package kubernetes 2 | 3 | import ( 4 | "fmt" 5 | 6 | "k8s.io/apimachinery/pkg/runtime" 7 | "k8s.io/client-go/rest" 8 | "k8s.io/client-go/tools/clientcmd" 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | "sigs.k8s.io/controller-runtime/pkg/log" 11 | 12 | "github.com/go-logr/logr" 13 | k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 14 | "github.com/k8up-io/k8up/v2/restic/cfg" 15 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 16 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 17 | ) 18 | 19 | func getClientConfig() (*rest.Config, error) { 20 | config, err := rest.InClusterConfig() 21 | if err != nil { 22 | err1 := err 23 | config, err = clientcmd.BuildConfigFromFlags("", cfg.Config.KubeConfig) 24 | if err != nil { 25 | err = fmt.Errorf("InClusterConfig as well as BuildConfigFromFlags Failed. 
Error in InClusterConfig: %+v\nError in BuildConfigFromFlags: %+v", err1, err) 26 | return nil, err 27 | } 28 | } 29 | 30 | return config, nil 31 | } 32 | 33 | func NewTypedClient(l logr.Logger) (client.Client, error) { 34 | 35 | log.SetLogger(l) 36 | 37 | scheme := runtime.NewScheme() 38 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 39 | utilruntime.Must(k8upv1.AddToScheme(scheme)) 40 | 41 | config, err := getClientConfig() 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | opts := client.Options{ 47 | Scheme: scheme, 48 | } 49 | 50 | return client.New(config, opts) 51 | } 52 | -------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | 3 | // Place any runtime dependencies as imports in this file. 4 | // Go modules will be forced to download and install them. 5 | package tools 6 | 7 | import ( 8 | _ "github.com/restic/restic" 9 | _ "sigs.k8s.io/controller-tools/cmd/controller-gen" 10 | ) 11 | --------------------------------------------------------------------------------