├── .ci-scripts ├── mdl-style.rb └── yamlconfig.yaml ├── .codecov.yml ├── .dockerignore ├── .github ├── mergify.yml ├── renovate.json5 └── workflows │ ├── periodic.yml │ ├── stale.yml │ └── tests.yml ├── .gitignore ├── .golangci.yml ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── DCO ├── Dockerfile ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── SECURITY.md ├── api └── v1 │ ├── groupversion_info.go │ ├── snapshotschedule_types.go │ └── zz_generated.deepcopy.go ├── bundle.Dockerfile ├── bundle ├── manifests │ ├── snapscheduler-controller-manager-metrics-service_v1_service.yaml │ ├── snapscheduler-manager-config_v1_configmap.yaml │ ├── snapscheduler-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml │ ├── snapscheduler.backube_snapshotschedules.yaml │ └── snapscheduler.clusterserviceversion.yaml ├── metadata │ └── annotations.yaml └── tests │ └── scorecard │ └── config.yaml ├── cmd └── main.go ├── config ├── crd │ ├── bases │ │ └── snapscheduler.backube_snapshotschedules.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ └── manager_config_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── manifests │ ├── bases │ │ └── snapscheduler.clusterserviceversion.yaml │ └── kustomization.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── role.yaml │ ├── role_binding.yaml │ ├── service_account.yaml │ ├── snapshotschedule_editor_role.yaml │ └── snapshotschedule_viewer_role.yaml ├── samples │ ├── kustomization.yaml │ └── snapscheduler_v1_snapshotschedule.yaml └── scorecard │ ├── bases │ └── config.yaml │ ├── kustomization.yaml │ └── patches │ ├── basic.config.yaml │ └── olm.config.yaml ├── docs ├── .bundle │ └── config ├── 404.md ├── Gemfile ├── Gemfile.lock ├── README.md ├── _config.yml ├── _includes │ └── twitter-follow.html ├── _layouts │ └── default.html ├── development.md ├── docs.md ├── index.md ├── install.md ├── labeling.md ├── media │ ├── snapscheduler.svg │ ├── snapscheduler_repocard.png │ └── snapscheduler_repocard.svg ├── roadmap.md └── usage.md ├── go.mod ├── go.sum ├── hack ├── crds │ ├── snapshot.storage.k8s.io_volumesnapshotclasses.yaml │ ├── snapshot.storage.k8s.io_volumesnapshotcontents.yaml │ └── snapshot.storage.k8s.io_volumesnapshots.yaml ├── run-in-kind.sh ├── setup-kind-cluster.sh └── test-full.sh ├── helm └── snapscheduler │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── clusterrole-metrics-reader.yaml │ ├── clusterrole-proxy.yaml │ ├── clusterrole.yaml │ ├── deployment.yaml │ ├── role-leader-election.yaml │ ├── rolebinding-leader-election.yaml │ ├── rolebinding-proxy.yaml │ ├── rolebinding.yaml │ ├── service-metrics.yaml │ ├── serviceaccount.yaml │ └── snapscheduler.backube_snapshotschedules.yaml │ └── values.yaml ├── internal └── controller │ ├── snapshots_expire.go │ ├── snapshots_expire_test.go │ ├── snapshotschedule_controller.go │ ├── snapshotschedule_test.go │ └── suite_test.go ├── release-checklist.md └── test-kuttl ├── e2e ├── custom-snapclass │ ├── 00-assert.yaml │ ├── 00-create-workload.yaml │ ├── 05-create-schedule.yaml │ ├── 10-waitfor-snapshot.yaml │ └── 9999-delete-ns.yaml ├── label-selector-equality │ ├── 00-assert.yaml │ ├── 
00-create-workload.yaml │ ├── 05-create-schedule.yaml │ ├── 10-waitfor-snapshot.yaml │ └── 9999-delete-ns.yaml ├── label-selector-set │ ├── 00-assert.yaml │ ├── 00-create-workload.yaml │ ├── 05-create-schedule.yaml │ ├── 10-waitfor-snapshot.yaml │ └── 9999-delete-ns.yaml ├── metrics │ ├── 00-assert.yaml │ ├── 00-check-metrics.yaml │ └── 9999-delete-ns.yaml ├── minimal-schedule │ ├── 00-assert.yaml │ ├── 00-create-workload.yaml │ ├── 05-create-schedule.yaml │ ├── 10-waitfor-snapshot.yaml │ └── 9999-delete-ns.yaml ├── multi-pvc │ ├── 00-assert.yaml │ ├── 00-create-workload.yaml │ ├── 05-create-schedule.yaml │ ├── 10-waitfor-snapshot.yaml │ └── 9999-delete-ns.yaml └── template-labels │ ├── 00-assert.yaml │ ├── 00-create-workload.yaml │ ├── 05-create-schedule.yaml │ ├── 10-waitfor-snapshot.yaml │ └── 9999-delete-ns.yaml └── kuttl-test.yaml /.ci-scripts/mdl-style.rb: -------------------------------------------------------------------------------- 1 | all 2 | 3 | # Refer to the URL below for more information about the markdown rules. 4 | # https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md 5 | 6 | rule 'MD007', :indent => 2 7 | rule 'MD009', :br_spaces => 2 8 | rule 'MD013', :ignore_code_blocks => true, :tables => false 9 | rule 'MD024', :allow_different_nesting => true 10 | 11 | exclude_rule 'MD040' # Fenced code blocks should have a language specified 12 | exclude_rule 'MD041' # First line in file should be a top level header 13 | -------------------------------------------------------------------------------- /.ci-scripts/yamlconfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://yamllint.readthedocs.io/en/stable/configuration.html 3 | 4 | extends: default 5 | 6 | ignore: | 7 | hack/crds/* 8 | helm/snapscheduler/templates/* 9 | helm/snapscheduler/crds/* 10 | config/** 11 | bundle/** 12 | 13 | rules: 14 | comments: # renovate-bot doesn't put 2 spaces before the version number 15 | ignore: | 16 | .github/workflows/* 17 | document-start: 18 | ignore: | 19 | deploy/olm-catalog/snapscheduler/1.1.1/snapshotschedules.snapscheduler.backube.crd.yaml 20 | indentation: 21 | indent-sequences: consistent 22 | line-length: 23 | allow-non-breakable-inline-mappings: true 24 | ignore: | 25 | deploy/olm-catalog/snapscheduler/1.1.1/snapshotschedules.snapscheduler.backube.crd.yaml 26 | helm/snapscheduler/Chart.yaml 27 | test-kuttl/e2e/** 28 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # See: https://docs.codecov.io/docs/codecov-yaml 3 | codecov: 4 | require_ci_to_pass: true 5 | 6 | coverage: 7 | precision: 1 8 | round: down 9 | range: "70...100" 10 | 11 | status: 12 | project: true 13 | patch: true 14 | changes: false 15 | 16 | comment: 17 | layout: "diff, files" 18 | behavior: default 19 | require_changes: true 20 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore files that are not needed for the Go build 3 | /bin 4 | /docs 5 | -------------------------------------------------------------------------------- /.github/mergify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://docs.mergify.com/ 3 | queue_rules: 4 | - name: default
5 | queue_conditions: 6 | - or: 7 | - author=JohnStrunk 8 | - author=renovate-bot 9 | - author=dependabot[bot] 10 | - "#approved-reviews-by>=1" 11 | - "#changes-requested-reviews-by=0" 12 | - label!=do-not-merge 13 | - check-success=Successful e2e tests 14 | - check-success=DCO 15 | merge_conditions: [] 16 | merge_method: merge 17 | 18 | pull_request_rules: 19 | - name: Automatic merge 20 | conditions: [] 21 | actions: 22 | queue: 23 | -------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | // JSON5 spec: https://json5.org/ 3 | // Renovate docs: https://docs.renovatebot.com/configuration-options/ 4 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 5 | "extends": [ 6 | "config:recommended", // Use recommended settings 7 | ":enablePreCommit", // Enable pre-commit hook updates 8 | ":gitSignOff", // Sign off commits 9 | "helpers:pinGitHubActionDigests" // Pin GitHub action digests 10 | ], 11 | "labels": [ 12 | "dependencies" 13 | ], 14 | "lockFileMaintenance": { // Update Gemfile.lock 15 | "enabled": true 16 | }, 17 | "packageRules": [ 18 | { 19 | // Update GH actions in a single PR 20 | "groupName": "GH actions", 21 | "matchDepTypes": [ 22 | "action" 23 | ] 24 | }, 25 | { 26 | // Update golang tag in dockerfile & golang version in workflows in a 27 | // single PR 28 | "groupName": "golang version", 29 | "matchDepNames": [ 30 | "golang", 31 | "go" 32 | ] 33 | }, 34 | { 35 | // Disable some docker image updates 36 | "matchDatasources": [ 37 | "docker" 38 | ], 39 | "matchPackageNames": [ 40 | "controller", // Ignore the fake "controller" docker image name 41 | "quay.io/backube/snapscheduler", // Don't pin our own image 42 | "quay.io/operator-framework/scorecard-test" // Ignore OSDK scorecard image 43 | ], 44 | "enabled": false 45 | }, 46 | { 47 | // Renovate's packages update too frequently, so we only schedule updates 48 | // once a week to keep the noise down 49 | "matchPackageNames": [ 50 | "renovatebot/pre-commit-hooks" 51 | ], 52 | "schedule": [ 53 | "before 9am on monday" 54 | ] 55 | } 56 | ], 57 | "postUpdateOptions": [ 58 | "gomodTidy" // Run `go mod tidy` after updating dependencies 59 | ], 60 | "customManagers": [ 61 | { 62 | "customType": "regex", 63 | "description": "controller-tools version updates", 64 | "fileMatch": [ 65 | "^.github/(?:workflows|actions)/.+\\.ya?ml$", 66 | "(^|/)Makefile$" 67 | ], 68 | "matchStrings": [ 69 | "CONTROLLER_TOOLS_VERSION\\s*:=?\\s*\"?(?<currentValue>.+?)\"?\\n" 70 | ], 71 | "depNameTemplate": "kubernetes-sigs/controller-tools", 72 | "datasourceTemplate": "github-releases" 73 | }, 74 | { 75 | "customType": "regex", 76 | "description": "golang version updates (major/minor only)", 77 | "fileMatch": [ 78 | "^.github/(?:workflows|actions)/.+\\.ya?ml$", 79 | "(^|/)Makefile$" 80 | ], 81 | "matchStrings": [ 82 | "GO_VERSION\\s*:=?\\s*\"?(?<currentValue>.+?)\"?\\n" 83 | ], 84 | "depNameTemplate": "golang", 85 | "datasourceTemplate": "golang-version", 86 | "extractVersionTemplate": "^(?<version>.*)\\.\\d+$", 87 | "versioningTemplate": "loose" 88 | }, 89 | { 90 | "customType": "regex", 91 | "description": "golangci-lint version updates", 92 | "fileMatch": [ 93 | "^.github/(?:workflows|actions)/.+\\.ya?ml$", 94 | "(^|/)Makefile$" 95 | ], 96 | "matchStrings": [ 97 | "GOLANGCI_VERSION\\s*:=?\\s*\"?(?<currentValue>.+?)\"?\\n" 98 | ], 99 | "depNameTemplate": "golangci/golangci-lint", 100 | "datasourceTemplate": "github-releases" 101 | }, 102 | { 103 | "customType": "regex", 104 | "description": "Helm version updates", 105 | "fileMatch": [ 106 | "^.github/(?:workflows|actions)/.+\\.ya?ml$", 107 | "(^|/)Makefile$" 108 | ], 109 | "matchStrings": [ 110 | "HELM_VERSION\\s*:=?\\s*\"?(?<currentValue>.+?)\"?\\n" 111 | ], 112 | "depNameTemplate": "helm/helm", 113 | "datasourceTemplate": "github-releases" 114 | }, 115 | { 116 | "customType": "regex", 117 | "description": "Docker image updates in yamls", 118 | "fileMatch": [ 119 | "^config/.+\\.ya?ml$" 120 | ], 121 | "matchStrings": [ 122 | "image:\\s*\"?(?<depName>.*?):(?<currentValue>.*?)(?:@(?<currentDigest>sha256:[a-f0-9]+))?\"?\\s" 123 | ], 124 | "datasourceTemplate": "docker" 125 | }, 126 | { 127 | "customType": "regex", 128 | "description": "KinD version updates", 129 | "fileMatch": [ 130 | "^.github/(?:workflows|actions)/.+\\.ya?ml$", 131 | "(^|/)Makefile$" 132 | ], 133 | "matchStrings": [ 134 | "KIND_VERSION\\s*:=?\\s*\"?(?<currentValue>.+?)\"?\\n" 135 | ], 136 | "depNameTemplate": "kubernetes-sigs/kind", 137 | "datasourceTemplate": "github-releases", 138 | "extractVersionTemplate": "^v(?<version>.*)$" 139 | }, 140 | { 141 | "customType": "regex", 142 | "description": "kustomize version updates", 143 | "fileMatch": [ 144 | "^.github/(?:workflows|actions)/.+\\.ya?ml$", 145 | "(^|/)Makefile$" 146 | ], 147 | "matchStrings": [ 148 | "KUSTOMIZE_VERSION\\s*:=?\\s*\"?(?<currentValue>.+?)\"?\\n" 149 | ], 150 | "depNameTemplate": "kubernetes-sigs/kustomize", 151 | "datasourceTemplate": "github-releases", 152 | "extractVersionTemplate": "^kustomize/(?<version>.*)$" 153 | }, 154 | { 155 | "customType": "regex", 156 | "description": "Kuttl version updates", 157 | "fileMatch": [ 158 | "^.github/(?:workflows|actions)/.+\\.ya?ml$", 159 | "(^|/)Makefile$" 160 | ], 161 | "matchStrings": [ 162 | "KUTTL_VERSION\\s*:=?\\s*\"?(?<currentValue>.+?)\"?\\n" 163 | ], 164 | "depNameTemplate": "kudobuilder/kuttl", 165 | "datasourceTemplate": "github-releases" 166 | }, 167 | { 168 | "customType": "regex", 169 | "description": "Update tags for repos in hack/setup-kind-cluster.sh", 170 | "fileMatch": [ 171 | "^hack/setup-kind-cluster.sh$" 172 | ], 173 | "matchStrings": [ 174 | "# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[a-z-0-9]+?))?\\s+TAG\\s*=\\s*[\"']?(?<currentValue>.+?)[\"']?\\s" 175 | ] 176 | } 177 | ], 178 | "prConcurrentLimit": 5, 179 | "prHourlyLimit": 0, 180 | "rebaseWhen": "behind-base-branch", 181 | "schedule": [ 182 | "* 3-6 * * 1-5" // Run between 3am and 6am on weekdays 183 | ], 184 | "semanticCommits": "disabled", 185 | "timezone": "America/New_York" 186 | } 187 | -------------------------------------------------------------------------------- /.github/workflows/periodic.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # yamllint disable rule:line-length 3 | 4 | # We want to have our active branches built periodically to ensure they continue 5 | # to build correctly, and to pick up any updates to underlying packages/images. 6 | # Unfortunately, GitHub only allows scheduled workflow runs against the 7 | # "default" branch (main). This job, residing on the default branch, will 8 | # trigger other jobs (across other branches) at a regular interval. 9 | # 10 | # Jobs triggered by this workflow: 11 | # - Must have "workflow_dispatch" as a trigger method 12 | # - Must either: 13 | # - Be on the default branch OR 14 | # - Have executed at least once previously 15 | # 16 | # The above conditions are met in our case since we're just trying to 17 | # periodically trigger workflows that run with each PR/Push.
18 | name: Periodic 19 | on: # yamllint disable-line rule:truthy 20 | schedule: 21 | - cron: "15 6 * * 1" # 6:15 every Monday 22 | workflow_dispatch: # Useful for testing, but not necessary 23 | 24 | # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions 25 | permissions: 26 | actions: write 27 | contents: read 28 | 29 | jobs: 30 | trigger-workflows: 31 | name: Trigger other workflows 32 | runs-on: ubuntu-latest 33 | 34 | steps: 35 | # Must checkout source or gh can't figure out what to trigger 36 | - name: Checkout source 37 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 38 | 39 | - name: Trigger workflows 40 | env: 41 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 42 | run: | 43 | # Main development branch 44 | gh workflow run --ref "master" "tests.yml" 45 | 46 | # Active release branches 47 | gh workflow run --ref "release-3.2" "tests.yml" 48 | 49 | # Rebuilds of tagged containers 50 | # gh workflow run --ref "v3.2.0" "tests.yml" # Disabled due to mdl config incompatibility 51 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # yamllint disable rule:line-length 3 | 4 | name: Close stale issues/PRs 5 | on: # yamllint disable-line rule:truthy 6 | schedule: 7 | - cron: "11 3 * * *" # Daily @ 03:11 8 | 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | 13 | jobs: 14 | stale: 15 | name: Close stale issues/PRs 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 20 | with: 21 | days-before-issue-stale: -1 22 | days-before-issue-close: -1 23 | days-before-pr-stale: 60 24 | days-before-pr-close: 30 25 | stale-pr-message: > 26 | This pull request has been automatically marked as stale because it 27 | has not had recent activity. It will be closed in 30 days if no 28 | further activity occurs. Thank you for your contributions. 29 | close-pr-message: > 30 | This pull request has been automatically closed due to inactivity. 31 | Please re-open if these changes are still required. 
32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin/* 9 | 10 | # Test binary, built with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Go workspace file 17 | go.work 18 | 19 | # Kubernetes Generated files - skip generated files, except for vendored files 20 | !vendor/**/zz_generated.* 21 | 22 | # editor and IDE paraphernalia 23 | .idea 24 | .vscode 25 | *.swp 26 | *.swo 27 | *~ 28 | 29 | /coverage.txt 30 | /testbin/ 31 | Dockerfile.cross 32 | 33 | # Ignore metadata generated by Jekyll 34 | /docs/_site/ 35 | /docs/.sass-cache/ 36 | /docs/.jekyll-cache/ 37 | /docs/.jekyll-metadata 38 | # Ignore folders generated by Bundler 39 | /docs/vendor/ 40 | /.vscode/ 41 | /.ci-scripts/vendor/ 42 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://golangci-lint.run/usage/configuration/#config-file 3 | 4 | version: "2" 5 | formatters: 6 | enable: 7 | - goimports 8 | exclusions: 9 | generated: lax 10 | paths: 11 | - third_party$ 12 | - builtin$ 13 | - examples$ 14 | linters: 15 | default: none 16 | enable: 17 | - asciicheck 18 | - bidichk 19 | - contextcheck 20 | - dupl 21 | - errcheck 22 | - errname 23 | - errorlint 24 | - exhaustive 25 | - forcetypeassert 26 | - funlen 27 | - goconst 28 | - gocyclo 29 | - gosec 30 | - govet 31 | - ineffassign 32 | - lll 33 | - makezero 34 | - misspell 35 | - nakedret 36 | - prealloc 37 | - revive 38 | - staticcheck 39 | - unconvert 40 | - unparam 41 | - unused 42 | - whitespace 43 | exclusions: 44 | generated: lax 45 | presets: 46 | - comments 47 | - common-false-positives 48 | - legacy 49 | - std-error-handling 50 | paths: 51 | - third_party$ 52 | - builtin$ 53 | - examples$ 54 | run: 55 | allow-parallel-runners: true 56 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yamllint disable rule:line-length 3 | 4 | # See https://pre-commit.com for more information 5 | # See https://pre-commit.com/hooks.html for more hooks 6 | 7 | # Install in your local dev environment 8 | # > pip install --upgrade --user pre-commit 9 | # Enable the hooks for this repo 10 | # > pre-commit install 11 | 12 | repos: 13 | - repo: https://github.com/pre-commit/pre-commit-hooks 14 | rev: "v5.0.0" 15 | hooks: 16 | - id: check-added-large-files # Prevents giant files from being committed 17 | - id: check-json # Check that JSON files are valid 18 | - id: check-merge-conflict # Check for files that contain merge conflict strings 19 | - id: check-symlinks # Ensure symlinks have a valid target 20 | - id: check-toml # Ensure toml files are valid 21 | - id: check-xml # Check that XML files are valid 22 | - id: end-of-file-fixer # Ensures that a file is either empty, or ends with one newline 23 | - id: fix-byte-order-marker # Forbid utf-8 byte order marker 24 | - id: trailing-whitespace # Trims trailing whitespace 25 | args: [--markdown-linebreak-ext=md] 26 | 27 | - repo: https://github.com/adrienverge/yamllint 28 | rev: "v1.37.1" 29 | hooks: 30 | - id: yamllint 31 | args: ["--strict", "-c", 
".ci-scripts/yamlconfig.yaml"] 32 | 33 | - repo: https://github.com/markdownlint/markdownlint 34 | rev: "v0.13.0" 35 | hooks: 36 | - id: markdownlint 37 | args: ["--style", ".ci-scripts/mdl-style.rb"] 38 | exclude: | 39 | (?x)^( 40 | docs/404\.md 41 | )$ 42 | 43 | - repo: https://github.com/jumanjihouse/pre-commit-hooks 44 | rev: "3.0.0" 45 | hooks: 46 | - id: shellcheck 47 | 48 | - repo: https://github.com/renovatebot/pre-commit-hooks 49 | rev: "40.31.1" 50 | hooks: 51 | - id: renovate-config-validator 52 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. The format 4 | is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) 5 | 6 | This project follows [Semantic Versioning](https://semver.org/) 7 | 8 | ## [Unreleased] 9 | 10 | ## [3.5.0] - 2025-05-14 11 | 12 | ### Added 13 | 14 | - Ability to enable setting OwnerReferences on snapshots. Enabling this will 15 | cause the Snapshots to be deleted when the corresponding SnapshotSchedule is 16 | deleted. This is disabled by default. 17 | 18 | ## [3.4.0] - 2024-05-09 19 | 20 | ### Changed 21 | 22 | - Dependency updates (including CVE fixes) 23 | - Upgrade operator-sdk to 1.34.1 24 | 25 | ### Fixed 26 | 27 | - Helm chart: Add `metadata.namespace` to namespaced resources for compatibility 28 | with ArgoCD 29 | 30 | ## [3.3.0] - 2023-09-22 31 | 32 | ### Added 33 | 34 | - Ability to set a `priorityClassName` for the operator via Helm chart 35 | - Ability to set pod labels and annotations for the operator via Helm chart 36 | 37 | ### Fixed 38 | 39 | - Improve cronspec validation to allow "slash" notation 40 | 41 | ## [3.2.0] - 2022-10-10 42 | 43 | ### Added 44 | 45 | - Ability to specify a container hash instead of just a tag when deploying via 46 | Helm chart 47 | 48 | ### Changed 49 | 50 | - :warning: Helm chart now directly manages CRD installation/upgrade 51 | If upgrading from 3.1.0 or earlier, manual steps are required: 52 | 53 | ```console 54 | Error: UPGRADE FAILED: rendered manifests contain a resource that already 55 | exists. 
Unable to continue with update: CustomResourceDefinition 56 | "snapshotschedules.snapscheduler.backube" in namespace "" exists and cannot be 57 | imported into the current release: invalid ownership metadata; label 58 | validation error: missing key "app.kubernetes.io/managed-by": must be set to 59 | "Helm"; annotation validation error: missing key "meta.helm.sh/release-name": 60 | must be set to "snapscheduler"; annotation validation error: missing key 61 | "meta.helm.sh/release-namespace": must be set to "backube-snapscheduler" 62 | ``` 63 | 64 | The above error can be fixed by adding the required labels and annotations as 65 | mentioned in the error message: 66 | 67 | ```console 68 | $ kubectl label crd/snapshotschedules.snapscheduler.backube app.kubernetes.io/managed-by=Helm 69 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube labeled 70 | 71 | $ kubectl annotate crd/snapshotschedules.snapscheduler.backube meta.helm.sh/release-name=snapscheduler 72 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube annotated 73 | 74 | $ kubectl annotate crd/snapshotschedules.snapscheduler.backube meta.helm.sh/release-namespace=backube-snapscheduler 75 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube annotated 76 | ``` 77 | 78 | - Made CRD validation of cronspec more permissive 79 | - Upgrade quay.io/brancz/kube-rbac-proxy to v0.13.1 80 | - Upgrade operator-sdk to 1.23.0 81 | 82 | ## [3.1.0] - 2022-08-01 83 | 84 | ### Added 85 | 86 | - Ability to control TopologySpreadConstraints via Helm chart 87 | 88 | ### Changed 89 | 90 | - Upgrade quay.io/brancz/kube-rbac-proxy to v0.13.0 91 | - Upgrade operator-sdk to 1.22.0 92 | 93 | ## [3.0.0] - 2022-04-01 94 | 95 | ### Changed 96 | 97 | - Snapshot objects are now accessed via `snapshot.storage.k8s.io/v1` API version 98 | - Upgrade operator-sdk to 1.18 99 | 100 | ### Removed 101 | 102 | - Removed support for Kubernetes versions < 1.20 103 | 104 | ## [2.1.0] - 2021-12-17 105 | 106 | ### Added 107 | 108 | - Ability to configure resource requests for RBAC proxy container when deploying 109 | via Helm chart. 
110 | - Ability to configure container image used for kube-rbac-proxy 111 | 112 | ### Changed 113 | 114 | - Build w/ Go 1.17 115 | - Upgrade kube-rbac-proxy image to 0.11.0 116 | - Upgrade operator-sdk to 1.15 117 | 118 | ## [2.0.0] - 2021-08-03 119 | 120 | ### Changed 121 | 122 | - Updated project scaffolding to operator-sdk 1.10 123 | - Moved CRD to `apiextensions.k8s.io/v1` 124 | - Added default host anti-affinity for the operator replicas 125 | - Updated Helm Chart manifests to more closely match OSDK scaffolding 126 | 127 | ### Removed 128 | 129 | - Removed support for Kubernetes versions < 1.17 130 | - Removed support for `snapshot.storage.k8s.io/v1alpha1` snapshot version 131 | - Removed node selector labels targeting `beta.kubernetes.io/arch` and 132 | `beta.kubernetes.io/os` 133 | 134 | ## [1.2.0] - 2021-04-05 135 | 136 | ### Changed 137 | 138 | - Switched the operator base container to distroless 139 | 140 | ### Fixed 141 | 142 | - Metrics weren't accessible from the snapscheduler-metrics Service 143 | 144 | ## [1.1.1] - 2020-04-24 145 | 146 | ### Fixed 147 | 148 | - Fix crash when snapshotTemplate is not defined in schedule 149 | 150 | ## [1.1.0] - 2020-02-13 151 | 152 | ### Added 153 | 154 | - Support Kubernetes 1.17 and `snapshot.storage.k8s.io/v1beta1` snapshot version 155 | 156 | ## [1.0.0] - 2019-12-09 157 | 158 | ### Added 159 | 160 | - Crontab-based schedule CR to take snapshots of CSI-based persistent volumes 161 | - Label selectors to control which PVCs are selected for snapshotting 162 | - Retention policies based on snapshot age or count 163 | 164 | [Unreleased]: https://github.com/backube/snapscheduler/compare/v3.5.0...HEAD 165 | [3.5.0]: https://github.com/backube/snapscheduler/compare/v3.4.0...v3.5.0 166 | [3.4.0]: https://github.com/backube/snapscheduler/compare/v3.3.0...v3.4.0 167 | [3.3.0]: https://github.com/backube/snapscheduler/compare/v3.2.0...v3.3.0 168 | [3.2.0]: https://github.com/backube/snapscheduler/compare/v3.1.0...v3.2.0 169 | [3.1.0]: https://github.com/backube/snapscheduler/compare/v3.0.0...v3.1.0 170 | [3.0.0]: https://github.com/backube/snapscheduler/compare/v2.1.0...v3.0.0 171 | [2.1.0]: https://github.com/backube/snapscheduler/compare/v2.0.0...v2.1.0 172 | [2.0.0]: https://github.com/backube/snapscheduler/compare/v1.2.0...v2.0.0 173 | [1.2.0]: https://github.com/backube/snapscheduler/compare/v1.1.1...v1.2.0 174 | [1.1.1]: https://github.com/backube/snapscheduler/compare/v1.1.0...v1.1.1 175 | [1.1.0]: https://github.com/backube/snapscheduler/compare/v1.0.0...v1.1.0 176 | [1.0.0]: https://github.com/backube/snapscheduler/releases/tag/v1.0.0 177 | -------------------------------------------------------------------------------- /DCO: -------------------------------------------------------------------------------- 1 | Developer Certificate of Origin 2 | Version 1.1 3 | 4 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 5 | 1 Letterman Drive 6 | Suite D4700 7 | San Francisco, CA, 94129 8 | 9 | Everyone is permitted to copy and distribute verbatim copies of this 10 | license document, but changing it is not allowed.
11 | 12 | 13 | Developer's Certificate of Origin 1.1 14 | 15 | By making a contribution to this project, I certify that: 16 | 17 | (a) The contribution was created in whole or in part by me and I 18 | have the right to submit it under the open source license 19 | indicated in the file; or 20 | 21 | (b) The contribution is based upon previous work that, to the best 22 | of my knowledge, is covered under an appropriate open source 23 | license and I have the right under that license to submit that 24 | work with modifications, whether created in whole or in part 25 | by me, under the same open source license (unless I am 26 | permitted to submit under a different license), as indicated 27 | in the file; or 28 | 29 | (c) The contribution was provided directly to me by some other 30 | person who certified (a), (b) or (c) and I have not modified 31 | it. 32 | 33 | (d) I understand and agree that this project and the contribution 34 | are public and that a record of the contribution (including all 35 | personal information I submit with it, including my sign-off) is 36 | maintained indefinitely and may be redistributed consistent with 37 | this project or the open source license(s) involved. 38 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.24@sha256:4c0a1814a7c6c65ece28b3bfea14ee3cf83b5e80b81418453f0e9d5255a5d7b8 AS builder 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | WORKDIR /workspace 7 | # Copy the Go Modules manifests 8 | COPY go.mod go.mod 9 | COPY go.sum go.sum 10 | # cache deps before building and copying source so that we don't need to re-download as much 11 | # and so that source changes don't invalidate our downloaded layer 12 | RUN go mod download 13 | 14 | # Copy the go source 15 | COPY cmd/main.go cmd/main.go 16 | COPY api/ api/ 17 | COPY internal/controller/ internal/controller/ 18 | 19 | # Build 20 | ARG version="(unknown)" 21 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager -ldflags -X=main.snapschedulerVersion=${version} cmd/main.go 22 | 23 | # Use distroless as minimal base image to package the manager binary 24 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 25 | FROM gcr.io/distroless/static:nonroot 26 | WORKDIR / 27 | COPY --from=builder /workspace/manager . 28 | USER 65532:65532 29 | 30 | ENTRYPOINT ["/manager"] 31 | 32 | ARG builddate="(unknown)" 33 | ARG description="Operator to manage scheduled PV snapshots" 34 | 35 | LABEL build-date="${builddate}" 36 | LABEL description="${description}" 37 | LABEL io.k8s.description="${description}" 38 | LABEL io.k8s.displayname="snapscheduler: A snapshot scheduler" 39 | LABEL name="snapscheduler" 40 | LABEL summary="${description}" 41 | LABEL vcs-type="git" 42 | LABEL vcs-url="https://github.com/backube/snapscheduler" 43 | LABEL vendor="Backube" 44 | LABEL version="${version}" 45 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | # Code generated by tool. DO NOT EDIT. 2 | # This file is used to track the info used to scaffold your project 3 | # and allow the plugins properly work. 
4 | # More info: https://book.kubebuilder.io/reference/project-config.html 5 | domain: backube 6 | layout: 7 | - go.kubebuilder.io/v4 8 | plugins: 9 | manifests.sdk.operatorframework.io/v2: {} 10 | scorecard.sdk.operatorframework.io/v2: {} 11 | projectName: snapscheduler 12 | repo: github.com/backube/snapscheduler 13 | resources: 14 | - api: 15 | crdVersion: v1 16 | namespaced: true 17 | controller: true 18 | domain: backube 19 | group: snapscheduler 20 | kind: SnapshotSchedule 21 | path: github.com/backube/snapscheduler/api/v1 22 | version: v1 23 | version: "3" 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SnapScheduler 2 | 3 | [![Build 4 | Status](https://github.com/backube/snapscheduler/workflows/Tests/badge.svg)](https://github.com/backube/snapscheduler/actions?query=branch%3Amaster+workflow%3ATests+) 5 | [![Go Report 6 | Card](https://goreportcard.com/badge/github.com/backube/snapscheduler)](https://goreportcard.com/report/github.com/backube/snapscheduler) 7 | [![codecov](https://codecov.io/gh/backube/snapscheduler/branch/master/graph/badge.svg)](https://codecov.io/gh/backube/snapscheduler) 8 | 9 | SnapScheduler provides scheduled snapshots for Kubernetes CSI-based volumes. 10 | 11 | ## Quickstart 12 | 13 | Install: 14 | 15 | ```console 16 | $ helm repo add backube https://backube.github.io/helm-charts/ 17 | "backube" has been added to your repositories 18 | 19 | $ kubectl create namespace backube-snapscheduler 20 | namespace/backube-snapscheduler created 21 | 22 | $ helm install -n backube-snapscheduler snapscheduler backube/snapscheduler 23 | NAME: snapscheduler 24 | LAST DEPLOYED: Mon Jul 6 15:16:41 2020 25 | NAMESPACE: backube-snapscheduler 26 | STATUS: deployed 27 | ... 28 | ``` 29 | 30 | Keep 6 hourly snapshots of all PVCs in `mynamespace`: 31 | 32 | ```console 33 | $ kubectl -n mynamespace apply -f - <<SNAP 34 | --- 35 | apiVersion: snapscheduler.backube/v1 36 | kind: SnapshotSchedule 37 | metadata: 38 | name: hourly 39 | spec: 40 | retention: 41 | maxCount: 6 42 | schedule: "@hourly" 43 | SNAP 44 | snapshotschedule.snapscheduler.backube/hourly created 45 | ``` 46 | -------------------------------------------------------------------------------- /api/v1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2019 The snapscheduler authors 3 | 4 | This file may be used, at your option, according to either the GNU AGPL 3.0 or 5 | the Apache V2 license. 6 | 7 | --- 8 | This program is free software: you can redistribute it and/or modify it under 9 | the terms of the GNU Affero General Public License as published by the Free 10 | Software Foundation, either version 3 of the License, or (at your option) any 11 | later version. 12 | 13 | This program is distributed in the hope that it will be useful, but WITHOUT ANY 14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 15 | PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License along 18 | with this program. If not, see <https://www.gnu.org/licenses/>. 19 | 20 | --- 21 | Licensed under the Apache License, Version 2.0 (the "License"); 22 | you may not use this file except in compliance with the License. 23 | You may obtain a copy of the License at 24 | 25 | http://www.apache.org/licenses/LICENSE-2.0 26 | 27 | Unless required by applicable law or agreed to in writing, software 28 | distributed under the License is distributed on an "AS IS" BASIS, 29 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 30 | See the License for the specific language governing permissions and 31 | limitations under the License. 32 | */ 33 | 34 | // Package v1 contains API Schema definitions for the snapscheduler v1 API group 35 | // +kubebuilder:object:generate=true 36 | // +groupName=snapscheduler.backube 37 | package v1 38 | 39 | import ( 40 | "k8s.io/apimachinery/pkg/runtime/schema" 41 | "sigs.k8s.io/controller-runtime/pkg/scheme" 42 | ) 43 | 44 | var ( 45 | // GroupVersion is group version used to register these objects 46 | GroupVersion = schema.GroupVersion{Group: "snapscheduler.backube", Version: "v1"} 47 | 48 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 49 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 50 | 51 | // AddToScheme adds the types in this group-version to the given scheme.
52 | AddToScheme = SchemeBuilder.AddToScheme 53 | ) 54 | -------------------------------------------------------------------------------- /api/v1/snapshotschedule_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2019 The snapscheduler authors 3 | 4 | This file may be used, at your option, according to either the GNU AGPL 3.0 or 5 | the Apache V2 license. 6 | 7 | --- 8 | This program is free software: you can redistribute it and/or modify it under 9 | the terms of the GNU Affero General Public License as published by the Free 10 | Software Foundation, either version 3 of the License, or (at your option) any 11 | later version. 12 | 13 | This program is distributed in the hope that it will be useful, but WITHOUT ANY 14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 15 | PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License along 18 | with this program. If not, see <https://www.gnu.org/licenses/>. 19 | 20 | --- 21 | Licensed under the Apache License, Version 2.0 (the "License"); 22 | you may not use this file except in compliance with the License. 23 | You may obtain a copy of the License at 24 | 25 | http://www.apache.org/licenses/LICENSE-2.0 26 | 27 | Unless required by applicable law or agreed to in writing, software 28 | distributed under the License is distributed on an "AS IS" BASIS, 29 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 30 | See the License for the specific language governing permissions and 31 | limitations under the License. 32 | */ 33 | 34 | // nolint: lll 35 | package v1 36 | 37 | import ( 38 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 39 | ) 40 | 41 | // SnapshotRetentionSpec defines how long snapshots should be kept. 42 | type SnapshotRetentionSpec struct { 43 | // The length of time (time.Duration) after which a given Snapshot will be 44 | // deleted. 45 | //+kubebuilder:validation:Pattern=^\d+(h|m|s)$ 46 | //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Expiration period",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:text"} 47 | //+optional 48 | Expires string `json:"expires,omitempty"` 49 | // The maximum number of snapshots to retain per PVC 50 | //+kubebuilder:validation:Minimum=1 51 | //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Maximum snapshots",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:number"} 52 | //+optional 53 | MaxCount *int32 `json:"maxCount,omitempty"` 54 | } 55 | 56 | // SnapshotTemplateSpec defines the template for Snapshot objects 57 | type SnapshotTemplateSpec struct { 58 | // A list of labels that should be added to each Snapshot created by this 59 | // schedule. 60 | //+operator-sdk:csv:customresourcedefinitions:type=spec 61 | //+optional 62 | Labels map[string]string `json:"labels,omitempty"` 63 | // The name of the VolumeSnapshotClass to be used when creating Snapshots.
64 | //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="VolumeSnapshotClass name",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:text"} 65 | //+optional 66 | SnapshotClassName *string `json:"snapshotClassName,omitempty"` 67 | } 68 | 69 | // SnapshotScheduleSpec defines the desired state of SnapshotSchedule 70 | type SnapshotScheduleSpec struct { 71 | // A filter to select which PVCs to snapshot via this schedule 72 | //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="PVC selector",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:selector:core:v1:PersistentVolumeClaim"} 73 | //+optional 74 | ClaimSelector metav1.LabelSelector `json:"claimSelector,omitempty"` 75 | // Retention determines how long this schedule's snapshots will be kept. 76 | //+operator-sdk:csv:customresourcedefinitions:type=spec 77 | //+optional 78 | Retention SnapshotRetentionSpec `json:"retention,omitempty"` 79 | // Schedule is a Cronspec specifying when snapshots should be taken. See 80 | // https://en.wikipedia.org/wiki/Cron for a description of the format. 81 | //+kubebuilder:validation:Pattern=`^(@(annually|yearly|monthly|weekly|daily|hourly))|((((\d+,)*\d+|(\d+(\/|-)\d+)|\*(\/\d+)?)\s?){5})$` 82 | //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Schedule",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:text"} 83 | Schedule string `json:"schedule,omitempty"` 84 | // Indicates that this schedule should be temporarily disabled 85 | //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Disabled",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:booleanSwitch"} 86 | //+optional 87 | Disabled bool `json:"disabled,omitempty"` 88 | // A template to customize the Snapshots. 89 | //+operator-sdk:csv:customresourcedefinitions:type=spec 90 | SnapshotTemplate *SnapshotTemplateSpec `json:"snapshotTemplate,omitempty"` 91 | } 92 | 93 | // SnapshotScheduleStatus defines the observed state of SnapshotSchedule 94 | type SnapshotScheduleStatus struct { 95 | // Conditions is a list of conditions related to operator reconciliation. 96 | //+optional 97 | //+operator-sdk:csv:customresourcedefinitions:type=status,displayName="Conditions",xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} 98 | Conditions []metav1.Condition `json:"conditions,omitempty"` 99 | // The time of the most recent snapshot taken by this schedule 100 | //+optional 101 | //+operator-sdk:csv:customresourcedefinitions:type=status,displayName="Last snapshot",xDescriptors={"urn:alm:descriptor:text"} 102 | LastSnapshotTime *metav1.Time `json:"lastSnapshotTime,omitempty"` 103 | // The time of the next scheduled snapshot 104 | //+optional 105 | //+operator-sdk:csv:customresourcedefinitions:type=status,displayName="Next snapshot",xDescriptors={"urn:alm:descriptor:text"} 106 | NextSnapshotTime *metav1.Time `json:"nextSnapshotTime,omitempty"` 107 | } 108 | 109 | const ( 110 | // ConditionReconciled is a Condition indicating whether the object is fully 111 | // reconciled. 112 | ConditionReconciled = "Reconciled" 113 | // ReconciledReasonError indicates there was an error while attempting to reconcile. 
114 | ReconciledReasonError = "ReconcileError" 115 | // ReconciledReasonComplete indicates reconcile was successful 116 | ReconciledReasonComplete = "ReconcileComplete" 117 | ) 118 | 119 | //+kubebuilder:object:root=true 120 | //+kubebuilder:subresource:status 121 | //+kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=".spec.schedule" 122 | //+kubebuilder:printcolumn:name="Max age",type=string,JSONPath=".spec.retention.expires" 123 | //+kubebuilder:printcolumn:name="Max num",type=integer,JSONPath=".spec.retention.maxCount" 124 | //+kubebuilder:printcolumn:name="Disabled",type=boolean,JSONPath=".spec.disabled" 125 | //+kubebuilder:printcolumn:name="Next snapshot",type=string,JSONPath=".status.nextSnapshotTime" 126 | //+kubebuilder:resource:path=snapshotschedules,scope=Namespaced 127 | //+operator-sdk:csv:customresourcedefinitions:displayName="Snapshot Schedule",resources={} 128 | 129 | // SnapshotSchedule defines a schedule for taking automated snapshots of PVC(s) 130 | type SnapshotSchedule struct { 131 | metav1.TypeMeta `json:",inline"` 132 | metav1.ObjectMeta `json:"metadata,omitempty"` 133 | 134 | Spec SnapshotScheduleSpec `json:"spec,omitempty"` 135 | Status SnapshotScheduleStatus `json:"status,omitempty"` 136 | } 137 | 138 | //+kubebuilder:object:root=true 139 | 140 | // SnapshotScheduleList contains a list of SnapshotSchedule 141 | type SnapshotScheduleList struct { 142 | metav1.TypeMeta `json:",inline"` 143 | metav1.ListMeta `json:"metadata,omitempty"` 144 | Items []SnapshotSchedule `json:"items"` 145 | } 146 | 147 | func init() { 148 | SchemeBuilder.Register(&SnapshotSchedule{}, &SnapshotScheduleList{}) 149 | } 150 | -------------------------------------------------------------------------------- /api/v1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | 3 | // Code generated by controller-gen. DO NOT EDIT. 4 | 5 | package v1 6 | 7 | import ( 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | runtime "k8s.io/apimachinery/pkg/runtime" 10 | ) 11 | 12 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 13 | func (in *SnapshotRetentionSpec) DeepCopyInto(out *SnapshotRetentionSpec) { 14 | *out = *in 15 | if in.MaxCount != nil { 16 | in, out := &in.MaxCount, &out.MaxCount 17 | *out = new(int32) 18 | **out = **in 19 | } 20 | } 21 | 22 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotRetentionSpec. 23 | func (in *SnapshotRetentionSpec) DeepCopy() *SnapshotRetentionSpec { 24 | if in == nil { 25 | return nil 26 | } 27 | out := new(SnapshotRetentionSpec) 28 | in.DeepCopyInto(out) 29 | return out 30 | } 31 | 32 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 33 | func (in *SnapshotSchedule) DeepCopyInto(out *SnapshotSchedule) { 34 | *out = *in 35 | out.TypeMeta = in.TypeMeta 36 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 37 | in.Spec.DeepCopyInto(&out.Spec) 38 | in.Status.DeepCopyInto(&out.Status) 39 | } 40 | 41 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSchedule. 
42 | func (in *SnapshotSchedule) DeepCopy() *SnapshotSchedule { 43 | if in == nil { 44 | return nil 45 | } 46 | out := new(SnapshotSchedule) 47 | in.DeepCopyInto(out) 48 | return out 49 | } 50 | 51 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 52 | func (in *SnapshotSchedule) DeepCopyObject() runtime.Object { 53 | if c := in.DeepCopy(); c != nil { 54 | return c 55 | } 56 | return nil 57 | } 58 | 59 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 60 | func (in *SnapshotScheduleList) DeepCopyInto(out *SnapshotScheduleList) { 61 | *out = *in 62 | out.TypeMeta = in.TypeMeta 63 | in.ListMeta.DeepCopyInto(&out.ListMeta) 64 | if in.Items != nil { 65 | in, out := &in.Items, &out.Items 66 | *out = make([]SnapshotSchedule, len(*in)) 67 | for i := range *in { 68 | (*in)[i].DeepCopyInto(&(*out)[i]) 69 | } 70 | } 71 | } 72 | 73 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleList. 74 | func (in *SnapshotScheduleList) DeepCopy() *SnapshotScheduleList { 75 | if in == nil { 76 | return nil 77 | } 78 | out := new(SnapshotScheduleList) 79 | in.DeepCopyInto(out) 80 | return out 81 | } 82 | 83 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 84 | func (in *SnapshotScheduleList) DeepCopyObject() runtime.Object { 85 | if c := in.DeepCopy(); c != nil { 86 | return c 87 | } 88 | return nil 89 | } 90 | 91 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 92 | func (in *SnapshotScheduleSpec) DeepCopyInto(out *SnapshotScheduleSpec) { 93 | *out = *in 94 | in.ClaimSelector.DeepCopyInto(&out.ClaimSelector) 95 | in.Retention.DeepCopyInto(&out.Retention) 96 | if in.SnapshotTemplate != nil { 97 | in, out := &in.SnapshotTemplate, &out.SnapshotTemplate 98 | *out = new(SnapshotTemplateSpec) 99 | (*in).DeepCopyInto(*out) 100 | } 101 | } 102 | 103 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleSpec. 104 | func (in *SnapshotScheduleSpec) DeepCopy() *SnapshotScheduleSpec { 105 | if in == nil { 106 | return nil 107 | } 108 | out := new(SnapshotScheduleSpec) 109 | in.DeepCopyInto(out) 110 | return out 111 | } 112 | 113 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 114 | func (in *SnapshotScheduleStatus) DeepCopyInto(out *SnapshotScheduleStatus) { 115 | *out = *in 116 | if in.Conditions != nil { 117 | in, out := &in.Conditions, &out.Conditions 118 | *out = make([]metav1.Condition, len(*in)) 119 | for i := range *in { 120 | (*in)[i].DeepCopyInto(&(*out)[i]) 121 | } 122 | } 123 | if in.LastSnapshotTime != nil { 124 | in, out := &in.LastSnapshotTime, &out.LastSnapshotTime 125 | *out = (*in).DeepCopy() 126 | } 127 | if in.NextSnapshotTime != nil { 128 | in, out := &in.NextSnapshotTime, &out.NextSnapshotTime 129 | *out = (*in).DeepCopy() 130 | } 131 | } 132 | 133 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleStatus. 134 | func (in *SnapshotScheduleStatus) DeepCopy() *SnapshotScheduleStatus { 135 | if in == nil { 136 | return nil 137 | } 138 | out := new(SnapshotScheduleStatus) 139 | in.DeepCopyInto(out) 140 | return out 141 | } 142 | 143 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. 144 | func (in *SnapshotTemplateSpec) DeepCopyInto(out *SnapshotTemplateSpec) { 145 | *out = *in 146 | if in.Labels != nil { 147 | in, out := &in.Labels, &out.Labels 148 | *out = make(map[string]string, len(*in)) 149 | for key, val := range *in { 150 | (*out)[key] = val 151 | } 152 | } 153 | if in.SnapshotClassName != nil { 154 | in, out := &in.SnapshotClassName, &out.SnapshotClassName 155 | *out = new(string) 156 | **out = **in 157 | } 158 | } 159 | 160 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotTemplateSpec. 161 | func (in *SnapshotTemplateSpec) DeepCopy() *SnapshotTemplateSpec { 162 | if in == nil { 163 | return nil 164 | } 165 | out := new(SnapshotTemplateSpec) 166 | in.DeepCopyInto(out) 167 | return out 168 | } 169 | -------------------------------------------------------------------------------- /bundle.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | # Core bundle labels. 4 | LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 5 | LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ 6 | LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ 7 | LABEL operators.operatorframework.io.bundle.package.v1=snapscheduler 8 | LABEL operators.operatorframework.io.bundle.channels.v1=candidate,stable 9 | LABEL operators.operatorframework.io.bundle.channel.default.v1=stable 10 | LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.34.1 11 | LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 12 | LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4 13 | 14 | # Labels for testing. 15 | LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 16 | LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ 17 | 18 | # Copy files to locations specified by labels. 
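# (Hypothetical usage note, not part of this repository's tooling as shown
# here: an OLM bundle image like this one is typically built from the
# repository root with a command such as
#   docker build -f bundle.Dockerfile -t <registry>/snapscheduler-bundle:<tag> .
# where the registry and tag are placeholders chosen at release time.)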
19 | COPY bundle/manifests /manifests/ 20 | COPY bundle/metadata /metadata/ 21 | COPY bundle/tests/scorecard /tests/scorecard/ 22 | -------------------------------------------------------------------------------- /bundle/manifests/snapscheduler-controller-manager-metrics-service_v1_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app.kubernetes.io/component: kube-rbac-proxy 7 | app.kubernetes.io/created-by: snapscheduler 8 | app.kubernetes.io/instance: controller-manager-metrics-service 9 | app.kubernetes.io/managed-by: kustomize 10 | app.kubernetes.io/name: service 11 | app.kubernetes.io/part-of: snapscheduler 12 | control-plane: controller-manager 13 | name: snapscheduler-controller-manager-metrics-service 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | targetPort: https 19 | selector: 20 | control-plane: controller-manager 21 | status: 22 | loadBalancer: {} 23 | -------------------------------------------------------------------------------- /bundle/manifests/snapscheduler-manager-config_v1_configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | controller_manager_config.yaml: | 4 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 5 | kind: ControllerManagerConfig 6 | health: 7 | healthProbeBindAddress: :8081 8 | metrics: 9 | bindAddress: 127.0.0.1:8080 10 | webhook: 11 | port: 9443 12 | leaderElection: 13 | leaderElect: true 14 | resourceName: cd2d8e9f.backube 15 | leaderElectionReleaseOnCancel: true 16 | kind: ConfigMap 17 | metadata: 18 | name: snapscheduler-manager-config 19 | -------------------------------------------------------------------------------- /bundle/manifests/snapscheduler-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app.kubernetes.io/component: kube-rbac-proxy 7 | app.kubernetes.io/created-by: snapscheduler 8 | app.kubernetes.io/instance: metrics-reader 9 | app.kubernetes.io/managed-by: kustomize 10 | app.kubernetes.io/name: clusterrole 11 | app.kubernetes.io/part-of: snapscheduler 12 | name: snapscheduler-metrics-reader 13 | rules: 14 | - nonResourceURLs: 15 | - /metrics 16 | verbs: 17 | - get 18 | -------------------------------------------------------------------------------- /bundle/metadata/annotations.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | # Core bundle annotations. 3 | operators.operatorframework.io.bundle.mediatype.v1: registry+v1 4 | operators.operatorframework.io.bundle.manifests.v1: manifests/ 5 | operators.operatorframework.io.bundle.metadata.v1: metadata/ 6 | operators.operatorframework.io.bundle.package.v1: snapscheduler 7 | operators.operatorframework.io.bundle.channels.v1: candidate,stable 8 | operators.operatorframework.io.bundle.channel.default.v1: stable 9 | operators.operatorframework.io.metrics.builder: operator-sdk-v1.34.1 10 | operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 11 | operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4 12 | 13 | # Annotations for testing. 
14 | operators.operatorframework.io.test.mediatype.v1: scorecard+v1 15 | operators.operatorframework.io.test.config.v1: tests/scorecard/ 16 | com.redhat.openshift.versions: "v4.7" 17 | -------------------------------------------------------------------------------- /bundle/tests/scorecard/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: 8 | - entrypoint: 9 | - scorecard-test 10 | - basic-check-spec 11 | image: quay.io/operator-framework/scorecard-test:v1.34.1 12 | labels: 13 | suite: basic 14 | test: basic-check-spec-test 15 | storage: 16 | spec: 17 | mountPath: {} 18 | - entrypoint: 19 | - scorecard-test 20 | - olm-bundle-validation 21 | image: quay.io/operator-framework/scorecard-test:v1.34.1 22 | labels: 23 | suite: olm 24 | test: olm-bundle-validation-test 25 | storage: 26 | spec: 27 | mountPath: {} 28 | - entrypoint: 29 | - scorecard-test 30 | - olm-crds-have-validation 31 | image: quay.io/operator-framework/scorecard-test:v1.34.1 32 | labels: 33 | suite: olm 34 | test: olm-crds-have-validation-test 35 | storage: 36 | spec: 37 | mountPath: {} 38 | - entrypoint: 39 | - scorecard-test 40 | - olm-crds-have-resources 41 | image: quay.io/operator-framework/scorecard-test:v1.34.1 42 | labels: 43 | suite: olm 44 | test: olm-crds-have-resources-test 45 | storage: 46 | spec: 47 | mountPath: {} 48 | - entrypoint: 49 | - scorecard-test 50 | - olm-spec-descriptors 51 | image: quay.io/operator-framework/scorecard-test:v1.34.1 52 | labels: 53 | suite: olm 54 | test: olm-spec-descriptors-test 55 | storage: 56 | spec: 57 | mountPath: {} 58 | - entrypoint: 59 | - scorecard-test 60 | - olm-status-descriptors 61 | image: quay.io/operator-framework/scorecard-test:v1.34.1 62 | labels: 63 | suite: olm 64 | test: olm-status-descriptors-test 65 | storage: 66 | spec: 67 | mountPath: {} 68 | storage: 69 | spec: 70 | mountPath: {} 71 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 The snapscheduler authors. 3 | 4 | This program is free software: you can redistribute it and/or modify 5 | it under the terms of the GNU Affero General Public License as published 6 | by the Free Software Foundation, either version 3 of the License, or 7 | (at your option) any later version. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU Affero General Public License for more details. 13 | 14 | You should have received a copy of the GNU Affero General Public License 15 | along with this program. If not, see <https://www.gnu.org/licenses/>. 16 | */ 17 | 18 | package main 19 | 20 | import ( 21 | "crypto/tls" 22 | "flag" 23 | "fmt" 24 | "os" 25 | "runtime" 26 | 27 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 28 | // to ensure that exec-entrypoint and run can make use of them.
29 | 30 | "go.uber.org/zap/zapcore" 31 | _ "k8s.io/client-go/plugin/pkg/client/auth" 32 | 33 | snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" 34 | kruntime "k8s.io/apimachinery/pkg/runtime" 35 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 36 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 37 | ctrl "sigs.k8s.io/controller-runtime" 38 | "sigs.k8s.io/controller-runtime/pkg/healthz" 39 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 40 | ctrlMetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" 41 | "sigs.k8s.io/controller-runtime/pkg/webhook" 42 | 43 | snapschedulerv1 "github.com/backube/snapscheduler/api/v1" 44 | "github.com/backube/snapscheduler/internal/controller" 45 | //+kubebuilder:scaffold:imports 46 | ) 47 | 48 | var ( 49 | scheme = kruntime.NewScheme() 50 | setupLog = ctrl.Log.WithName("setup") 51 | snapschedulerVersion = "0.0.0" 52 | ) 53 | 54 | func init() { 55 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 56 | utilruntime.Must(snapv1.AddToScheme(scheme)) 57 | 58 | utilruntime.Must(snapschedulerv1.AddToScheme(scheme)) 59 | //+kubebuilder:scaffold:scheme 60 | } 61 | 62 | // nolint: funlen 63 | func main() { 64 | var metricsAddr string 65 | var enableLeaderElection bool 66 | var probeAddr string 67 | var secureMetrics bool 68 | var enableHTTP2 bool 69 | var enableOwnerReferences bool 70 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 71 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 72 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 73 | "Enable leader election for controller manager. "+ 74 | "Enabling this will ensure there is only one active controller manager.") 75 | flag.BoolVar(&secureMetrics, "metrics-secure", false, 76 | "If set the metrics endpoint is served securely") 77 | flag.BoolVar(&enableHTTP2, "enable-http2", false, 78 | "If set, HTTP/2 will be enabled for the metrics and webhook servers") 79 | flag.BoolVar(&enableOwnerReferences, "enable-owner-references", false, "Enable owner references for VolumeSnapshots.") 80 | opts := zap.Options{ 81 | Development: true, 82 | TimeEncoder: zapcore.RFC3339NanoTimeEncoder, 83 | } 84 | opts.BindFlags(flag.CommandLine) 85 | flag.Parse() 86 | 87 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 88 | 89 | setupLog.Info(fmt.Sprintf("Operator Version: %s", snapschedulerVersion)) 90 | setupLog.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) 91 | setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) 92 | 93 | // if the enable-http2 flag is false (the default), http/2 should be disabled 94 | // due to its vulnerabilities. More specifically, disabling http/2 will 95 | // prevent from being vulnerable to the HTTP/2 Stream Cancelation and 96 | // Rapid Reset CVEs. 
For more information see: 97 | // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 98 | // - https://github.com/advisories/GHSA-4374-p667-p6c8 99 | disableHTTP2 := func(c *tls.Config) { 100 | setupLog.Info("disabling http/2") 101 | c.NextProtos = []string{"http/1.1"} 102 | } 103 | 104 | tlsOpts := []func(*tls.Config){} 105 | if !enableHTTP2 { 106 | tlsOpts = append(tlsOpts, disableHTTP2) 107 | } 108 | 109 | webhookServer := webhook.NewServer(webhook.Options{ 110 | TLSOpts: tlsOpts, 111 | }) 112 | 113 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 114 | Scheme: scheme, 115 | Metrics: ctrlMetrics.Options{ 116 | BindAddress: metricsAddr, 117 | SecureServing: secureMetrics, 118 | TLSOpts: tlsOpts, 119 | }, 120 | HealthProbeBindAddress: probeAddr, 121 | LeaderElection: enableLeaderElection, 122 | LeaderElectionID: "cd2d8e9f.backube", 123 | LeaderElectionReleaseOnCancel: true, 124 | WebhookServer: webhookServer, 125 | }) 126 | if err != nil { 127 | setupLog.Error(err, "unable to start manager") 128 | os.Exit(1) 129 | } 130 | 131 | if err = (&controller.SnapshotScheduleReconciler{ 132 | Client: mgr.GetClient(), 133 | Scheme: mgr.GetScheme(), 134 | EnableOwnerReferences: enableOwnerReferences, 135 | }).SetupWithManager(mgr); err != nil { 136 | setupLog.Error(err, "unable to create controller", "controller", "SnapshotSchedule") 137 | os.Exit(1) 138 | } 139 | //+kubebuilder:scaffold:builder 140 | 141 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 142 | setupLog.Error(err, "unable to set up health check") 143 | os.Exit(1) 144 | } 145 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 146 | setupLog.Error(err, "unable to set up ready check") 147 | os.Exit(1) 148 | } 149 | 150 | setupLog.Info("starting manager") 151 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 152 | setupLog.Error(err, "problem running manager") 153 | os.Exit(1) 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /config/crd/bases/snapscheduler.backube_snapshotschedules.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.18.0 7 | name: snapshotschedules.snapscheduler.backube 8 | spec: 9 | group: snapscheduler.backube 10 | names: 11 | kind: SnapshotSchedule 12 | listKind: SnapshotScheduleList 13 | plural: snapshotschedules 14 | singular: snapshotschedule 15 | scope: Namespaced 16 | versions: 17 | - additionalPrinterColumns: 18 | - jsonPath: .spec.schedule 19 | name: Schedule 20 | type: string 21 | - jsonPath: .spec.retention.expires 22 | name: Max age 23 | type: string 24 | - jsonPath: .spec.retention.maxCount 25 | name: Max num 26 | type: integer 27 | - jsonPath: .spec.disabled 28 | name: Disabled 29 | type: boolean 30 | - jsonPath: .status.nextSnapshotTime 31 | name: Next snapshot 32 | type: string 33 | name: v1 34 | schema: 35 | openAPIV3Schema: 36 | description: SnapshotSchedule defines a schedule for taking automated snapshots 37 | of PVC(s) 38 | properties: 39 | apiVersion: 40 | description: |- 41 | APIVersion defines the versioned schema of this representation of an object. 42 | Servers should convert recognized schemas to the latest internal value, and 43 | may reject unrecognized values. 
44 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 45 | type: string 46 | kind: 47 | description: |- 48 | Kind is a string value representing the REST resource this object represents. 49 | Servers may infer this from the endpoint the client submits requests to. 50 | Cannot be updated. 51 | In CamelCase. 52 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 53 | type: string 54 | metadata: 55 | type: object 56 | spec: 57 | description: SnapshotScheduleSpec defines the desired state of SnapshotSchedule 58 | properties: 59 | claimSelector: 60 | description: A filter to select which PVCs to snapshot via this schedule 61 | properties: 62 | matchExpressions: 63 | description: matchExpressions is a list of label selector requirements. 64 | The requirements are ANDed. 65 | items: 66 | description: |- 67 | A label selector requirement is a selector that contains values, a key, and an operator that 68 | relates the key and values. 69 | properties: 70 | key: 71 | description: key is the label key that the selector applies 72 | to. 73 | type: string 74 | operator: 75 | description: |- 76 | operator represents a key's relationship to a set of values. 77 | Valid operators are In, NotIn, Exists and DoesNotExist. 78 | type: string 79 | values: 80 | description: |- 81 | values is an array of string values. If the operator is In or NotIn, 82 | the values array must be non-empty. If the operator is Exists or DoesNotExist, 83 | the values array must be empty. This array is replaced during a strategic 84 | merge patch. 85 | items: 86 | type: string 87 | type: array 88 | x-kubernetes-list-type: atomic 89 | required: 90 | - key 91 | - operator 92 | type: object 93 | type: array 94 | x-kubernetes-list-type: atomic 95 | matchLabels: 96 | additionalProperties: 97 | type: string 98 | description: |- 99 | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 100 | map is equivalent to an element of matchExpressions, whose key field is "key", the 101 | operator is "In", and the values array contains only "value". The requirements are ANDed. 102 | type: object 103 | type: object 104 | x-kubernetes-map-type: atomic 105 | disabled: 106 | description: Indicates that this schedule should be temporarily disabled 107 | type: boolean 108 | retention: 109 | description: Retention determines how long this schedule's snapshots 110 | will be kept. 111 | properties: 112 | expires: 113 | description: |- 114 | The length of time (time.Duration) after which a given Snapshot will be 115 | deleted. 116 | pattern: ^\d+(h|m|s)$ 117 | type: string 118 | maxCount: 119 | description: The maximum number of snapshots to retain per PVC 120 | format: int32 121 | minimum: 1 122 | type: integer 123 | type: object 124 | schedule: 125 | description: |- 126 | Schedule is a Cronspec specifying when snapshots should be taken. See 127 | https://en.wikipedia.org/wiki/Cron for a description of the format. 128 | pattern: ^(@(annually|yearly|monthly|weekly|daily|hourly))|((((\d+,)*\d+|(\d+(\/|-)\d+)|\*(\/\d+)?)\s?){5})$ 129 | type: string 130 | snapshotTemplate: 131 | description: A template to customize the Snapshots. 132 | properties: 133 | labels: 134 | additionalProperties: 135 | type: string 136 | description: |- 137 | A list of labels that should be added to each Snapshot created by this 138 | schedule. 
139 | type: object 140 | snapshotClassName: 141 | description: The name of the VolumeSnapshotClass to be used when 142 | creating Snapshots. 143 | type: string 144 | type: object 145 | type: object 146 | status: 147 | description: SnapshotScheduleStatus defines the observed state of SnapshotSchedule 148 | properties: 149 | conditions: 150 | description: Conditions is a list of conditions related to operator 151 | reconciliation. 152 | items: 153 | description: Condition contains details for one aspect of the current 154 | state of this API Resource. 155 | properties: 156 | lastTransitionTime: 157 | description: |- 158 | lastTransitionTime is the last time the condition transitioned from one status to another. 159 | This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 160 | format: date-time 161 | type: string 162 | message: 163 | description: |- 164 | message is a human readable message indicating details about the transition. 165 | This may be an empty string. 166 | maxLength: 32768 167 | type: string 168 | observedGeneration: 169 | description: |- 170 | observedGeneration represents the .metadata.generation that the condition was set based upon. 171 | For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date 172 | with respect to the current state of the instance. 173 | format: int64 174 | minimum: 0 175 | type: integer 176 | reason: 177 | description: |- 178 | reason contains a programmatic identifier indicating the reason for the condition's last transition. 179 | Producers of specific condition types may define expected values and meanings for this field, 180 | and whether the values are considered a guaranteed API. 181 | The value should be a CamelCase string. 182 | This field may not be empty. 183 | maxLength: 1024 184 | minLength: 1 185 | pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ 186 | type: string 187 | status: 188 | description: status of the condition, one of True, False, Unknown. 189 | enum: 190 | - "True" 191 | - "False" 192 | - Unknown 193 | type: string 194 | type: 195 | description: type of condition in CamelCase or in foo.example.com/CamelCase. 196 | maxLength: 316 197 | pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ 198 | type: string 199 | required: 200 | - lastTransitionTime 201 | - message 202 | - reason 203 | - status 204 | - type 205 | type: object 206 | type: array 207 | lastSnapshotTime: 208 | description: The time of the most recent snapshot taken by this schedule 209 | format: date-time 210 | type: string 211 | nextSnapshotTime: 212 | description: The time of the next scheduled snapshot 213 | format: date-time 214 | type: string 215 | type: object 216 | type: object 217 | served: true 218 | storage: true 219 | subresources: 220 | status: {} 221 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 
3 | # It should be run by config/default 4 | resources: 5 | - bases/snapscheduler.backube_snapshotschedules.yaml 6 | #+kubebuilder:scaffold:crdkustomizeresource 7 | 8 | patches: 9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 | # patches here are for enabling the conversion webhook for each CRD 11 | #- path: patches/webhook_in_snapshotschedules.yaml 12 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 13 | 14 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 15 | # patches here are for enabling the CA injection for each CRD 16 | #- path: patches/cainjection_in_snapshotschedules.yaml 17 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 18 | 19 | # [WEBHOOK] To enable webhook, uncomment the following section 20 | # the following config is for teaching kustomize how to do kustomization for CRDs. 21 | 22 | #configurations: 23 | #- kustomizeconfig.yaml 24 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: snapscheduler-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: snapscheduler- 10 | 11 | # Labels to add to all resources and selectors. 12 | #labels: 13 | #- includeSelectors: true 14 | # pairs: 15 | # someName: someValue 16 | 17 | resources: 18 | - ../crd 19 | - ../rbac 20 | - ../manager 21 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 22 | # crd/kustomization.yaml 23 | #- ../webhook 24 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 25 | #- ../certmanager 26 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 27 | #- ../prometheus 28 | 29 | patches: 30 | # Protect the /metrics endpoint by putting it behind auth. 31 | # If you want your controller-manager to expose the /metrics 32 | # endpoint w/o any authn/z, please comment the following line. 33 | - path: manager_auth_proxy_patch.yaml 34 | 35 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 36 | # crd/kustomization.yaml 37 | #- path: manager_webhook_patch.yaml 38 | 39 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 
40 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 41 | # 'CERTMANAGER' needs to be enabled to use ca injection 42 | #- path: webhookcainjection_patch.yaml 43 | 44 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 45 | # Uncomment the following replacements to add the cert-manager CA injection annotations 46 | #replacements: 47 | # - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs 48 | # kind: Certificate 49 | # group: cert-manager.io 50 | # version: v1 51 | # name: serving-cert # this name should match the one in certificate.yaml 52 | # fieldPath: .metadata.namespace # namespace of the certificate CR 53 | # targets: 54 | # - select: 55 | # kind: ValidatingWebhookConfiguration 56 | # fieldPaths: 57 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 58 | # options: 59 | # delimiter: '/' 60 | # index: 0 61 | # create: true 62 | # - select: 63 | # kind: MutatingWebhookConfiguration 64 | # fieldPaths: 65 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 66 | # options: 67 | # delimiter: '/' 68 | # index: 0 69 | # create: true 70 | # - select: 71 | # kind: CustomResourceDefinition 72 | # fieldPaths: 73 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 74 | # options: 75 | # delimiter: '/' 76 | # index: 0 77 | # create: true 78 | # - source: 79 | # kind: Certificate 80 | # group: cert-manager.io 81 | # version: v1 82 | # name: serving-cert # this name should match the one in certificate.yaml 83 | # fieldPath: .metadata.name 84 | # targets: 85 | # - select: 86 | # kind: ValidatingWebhookConfiguration 87 | # fieldPaths: 88 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 89 | # options: 90 | # delimiter: '/' 91 | # index: 1 92 | # create: true 93 | # - select: 94 | # kind: MutatingWebhookConfiguration 95 | # fieldPaths: 96 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 97 | # options: 98 | # delimiter: '/' 99 | # index: 1 100 | # create: true 101 | # - select: 102 | # kind: CustomResourceDefinition 103 | # fieldPaths: 104 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 105 | # options: 106 | # delimiter: '/' 107 | # index: 1 108 | # create: true 109 | # - source: # Add cert-manager annotation to the webhook Service 110 | # kind: Service 111 | # version: v1 112 | # name: webhook-service 113 | # fieldPath: .metadata.name # namespace of the service 114 | # targets: 115 | # - select: 116 | # kind: Certificate 117 | # group: cert-manager.io 118 | # version: v1 119 | # fieldPaths: 120 | # - .spec.dnsNames.0 121 | # - .spec.dnsNames.1 122 | # options: 123 | # delimiter: '.' 124 | # index: 0 125 | # create: true 126 | # - source: 127 | # kind: Service 128 | # version: v1 129 | # name: webhook-service 130 | # fieldPath: .metadata.namespace # namespace of the service 131 | # targets: 132 | # - select: 133 | # kind: Certificate 134 | # group: cert-manager.io 135 | # version: v1 136 | # fieldPaths: 137 | # - .spec.dnsNames.0 138 | # - .spec.dnsNames.1 139 | # options: 140 | # delimiter: '.' 
141 | # index: 1 142 | # create: true 143 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | securityContext: 14 | allowPrivilegeEscalation: false 15 | capabilities: 16 | drop: 17 | - "ALL" 18 | image: quay.io/brancz/kube-rbac-proxy:v0.19.1 19 | args: 20 | - "--secure-listen-address=0.0.0.0:8443" 21 | - "--upstream=http://127.0.0.1:8080/" 22 | - "--logtostderr=true" 23 | - "--v=0" 24 | ports: 25 | - containerPort: 8443 26 | protocol: TCP 27 | name: https 28 | resources: 29 | limits: 30 | cpu: 500m 31 | memory: 128Mi 32 | requests: 33 | cpu: 5m 34 | memory: 64Mi 35 | - name: manager 36 | args: 37 | - "--health-probe-bind-address=:8081" 38 | - "--metrics-bind-address=127.0.0.1:8080" 39 | - "--leader-elect" 40 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | images: 7 | - name: controller 8 | newName: quay.io/backube/snapscheduler 9 | newTag: 3.5.0 10 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: namespace 7 | app.kubernetes.io/instance: system 8 | app.kubernetes.io/component: manager 9 | app.kubernetes.io/created-by: snapscheduler 10 | app.kubernetes.io/part-of: snapscheduler 11 | app.kubernetes.io/managed-by: kustomize 12 | name: system 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: controller-manager 18 | namespace: system 19 | labels: 20 | control-plane: controller-manager 21 | app.kubernetes.io/name: deployment 22 | app.kubernetes.io/instance: controller-manager 23 | app.kubernetes.io/component: manager 24 | app.kubernetes.io/created-by: snapscheduler 25 | app.kubernetes.io/part-of: snapscheduler 26 | app.kubernetes.io/managed-by: kustomize 27 | spec: 28 | selector: 29 | matchLabels: 30 | control-plane: controller-manager 31 | replicas: 1 32 | template: 33 | metadata: 34 | labels: 35 | control-plane: controller-manager 36 | backube/snapscheduler-affinity: manager 37 | spec: 38 | affinity: 39 | nodeAffinity: 40 | requiredDuringSchedulingIgnoredDuringExecution: 41 | nodeSelectorTerms: 42 | - matchExpressions: 43 | - key: kubernetes.io/arch 44 | operator: In 45 | values: 46 | - amd64 47 | # - arm64 48 | # - 
ppc64le 49 | # - s390x 50 | - key: kubernetes.io/os 51 | operator: In 52 | values: 53 | - linux 54 | podAntiAffinity: 55 | preferredDuringSchedulingIgnoredDuringExecution: 56 | - weight: 100 57 | podAffinityTerm: 58 | labelSelector: 59 | matchExpressions: 60 | - key: backube/snapscheduler-affinity 61 | operator: In 62 | values: 63 | - manager 64 | topologyKey: kubernetes.io/hostname 65 | securityContext: 66 | runAsNonRoot: true 67 | # Uncomment when we no longer support OCP 4.10 68 | # seccompProfile: 69 | # type: RuntimeDefault 70 | containers: 71 | - command: 72 | - /manager 73 | args: 74 | - --leader-elect 75 | image: controller:latest 76 | name: manager 77 | securityContext: 78 | allowPrivilegeEscalation: false 79 | capabilities: 80 | drop: 81 | - "ALL" 82 | livenessProbe: 83 | httpGet: 84 | path: /healthz 85 | port: 8081 86 | initialDelaySeconds: 15 87 | periodSeconds: 20 88 | readinessProbe: 89 | httpGet: 90 | path: /readyz 91 | port: 8081 92 | initialDelaySeconds: 5 93 | periodSeconds: 10 94 | resources: 95 | requests: 96 | cpu: 10m 97 | memory: 100Mi 98 | serviceAccountName: controller-manager 99 | terminationGracePeriodSeconds: 10 100 | nodeSelector: 101 | kubernetes.io/arch: amd64 102 | kubernetes.io/os: linux 103 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # These resources constitute the fully configured set of manifests 2 | # used to generate the 'manifests/' directory in a bundle. 3 | resources: 4 | - bases/snapscheduler.clusterserviceversion.yaml 5 | - ../default 6 | - ../samples 7 | - ../scorecard 8 | 9 | # [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. 10 | # Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. 11 | # These patches remove the unnecessary "cert" volume and its manager container volumeMount. 12 | #patchesJson6902: 13 | #- target: 14 | # group: apps 15 | # version: v1 16 | # kind: Deployment 17 | # name: controller-manager 18 | # namespace: system 19 | # patch: |- 20 | # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. 21 | # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. 22 | # - op: remove 23 | # path: /spec/template/spec/containers/0/volumeMounts/0 24 | # # Remove the "cert" volume, since OLM will create and mount a set of certs. 25 | # # Update the indices in this path if adding or removing volumes in the manager's Deployment. 
26 | # - op: remove 27 | # path: /spec/template/spec/volumes/0 28 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | app.kubernetes.io/name: servicemonitor 9 | app.kubernetes.io/instance: controller-manager-metrics-monitor 10 | app.kubernetes.io/component: metrics 11 | app.kubernetes.io/created-by: snapscheduler 12 | app.kubernetes.io/part-of: snapscheduler 13 | app.kubernetes.io/managed-by: kustomize 14 | name: controller-manager-metrics-monitor 15 | namespace: system 16 | spec: 17 | endpoints: 18 | - path: /metrics 19 | port: https 20 | scheme: https 21 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | selector: 25 | matchLabels: 26 | control-plane: controller-manager 27 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: metrics-reader 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: snapscheduler 9 | app.kubernetes.io/part-of: snapscheduler 10 | app.kubernetes.io/managed-by: kustomize 11 | name: metrics-reader 12 | rules: 13 | - nonResourceURLs: 14 | - "/metrics" 15 | verbs: 16 | - get 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: proxy-role 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: snapscheduler 9 | app.kubernetes.io/part-of: snapscheduler 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-role 12 | rules: 13 | - apiGroups: 14 | - authentication.k8s.io 15 | resources: 16 | - tokenreviews 17 | verbs: 18 | - create 19 | - apiGroups: 20 | - authorization.k8s.io 21 | resources: 22 | - subjectaccessreviews 23 | verbs: 24 | - create 25 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: proxy-rolebinding 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: snapscheduler 9 | app.kubernetes.io/part-of: snapscheduler 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: proxy-role 16 | 
subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: service 7 | app.kubernetes.io/instance: controller-manager-metrics-service 8 | app.kubernetes.io/component: kube-rbac-proxy 9 | app.kubernetes.io/created-by: snapscheduler 10 | app.kubernetes.io/part-of: snapscheduler 11 | app.kubernetes.io/managed-by: kustomize 12 | name: controller-manager-metrics-service 13 | namespace: system 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | targetPort: https 19 | selector: 20 | control-plane: controller-manager 21 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
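# Note: the manager's leader election primarily uses coordination.k8s.io
# Leases; the configmaps rule below supports older configmap-based locks,
# and the events rule lets the manager record leader-election events.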
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: role 7 | app.kubernetes.io/instance: leader-election-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: snapscheduler 10 | app.kubernetes.io/part-of: snapscheduler 11 | app.kubernetes.io/managed-by: kustomize 12 | name: leader-election-role 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - configmaps 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - create 23 | - update 24 | - patch 25 | - delete 26 | - apiGroups: 27 | - coordination.k8s.io 28 | resources: 29 | - leases 30 | verbs: 31 | - get 32 | - list 33 | - watch 34 | - create 35 | - update 36 | - patch 37 | - delete 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - events 42 | verbs: 43 | - create 44 | - patch 45 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: rolebinding 6 | app.kubernetes.io/instance: leader-election-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: snapscheduler 9 | app.kubernetes.io/part-of: snapscheduler 10 | app.kubernetes.io/managed-by: kustomize 11 | name: leader-election-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: leader-election-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - persistentvolumeclaims 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - snapscheduler.backube 17 | resources: 18 | - snapshotschedules 19 | verbs: 20 | - create 21 | - delete 22 | - get 23 | - list 24 | - patch 25 | - update 26 | - watch 27 | - apiGroups: 28 | - snapscheduler.backube 29 | resources: 30 | - snapshotschedules/finalizers 31 | verbs: 32 | - update 33 | - apiGroups: 34 | - snapscheduler.backube 35 | resources: 36 | - snapshotschedules/status 37 | verbs: 38 | - get 39 | - patch 40 | - update 41 | - apiGroups: 42 | - snapshot.storage.k8s.io 43 | resources: 44 | - volumesnapshots 45 | verbs: 46 | - create 47 | - delete 48 | - get 49 | - list 50 | - patch 51 | - update 52 | - watch 53 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: manager-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: snapscheduler 9 | app.kubernetes.io/part-of: snapscheduler 10 | app.kubernetes.io/managed-by: kustomize 11 | name: manager-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: manager-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | 
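# If the manager runs under a different, pre-existing service account (see
# the note in config/rbac/kustomization.yaml), the subject above must be
# updated to match. A hypothetical sketch (the name is illustrative only):
#
# subjects:
#   - kind: ServiceAccount
#     name: my-custom-sa        # hypothetical; use the real account name
#     namespace: system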
-------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: serviceaccount 6 | app.kubernetes.io/instance: controller-manager-sa 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: snapscheduler 9 | app.kubernetes.io/part-of: snapscheduler 10 | app.kubernetes.io/managed-by: kustomize 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/snapshotschedule_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit snapshotschedules. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: snapshotschedule-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: snapscheduler 10 | app.kubernetes.io/part-of: snapscheduler 11 | app.kubernetes.io/managed-by: kustomize 12 | name: snapshotschedule-editor-role 13 | rules: 14 | - apiGroups: 15 | - snapscheduler.backube 16 | resources: 17 | - snapshotschedules 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - snapscheduler.backube 28 | resources: 29 | - snapshotschedules/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/snapshotschedule_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view snapshotschedules. 
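# Bind this role with a RoleBinding for namespace-scoped read-only access,
# or with a ClusterRoleBinding to grant it cluster-wide.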
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: snapshotschedule-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: snapscheduler 10 | app.kubernetes.io/part-of: snapscheduler 11 | app.kubernetes.io/managed-by: kustomize 12 | name: snapshotschedule-viewer-role 13 | rules: 14 | - apiGroups: 15 | - snapscheduler.backube 16 | resources: 17 | - snapshotschedules 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - snapscheduler.backube 24 | resources: 25 | - snapshotschedules/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples of your project ## 2 | resources: 3 | - snapscheduler_v1_snapshotschedule.yaml 4 | #+kubebuilder:scaffold:manifestskustomizesamples 5 | -------------------------------------------------------------------------------- /config/samples/snapscheduler_v1_snapshotschedule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: snapscheduler.backube/v1 3 | kind: SnapshotSchedule 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: snapshotschedule 7 | app.kubernetes.io/instance: daily 8 | app.kubernetes.io/part-of: snapscheduler 9 | app.kubernetes.io/managed-by: kustomize 10 | app.kubernetes.io/created-by: snapscheduler 11 | name: daily 12 | spec: 13 | retention: 14 | maxCount: 7 15 | # schedule fields: min hr dom mo dow 16 | # also supports @shortcuts 17 | schedule: "0 0 * * *" 18 | -------------------------------------------------------------------------------- /config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | -------------------------------------------------------------------------------- /config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - bases/config.yaml 3 | patchesJson6902: 4 | - path: patches/basic.config.yaml 5 | target: 6 | group: scorecard.operatorframework.io 7 | version: v1alpha3 8 | kind: Configuration 9 | name: config 10 | - path: patches/olm.config.yaml 11 | target: 12 | group: scorecard.operatorframework.io 13 | version: v1alpha3 14 | kind: Configuration 15 | name: config 16 | #+kubebuilder:scaffold:patchesJson6902 17 | -------------------------------------------------------------------------------- /config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - basic-check-spec 7 | image: quay.io/operator-framework/scorecard-test:v1.34.1 8 | labels: 9 | suite: basic 10 | test: basic-check-spec-test 11 | -------------------------------------------------------------------------------- /config/scorecard/patches/olm.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - olm-bundle-validation 7 | image: 
quay.io/operator-framework/scorecard-test:v1.34.1 8 | labels: 9 | suite: olm 10 | test: olm-bundle-validation-test 11 | - op: add 12 | path: /stages/0/tests/- 13 | value: 14 | entrypoint: 15 | - scorecard-test 16 | - olm-crds-have-validation 17 | image: quay.io/operator-framework/scorecard-test:v1.34.1 18 | labels: 19 | suite: olm 20 | test: olm-crds-have-validation-test 21 | - op: add 22 | path: /stages/0/tests/- 23 | value: 24 | entrypoint: 25 | - scorecard-test 26 | - olm-crds-have-resources 27 | image: quay.io/operator-framework/scorecard-test:v1.34.1 28 | labels: 29 | suite: olm 30 | test: olm-crds-have-resources-test 31 | - op: add 32 | path: /stages/0/tests/- 33 | value: 34 | entrypoint: 35 | - scorecard-test 36 | - olm-spec-descriptors 37 | image: quay.io/operator-framework/scorecard-test:v1.34.1 38 | labels: 39 | suite: olm 40 | test: olm-spec-descriptors-test 41 | - op: add 42 | path: /stages/0/tests/- 43 | value: 44 | entrypoint: 45 | - scorecard-test 46 | - olm-status-descriptors 47 | image: quay.io/operator-framework/scorecard-test:v1.34.1 48 | labels: 49 | suite: olm 50 | test: olm-status-descriptors-test 51 | -------------------------------------------------------------------------------- /docs/.bundle/config: -------------------------------------------------------------------------------- 1 | --- 2 | BUNDLE_PATH: "vendor/bundle" 3 | -------------------------------------------------------------------------------- /docs/404.md: -------------------------------------------------------------------------------- 1 | # Page not found 2 | 3 | oops... Looks like that was a bad link. 4 | 5 | Please [file an issue to tell us about it]({{ site.github.issues_url }}{{ "/new?title=Docs: Page not found" | uri_escape }}) 6 | -------------------------------------------------------------------------------- /docs/Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | # Serve documentation locally with: 3 | # bundle update 4 | # bundle exec jekyll serve -w -l 5 | 6 | gem "github-pages", group: :jekyll_plugins 7 | -------------------------------------------------------------------------------- /docs/Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | activesupport (7.1.3.2) 5 | base64 6 | bigdecimal 7 | concurrent-ruby (~> 1.0, >= 1.0.2) 8 | connection_pool (>= 2.2.5) 9 | drb 10 | i18n (>= 1.6, < 2) 11 | minitest (>= 5.1) 12 | mutex_m 13 | tzinfo (~> 2.0) 14 | addressable (2.8.6) 15 | public_suffix (>= 2.0.2, < 6.0) 16 | base64 (0.2.0) 17 | bigdecimal (3.1.7) 18 | coffee-script (2.4.1) 19 | coffee-script-source 20 | execjs 21 | coffee-script-source (1.12.2) 22 | colorator (1.1.0) 23 | commonmarker (0.23.10) 24 | concurrent-ruby (1.2.3) 25 | connection_pool (2.4.1) 26 | dnsruby (1.71.0) 27 | simpleidn (~> 0.2.1) 28 | drb (2.2.1) 29 | em-websocket (0.5.3) 30 | eventmachine (>= 0.12.9) 31 | http_parser.rb (~> 0) 32 | ethon (0.16.0) 33 | ffi (>= 1.15.0) 34 | eventmachine (1.2.7) 35 | execjs (2.9.1) 36 | faraday (2.9.0) 37 | faraday-net_http (>= 2.0, < 3.2) 38 | faraday-net_http (3.1.0) 39 | net-http 40 | ffi (1.16.3) 41 | forwardable-extended (2.6.0) 42 | gemoji (4.1.0) 43 | github-pages (231) 44 | github-pages-health-check (= 1.18.2) 45 | jekyll (= 3.9.5) 46 | jekyll-avatar (= 0.8.0) 47 | jekyll-coffeescript (= 1.2.2) 48 | jekyll-commonmark-ghpages (= 0.4.0) 49 | jekyll-default-layout (= 0.1.5) 50 | jekyll-feed (= 0.17.0) 51 | 
jekyll-gist (= 1.5.0) 52 | jekyll-github-metadata (= 2.16.1) 53 | jekyll-include-cache (= 0.2.1) 54 | jekyll-mentions (= 1.6.0) 55 | jekyll-optional-front-matter (= 0.3.2) 56 | jekyll-paginate (= 1.1.0) 57 | jekyll-readme-index (= 0.3.0) 58 | jekyll-redirect-from (= 0.16.0) 59 | jekyll-relative-links (= 0.6.1) 60 | jekyll-remote-theme (= 0.4.3) 61 | jekyll-sass-converter (= 1.5.2) 62 | jekyll-seo-tag (= 2.8.0) 63 | jekyll-sitemap (= 1.4.0) 64 | jekyll-swiss (= 1.0.0) 65 | jekyll-theme-architect (= 0.2.0) 66 | jekyll-theme-cayman (= 0.2.0) 67 | jekyll-theme-dinky (= 0.2.0) 68 | jekyll-theme-hacker (= 0.2.0) 69 | jekyll-theme-leap-day (= 0.2.0) 70 | jekyll-theme-merlot (= 0.2.0) 71 | jekyll-theme-midnight (= 0.2.0) 72 | jekyll-theme-minimal (= 0.2.0) 73 | jekyll-theme-modernist (= 0.2.0) 74 | jekyll-theme-primer (= 0.6.0) 75 | jekyll-theme-slate (= 0.2.0) 76 | jekyll-theme-tactile (= 0.2.0) 77 | jekyll-theme-time-machine (= 0.2.0) 78 | jekyll-titles-from-headings (= 0.5.3) 79 | jemoji (= 0.13.0) 80 | kramdown (= 2.4.0) 81 | kramdown-parser-gfm (= 1.1.0) 82 | liquid (= 4.0.4) 83 | mercenary (~> 0.3) 84 | minima (= 2.5.1) 85 | nokogiri (>= 1.13.6, < 2.0) 86 | rouge (= 3.30.0) 87 | terminal-table (~> 1.4) 88 | github-pages-health-check (1.18.2) 89 | addressable (~> 2.3) 90 | dnsruby (~> 1.60) 91 | octokit (>= 4, < 8) 92 | public_suffix (>= 3.0, < 6.0) 93 | typhoeus (~> 1.3) 94 | html-pipeline (2.14.3) 95 | activesupport (>= 2) 96 | nokogiri (>= 1.4) 97 | http_parser.rb (0.8.0) 98 | i18n (1.14.4) 99 | concurrent-ruby (~> 1.0) 100 | jekyll (3.9.5) 101 | addressable (~> 2.4) 102 | colorator (~> 1.0) 103 | em-websocket (~> 0.5) 104 | i18n (>= 0.7, < 2) 105 | jekyll-sass-converter (~> 1.0) 106 | jekyll-watch (~> 2.0) 107 | kramdown (>= 1.17, < 3) 108 | liquid (~> 4.0) 109 | mercenary (~> 0.3.3) 110 | pathutil (~> 0.9) 111 | rouge (>= 1.7, < 4) 112 | safe_yaml (~> 1.0) 113 | jekyll-avatar (0.8.0) 114 | jekyll (>= 3.0, < 5.0) 115 | jekyll-coffeescript (1.2.2) 116 | coffee-script (~> 2.2) 117 | coffee-script-source (~> 1.12) 118 | jekyll-commonmark (1.4.0) 119 | commonmarker (~> 0.22) 120 | jekyll-commonmark-ghpages (0.4.0) 121 | commonmarker (~> 0.23.7) 122 | jekyll (~> 3.9.0) 123 | jekyll-commonmark (~> 1.4.0) 124 | rouge (>= 2.0, < 5.0) 125 | jekyll-default-layout (0.1.5) 126 | jekyll (>= 3.0, < 5.0) 127 | jekyll-feed (0.17.0) 128 | jekyll (>= 3.7, < 5.0) 129 | jekyll-gist (1.5.0) 130 | octokit (~> 4.2) 131 | jekyll-github-metadata (2.16.1) 132 | jekyll (>= 3.4, < 5.0) 133 | octokit (>= 4, < 7, != 4.4.0) 134 | jekyll-include-cache (0.2.1) 135 | jekyll (>= 3.7, < 5.0) 136 | jekyll-mentions (1.6.0) 137 | html-pipeline (~> 2.3) 138 | jekyll (>= 3.7, < 5.0) 139 | jekyll-optional-front-matter (0.3.2) 140 | jekyll (>= 3.0, < 5.0) 141 | jekyll-paginate (1.1.0) 142 | jekyll-readme-index (0.3.0) 143 | jekyll (>= 3.0, < 5.0) 144 | jekyll-redirect-from (0.16.0) 145 | jekyll (>= 3.3, < 5.0) 146 | jekyll-relative-links (0.6.1) 147 | jekyll (>= 3.3, < 5.0) 148 | jekyll-remote-theme (0.4.3) 149 | addressable (~> 2.0) 150 | jekyll (>= 3.5, < 5.0) 151 | jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0) 152 | rubyzip (>= 1.3.0, < 3.0) 153 | jekyll-sass-converter (1.5.2) 154 | sass (~> 3.4) 155 | jekyll-seo-tag (2.8.0) 156 | jekyll (>= 3.8, < 5.0) 157 | jekyll-sitemap (1.4.0) 158 | jekyll (>= 3.7, < 5.0) 159 | jekyll-swiss (1.0.0) 160 | jekyll-theme-architect (0.2.0) 161 | jekyll (> 3.5, < 5.0) 162 | jekyll-seo-tag (~> 2.0) 163 | jekyll-theme-cayman (0.2.0) 164 | jekyll (> 3.5, < 5.0) 165 | jekyll-seo-tag (~> 2.0) 
166 | jekyll-theme-dinky (0.2.0) 167 | jekyll (> 3.5, < 5.0) 168 | jekyll-seo-tag (~> 2.0) 169 | jekyll-theme-hacker (0.2.0) 170 | jekyll (> 3.5, < 5.0) 171 | jekyll-seo-tag (~> 2.0) 172 | jekyll-theme-leap-day (0.2.0) 173 | jekyll (> 3.5, < 5.0) 174 | jekyll-seo-tag (~> 2.0) 175 | jekyll-theme-merlot (0.2.0) 176 | jekyll (> 3.5, < 5.0) 177 | jekyll-seo-tag (~> 2.0) 178 | jekyll-theme-midnight (0.2.0) 179 | jekyll (> 3.5, < 5.0) 180 | jekyll-seo-tag (~> 2.0) 181 | jekyll-theme-minimal (0.2.0) 182 | jekyll (> 3.5, < 5.0) 183 | jekyll-seo-tag (~> 2.0) 184 | jekyll-theme-modernist (0.2.0) 185 | jekyll (> 3.5, < 5.0) 186 | jekyll-seo-tag (~> 2.0) 187 | jekyll-theme-primer (0.6.0) 188 | jekyll (> 3.5, < 5.0) 189 | jekyll-github-metadata (~> 2.9) 190 | jekyll-seo-tag (~> 2.0) 191 | jekyll-theme-slate (0.2.0) 192 | jekyll (> 3.5, < 5.0) 193 | jekyll-seo-tag (~> 2.0) 194 | jekyll-theme-tactile (0.2.0) 195 | jekyll (> 3.5, < 5.0) 196 | jekyll-seo-tag (~> 2.0) 197 | jekyll-theme-time-machine (0.2.0) 198 | jekyll (> 3.5, < 5.0) 199 | jekyll-seo-tag (~> 2.0) 200 | jekyll-titles-from-headings (0.5.3) 201 | jekyll (>= 3.3, < 5.0) 202 | jekyll-watch (2.2.1) 203 | listen (~> 3.0) 204 | jemoji (0.13.0) 205 | gemoji (>= 3, < 5) 206 | html-pipeline (~> 2.2) 207 | jekyll (>= 3.0, < 5.0) 208 | kramdown (2.4.0) 209 | rexml 210 | kramdown-parser-gfm (1.1.0) 211 | kramdown (~> 2.0) 212 | liquid (4.0.4) 213 | listen (3.9.0) 214 | rb-fsevent (~> 0.10, >= 0.10.3) 215 | rb-inotify (~> 0.9, >= 0.9.10) 216 | mercenary (0.3.6) 217 | mini_portile2 (2.8.8) 218 | minima (2.5.1) 219 | jekyll (>= 3.5, < 5.0) 220 | jekyll-feed (~> 0.9) 221 | jekyll-seo-tag (~> 2.1) 222 | minitest (5.22.3) 223 | mutex_m (0.2.0) 224 | net-http (0.4.1) 225 | uri 226 | nokogiri (1.18.8) 227 | mini_portile2 (~> 2.8.2) 228 | racc (~> 1.4) 229 | octokit (4.25.1) 230 | faraday (>= 1, < 3) 231 | sawyer (~> 0.9) 232 | pathutil (0.16.2) 233 | forwardable-extended (~> 2.6) 234 | public_suffix (5.0.4) 235 | racc (1.8.1) 236 | rb-fsevent (0.11.2) 237 | rb-inotify (0.10.1) 238 | ffi (~> 1.0) 239 | rexml (3.3.9) 240 | rouge (3.30.0) 241 | rubyzip (2.3.2) 242 | safe_yaml (1.0.5) 243 | sass (3.7.4) 244 | sass-listen (~> 4.0.0) 245 | sass-listen (4.0.0) 246 | rb-fsevent (~> 0.9, >= 0.9.4) 247 | rb-inotify (~> 0.9, >= 0.9.7) 248 | sawyer (0.9.2) 249 | addressable (>= 2.3.5) 250 | faraday (>= 0.17.3, < 3) 251 | simpleidn (0.2.1) 252 | unf (~> 0.1.4) 253 | terminal-table (1.8.0) 254 | unicode-display_width (~> 1.1, >= 1.1.1) 255 | typhoeus (1.4.1) 256 | ethon (>= 0.9.0) 257 | tzinfo (2.0.6) 258 | concurrent-ruby (~> 1.0) 259 | unf (0.1.4) 260 | unf_ext 261 | unf_ext (0.0.9.1) 262 | unicode-display_width (1.8.0) 263 | uri (0.13.2) 264 | 265 | PLATFORMS 266 | ruby 267 | 268 | DEPENDENCIES 269 | github-pages 270 | 271 | BUNDLED WITH 272 | 2.2.33 273 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Documentation for snapscheduler 2 | 3 | This directory holds the source for snapscheduler's documentation. 4 | 5 | The documentation can be viewed at the github-pages site: 6 | [https://backube.github.io/snapscheduler](https://backube.github.io/snapscheduler) 7 | 8 | ------ 9 | 10 | The documentation can be viewed/edited locally using [jekyll](https://jekyllrb.com/). 
11 | 12 | ## Prerequisites 13 | 14 | * Install Ruby 15 | * Fedora: `sudo dnf install ruby ruby-devel @development-tools` 16 | * Install bundler 17 | * `gem install bundler` 18 | 19 | ## Serve the docs locally 20 | 21 | * Switch to the `/docs` directory 22 | * Install/update the local gems 23 | * `bundle update` 24 | * Serve the docs 25 | * `PAGES_REPO_NWO=backube/snapscheduler bundle exec jekyll serve -l -w` 26 | -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # NOTE: this file is *NOT* reloaded automatically when you use 3 | # 'bundle exec jekyll serve'. If you change this file, please restart the 4 | # server process. 5 | # 6 | # Site settings 7 | # These are used to personalize your new site. If you look in the HTML files, 8 | # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on. 9 | # You can create any custom variable you would like, and they will be accessible 10 | # in the templates via {{ site.myvariable }}. 11 | # Full list of site.github metadata: 12 | # https://jekyll.github.io/github-metadata/site.github/ 13 | 14 | title: snapscheduler 15 | description: >- 16 | Take scheduled snapshots of your Kubernetes persistent volumes. 17 | logo: media/snapscheduler.svg 18 | show_downloads: false 19 | google_analytics: UA-154117660-1 20 | 21 | # Build settings 22 | # theme: jekyll-theme-leap-day # https://github.com/pages-themes/leap-day 23 | theme: jekyll-theme-minimal # https://github.com/pages-themes/minimal 24 | # theme: jekyll-theme-slate # https://github.com/pages-themes/slate 25 | plugins: 26 | - jemoji 27 | 28 | # Exclude from processing. 29 | # The following items will not be processed, by default. 30 | # Any item listed under the `exclude:` key here will be automatically added to 31 | # the internal "default list". 32 | # 33 | # Excluded items can be processed by explicitly listing the directories or 34 | # their entries' file path in the `include:` list. 35 | # 36 | # exclude: 37 | # - .sass-cache/ 38 | # - .jekyll-cache/ 39 | # - gemfiles/ 40 | # - Gemfile 41 | # - Gemfile.lock 42 | # - node_modules/ 43 | # - vendor/bundle/ 44 | # - vendor/cache/ 45 | # - vendor/gems/ 46 | # - vendor/ruby/ 47 | -------------------------------------------------------------------------------- /docs/_includes/twitter-follow.html: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/_layouts/default.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | {% if site.google_analytics %} 9 | 10 | 11 | 18 | {% endif %} 19 | {% seo %} 20 | 21 | 24 | 25 | 26 |
27 |       <header>
28 |         <h1><a href="{{ "/" | absolute_url }}">{{ site.title | default: site.github.repository_name }}</a></h1>
29 |
30 |         {% if site.logo %}
31 |           <img src="{{site.logo | relative_url}}" alt="Logo" />
32 |         {% endif %}
33 |
34 |         <p>{{ site.description | default: site.github.project_tagline }}</p>
35 |
36 |         {% if site.github.is_project_page %}
37 |         <p class="view"><a href="{{ site.github.repository_url }}">View the Project on GitHub <small>{{ site.github.repository_nwo }}</small></a></p>
38 |         {% endif %}
39 |
40 |         {% if site.github.is_user_page %}
41 |         <p class="view"><a href="{{ site.github.owner_url }}">View My GitHub Profile</a></p>
42 |         {% endif %}
43 |
44 |         {% if site.show_downloads %}
45 |         <ul class="downloads">
46 |           <li><a href="{{ site.github.zip_url }}">Download <strong>ZIP File</strong></a></li>
47 |           <li><a href="{{ site.github.tar_url }}">Download <strong>TAR Ball</strong></a></li>
48 |           <li><a href="{{ site.github.repository_url }}">View On <strong>GitHub</strong></a></li>
49 |         </ul>
50 |         {% endif %}
51 |       </header>
52 |       <section>
53 |
54 |       {{ content }}
55 |
56 |       </section>
57 |       <footer>
58 |         {% if site.github.is_project_page %}
59 |         <p>This project is maintained by <a href="{{ site.github.owner_url }}">{{ site.github.owner_name }}</a></p>
60 |         {% endif %}
61 |         {% include twitter-follow.html %}
62 |         <p><small>Hosted on GitHub Pages &mdash; Theme by <a href="https://github.com/orderedlist">orderedlist</a></small></p>
63 |       </footer>
64 |     </div>
65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /docs/development.md: -------------------------------------------------------------------------------- 1 | # Development 2 | 3 | This page provides an overview of how to get started enhancing snapscheduler. 4 | 5 | ## Prerequisites 6 | 7 | Required: 8 | 9 | - A working Go environment 10 | - Docker 11 | - [operator-sdk](https://github.com/operator-framework/operator-sdk) 12 | - Check 13 | [Makefile](https://github.com/backube/snapscheduler/blob/master/Makefile) 14 | for the proper version to use 15 | - Can be installed by: `make operator-sdk` 16 | - [kind](https://kind.sigs.k8s.io/) 17 | - Recommended for running E2E tests in combination with the CSI hostpath 18 | driver 19 | - Check 20 | [Makefile](https://github.com/backube/snapscheduler/blob/master/Makefile) 21 | for the proper version to use 22 | 23 | Recommended: 24 | 25 | - markdownlint 26 | - yamllint 27 | - shellcheck 28 | 29 | ## Building the code 30 | 31 | It is possible to run the operator locally against a running cluster. This 32 | enables quick turnaround during development. With a running cluster (and 33 | properly configured kubeconfig): 34 | 35 | Install the CRDs: 36 | 37 | ```console 38 | $ make install 39 | /home/jstrunk/src/backube/snapscheduler/bin/controller-gen "crd:trivialVersions=true,preserveUnknownFields=false" rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 40 | cp config/crd/bases/* helm/snapscheduler/crds 41 | /home/jstrunk/src/backube/snapscheduler/bin/kustomize build config/crd | kubectl apply -f - 42 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube created 43 | ``` 44 | 45 | Run the operator locally: 46 | 47 | ```console 48 | $ make run 49 | /home/jstrunk/src/backube/snapscheduler/bin/controller-gen "crd:trivialVersions=true,preserveUnknownFields=false" rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 50 | cp config/crd/bases/* helm/snapscheduler/crds 51 | /home/jstrunk/src/backube/snapscheduler/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..." 52 | /home/jstrunk/src/backube/snapscheduler/bin/golangci-lint run ./... 53 | go run -ldflags -X=main.snapschedulerVersion=v1.1.0-105-g53576a0-dirty ./main.go 54 | 2021-07-20T13:18:58.059-0400 INFO setup Operator Version: v1.1.0-105-g53576a0-dirty 55 | 2021-07-20T13:18:58.059-0400 INFO setup Go Version: go1.16.4 56 | 2021-07-20T13:18:58.059-0400 INFO setup Go OS/Arch: linux/amd64 57 | 2021-07-20T13:18:58.969-0400 INFO controller-runtime.metrics metrics server is starting to listen {"addr": ":8080"} 58 | 2021-07-20T13:18:58.992-0400 INFO setup starting manager 59 | 2021-07-20T13:18:58.993-0400 INFO controller-runtime.manager starting metrics server {"path": "/metrics"} 60 | 2021-07-20T13:18:58.993-0400 INFO controller-runtime.manager.controller.snapshotschedule Starting EventSource {"reconciler group": "snapscheduler.backube", "reconciler kind": "SnapshotSchedule", "source": "kind source: /, Kind="} 61 | 2021-07-20T13:18:59.094-0400 INFO controller-runtime.manager.controller.snapshotschedule Starting Controller {"reconciler group": "snapscheduler.backube", "reconciler kind": "SnapshotSchedule"} 62 | 2021-07-20T13:18:59.094-0400 INFO controller-runtime.manager.controller.snapshotschedule Starting workers {"reconciler group": "snapscheduler.backube", "reconciler kind": "SnapshotSchedule", "worker count": 1} 63 | ... 
64 | ``` 65 | 66 | ## CI system & E2E testing 67 | 68 | The CI system (GitHub Actions) checks each PR by running both the linters + unit 69 | tests (mentioned above) and end-to-end tests. These tests are run across a 70 | number of kubernetes versions (see `KUBERNETES_VERSIONS` in 71 | [`.github/workflows/tests.yml`](https://github.com/backube/snapscheduler/blob/master/.github/workflows/tests.yml)). 72 | 73 | ### Running E2E locally 74 | 75 | The same scripts that are used in CI can be used to test and develop locally: 76 | 77 | - The 78 | [`hack/setup-kind-cluster.sh`](https://github.com/backube/snapscheduler/blob/master/hack/setup-kind-cluster.sh) 79 | script will create a Kubernetes cluster and install the CSI hostpath driver. 80 | The `KUBE_VERSION` environment variable can be used to change the Kubernetes 81 | version. Note that this must be a specific version `X.Y.Z` that has a Kind 82 | container. 83 | - The 84 | [`hack/run-in-kind.sh`](https://github.com/backube/snapscheduler/blob/master/hack/run-in-kind.sh) 85 | script will build the operator image, inject it into the Kind cluster, and use 86 | the local helm chart to start it. 87 | 88 | After running the above two scripts, you should have a running cluster with a 89 | suitable CSI driver and the snapscheduler running, ready for testing. 90 | 91 | The E2E tests can then be executed via: 92 | 93 | ```console 94 | $ make test-e2e 95 | cd test-kuttl && /home/jstrunk/src/backube/snapscheduler/bin/kuttl test 96 | === RUN kuttl 97 | harness.go:457: starting setup 98 | harness.go:248: running tests using configured kubeconfig. 99 | harness.go:285: Successful connection to cluster at: https://127.0.0.1:37729 100 | harness.go:353: running tests 101 | harness.go:74: going to run test suite with timeout of 30 seconds for each step 102 | harness.go:365: testsuite: ./e2e has 6 tests 103 | === RUN kuttl/harness 104 | === RUN kuttl/harness/custom-snapclass 105 | === PAUSE kuttl/harness/custom-snapclass 106 | ... 107 | === CONT kuttl 108 | harness.go:399: run tests finished 109 | harness.go:508: cleaning up 110 | harness.go:563: removing temp folder: "" 111 | --- PASS: kuttl (80.81s) 112 | --- PASS: kuttl/harness (0.00s) 113 | --- PASS: kuttl/harness/minimal-schedule (15.31s) 114 | --- PASS: kuttl/harness/label-selector-equality (78.02s) 115 | --- PASS: kuttl/harness/template-labels (78.02s) 116 | --- PASS: kuttl/harness/custom-snapclass (78.02s) 117 | --- PASS: kuttl/harness/multi-pvc (78.04s) 118 | --- PASS: kuttl/harness/label-selector-set (78.04s) 119 | PASS 120 | ``` 121 | 122 | ### Testing w/ OLM 123 | 124 | To test the deployment of SnapScheduler w/ OLM (i.e., using the bundle that will 125 | be consumed in OpenShift), it's necessary to build the bundle, bundle image, and 126 | a catalog image. 127 | 128 | Build and push the bundle and catalog images: 129 | 130 | - IMG: The operator image that will be referenced in the bundle 131 | - IMAGE_TAG_BASE: The base name for the bundle & catalog images (i.e., 132 | foo-bundle, foo-catalog) 133 | - CHANNELS: The list of channels that the bundle will belong to 134 | - DEFAULT_CHANNEL: The default channel when someone installs 135 | - VERSION: The bundle version number (likely the same as the operator version) 136 | 137 | ```console 138 | $ make bundle bundle-build bundle-push catalog-build catalog-push IMAGE_TAG_BASE=quay.io/johnstrunk/snapscheduler CHANNELS="candidate,stable" DEFAULT_CHANNEL=stable IMG=quay.io/backube/snapscheduler:latest VERSION=2.0.0 139 | ... 
140 | ``` 141 | 142 | Create a kind cluster & start OLM on it: 143 | 144 | ```console 145 | $ hack/setup-kind-cluster.sh 146 | ... 147 | $ bin/operator-sdk olm install 148 | ... 149 | ``` 150 | 151 | Add the new catalog image to the cluster: 152 | 153 | ```console 154 | $ kubectl -n olm apply -f - < get snapshotschedules 44 | ``` 45 | 46 | ## Manual installation 47 | 48 | Manual installation consists of several steps: 49 | 50 | * Installing the CRD for snapshotschedules 51 | * Installing the operator. 52 | 53 | ### Install the CRD 54 | 55 | Prior to installing the operator, the CustomResourceDefinition for 56 | snapshotschedules needs to be added to the cluster. This operation only needs to 57 | be performed once per cluster, but it does require elevated permissions (to add 58 | the CRD). 59 | 60 | Install the CRD: 61 | 62 | ```console 63 | $ make install 64 | /home/jstrunk/src/backube/snapscheduler/bin/controller-gen "crd:trivialVersions=true,preserveUnknownFields=false" rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 65 | cp config/crd/bases/* helm/snapscheduler/crds 66 | /home/jstrunk/src/backube/snapscheduler/bin/kustomize build config/crd | kubectl apply -f - 67 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube configured 68 | ``` 69 | 70 | ### Install the operator 71 | 72 | Once the CRD has been added to the cluster, the operator can be installed. The 73 | operator will be installed into the `snapscheduler-system` namespace. 74 | 75 | ```console 76 | $ make deploy 77 | /home/jstrunk/src/backube/snapscheduler/bin/controller-gen "crd:trivialVersions=true,preserveUnknownFields=false" rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 78 | cp config/crd/bases/* helm/snapscheduler/crds 79 | cd config/manager && /home/jstrunk/src/backube/snapscheduler/bin/kustomize edit set image controller=quay.io/backube/snapscheduler:latest 80 | /home/jstrunk/src/backube/snapscheduler/bin/kustomize build config/default | kubectl apply -f - 81 | namespace/snapscheduler-system created 82 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube created 83 | serviceaccount/snapscheduler-controller-manager created 84 | role.rbac.authorization.k8s.io/snapscheduler-leader-election-role created 85 | clusterrole.rbac.authorization.k8s.io/snapscheduler-manager-role created 86 | clusterrole.rbac.authorization.k8s.io/snapscheduler-metrics-reader created 87 | clusterrole.rbac.authorization.k8s.io/snapscheduler-proxy-role created 88 | rolebinding.rbac.authorization.k8s.io/snapscheduler-leader-election-rolebinding created 89 | clusterrolebinding.rbac.authorization.k8s.io/snapscheduler-manager-rolebinding created 90 | clusterrolebinding.rbac.authorization.k8s.io/snapscheduler-proxy-rolebinding created 91 | configmap/snapscheduler-manager-config created 92 | service/snapscheduler-controller-manager-metrics-service created 93 | deployment.apps/snapscheduler-controller-manager created 94 | ``` 95 | 96 | Verify the operator starts: 97 | 98 | ```console 99 | $ kubectl -n snapscheduler-system get deployment/snapscheduler-controller-manager 100 | NAME READY UP-TO-DATE AVAILABLE AGE 101 | snapscheduler-controller-manager 1/1 1 1 4m15s 102 | ``` 103 | 104 | Once the operator is running, [continue on to usage](usage.md). 
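If the deployment does not become ready, the operator's logs are the first place to look. A quick sketch, assuming the default resource names created by `make deploy` above (the `manager` container name comes from the deployment manifest):

```console
$ kubectl -n snapscheduler-system logs deployment/snapscheduler-controller-manager -c manager
```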
105 | -------------------------------------------------------------------------------- /docs/labeling.md: -------------------------------------------------------------------------------- 1 | # Labeling strategies 2 | 3 | PVCs are selected for snapshotting via labels. Each `snapshotschedule` has a 4 | label selector that is used to determine which PVCs are subject to the schedule. 5 | There are a number of different strategies that can be employed, several of 6 | which are described below. These are just suggestions to consider and can be 7 | customized as necessary to fit within a given environment. 8 | 9 | ## Application-centric labeling 10 | 11 | This labeling approach is best suited to situations where an application's data 12 | is tagged with a common set of labels (e.g., `app=myapp`), and a schedule is 13 | being defined specifically for that application. 14 | 15 | In this case, the application's label can be directly incorporated into a custom 16 | schedule for that application: 17 | 18 | ```yaml 19 | --- 20 | apiVersion: snapscheduler.backube/v1 21 | kind: SnapshotSchedule 22 | metadata: 23 | name: myapp 24 | spec: 25 | claimSelector: 26 | matchLabels: 27 | app: myapp 28 | # ...other fields omitted... 29 | ``` 30 | 31 | The main benefit to this method is that the application's manifests do not need 32 | to be modified. 33 | 34 | ## Schedule-centric labeling 35 | 36 | This labeling approach is best for situations where it is desirable to have a 37 | standard set of schedules that can be used by different applications in an ad 38 | hoc manner. 39 | 40 | Schedules can be defined with their own unique label: 41 | 42 | ```yaml 43 | --- 44 | apiVersion: snapscheduler.backube/v1 45 | kind: SnapshotSchedule 46 | metadata: 47 | name: hourly 48 | spec: 49 | claimSelector: 50 | matchLabels: 51 | "schedule/hourly": "enabled" 52 | schedule: "@hourly" 53 | 54 | --- 55 | apiVersion: snapscheduler.backube/v1 56 | kind: SnapshotSchedule 57 | metadata: 58 | name: daily 59 | spec: 60 | claimSelector: 61 | matchLabels: 62 | "schedule/daily": "enabled" 63 | schedule: "@daily" 64 | ``` 65 | 66 | Individual PVCs can be tagged to use one or more of these standard schedules by 67 | including the appropriate label(s): 68 | 69 | ```yaml 70 | --- 71 | apiVersion: v1 72 | kind: PersistentVolumeClaim 73 | metadata: 74 | name: mydata 75 | labels: 76 | "schedule/hourly": "enabled" 77 | "schedule/daily": "enabled" 78 | spec: 79 | # ...omitted... 80 | ``` 81 |
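Existing PVCs can also be opted into (or out of) one of these standard schedules without editing their manifests by applying or removing the label with `kubectl`. A sketch, using a hypothetical `otherdata` PVC and the `schedule/daily` label key from the examples above:

```console
$ kubectl label pvc otherdata schedule/daily=enabled
persistentvolumeclaim/otherdata labeled

$ kubectl label pvc otherdata schedule/daily-
persistentvolumeclaim/otherdata unlabeled
```

The trailing `-` form removes the label, taking the PVC out of the schedule's selection.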
82 | ## Service-level labeling 83 | 84 | Building on the above example, a class of service for snapshot-based protection 85 | could be defined. For example, it is possible to define a "gold" tier that 86 | provides: 87 | 88 | - 6 hourly snapshots 89 | - 7 daily snapshots 90 | - 4 weekly snapshots 91 | 92 | ```yaml 93 | --- 94 | apiVersion: snapscheduler.backube/v1 95 | kind: SnapshotSchedule 96 | metadata: 97 | name: gold-hourly 98 | spec: 99 | claimSelector: 100 | matchLabels: 101 | "snapshot-tier": "gold" 102 | retention: 103 | maxCount: 6 104 | schedule: "@hourly" 105 | 106 | --- 107 | apiVersion: snapscheduler.backube/v1 108 | kind: SnapshotSchedule 109 | metadata: 110 | name: gold-daily 111 | spec: 112 | claimSelector: 113 | matchLabels: 114 | "snapshot-tier": "gold" 115 | retention: 116 | maxCount: 7 117 | schedule: "@daily" 118 | 119 | --- 120 | apiVersion: snapscheduler.backube/v1 121 | kind: SnapshotSchedule 122 | metadata: 123 | name: gold-weekly 124 | spec: 125 | claimSelector: 126 | matchLabels: 127 | "snapshot-tier": "gold" 128 | retention: 129 | maxCount: 4 130 | schedule: "@weekly" 131 | ``` 132 | 133 | A PVC can then reference the snapshot tier: 134 | 135 | ```yaml 136 | --- 137 | apiVersion: v1 138 | kind: PersistentVolumeClaim 139 | metadata: 140 | name: mydata 141 | labels: 142 | "snapshot-tier": "gold" 143 | spec: 144 | # ...omitted... 145 | ``` 146 | -------------------------------------------------------------------------------- /docs/media/snapscheduler_repocard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/backube/snapscheduler/a2da66a642f5a04ff31635c121dda514d489128f/docs/media/snapscheduler_repocard.png -------------------------------------------------------------------------------- /docs/roadmap.md: -------------------------------------------------------------------------------- 1 | # Project tracking/roadmap 2 | 3 | Project tracking is handled via the standard GitHub Issue/PR workflow. To help 4 | with prioritization of issues, open items are tracked on the main [Work items 5 | project board](https://github.com/backube/snapscheduler/projects/1). 6 | 7 | The project board provides Kanban-style tracking of ongoing and planned work, 8 | with [project-bot](https://github.com/apps/project-bot) automating the movement 9 | of cards as they progress. 10 | 11 | ## Longer-term items 12 | 13 | Longer-term roadmap items may not yet be tracked on the project board. Such 14 | items include: 15 | 16 | ### Cluster-wide schedule definitions 17 | 18 | This would introduce the notion of a cluster-scoped snapshot schedule that 19 | could be defined by an admin and made available to all users. For example, 20 | cluster-wide standard schedules like hourly, daily, and weekly could be 21 | pre-defined. 22 | 23 | ### StorageClass-based schedules 24 | 25 | Currently, schedules are applied at the PVC level. However, a StorageClass is 26 | the abstraction where a particular level of storage should be defined. The 27 | "level" of storage should cover not only items like performance and reliability but 28 | also data protection, including snapshot policies. 29 | 30 | This item would allow schedules to be associated with a particular 31 | StorageClass, and all PVCs that derive from that class would be snapshotted 32 | according to the associated schedule.
33 | 34 | *The loose coupling of PVC to SC as well as the structure of the underlying etcd 35 | database make this a non-straightforward extension.* 36 | -------------------------------------------------------------------------------- /docs/usage.md: -------------------------------------------------------------------------------- 1 | # Using the scheduler 2 | 3 | The scheduler should already be running in the cluster. If not, go back to 4 | [installation](install.md). 5 | 6 | ## Creating schedules 7 | 8 | A snapshot schedule defines: 9 | 10 | * A [cron-like schedule](https://en.wikipedia.org/wiki/Cron#Overview) for taking 11 | snapshots 12 | * The set of PVCs that will be selected to snapshot 13 | * The retention policy for the snapshots 14 | 15 | ### Example schedule 16 | 17 | Below is an example snapshot schedule to perform hourly snapshots: 18 | 19 | ```yaml 20 | --- 21 | apiVersion: snapscheduler.backube/v1 22 | kind: SnapshotSchedule 23 | metadata: 24 | # The name for this schedule. It is also used as a part 25 | # of the template for naming the snapshots. 26 | name: hourly 27 | # Schedules are namespaced objects 28 | namespace: myns 29 | spec: 30 | # A LabelSelector to control which PVCs should be snapshotted 31 | claimSelector: # optional 32 | # Set to true to make the schedule inactive 33 | disabled: false # optional 34 | retention: 35 | # The length of time a given snapshot should be 36 | # retained, specified in hours. (168h = 1 week) 37 | expires: "168h" # optional 38 | # The maximum number of snapshots per PVC to keep 39 | maxCount: 10 # optional 40 | # The cronspec (https://en.wikipedia.org/wiki/Cron#Overview) 41 | # that defines the schedule. It is interpreted with 42 | # respect to the UTC timezone. The following pre-defined 43 | # shortcuts are also supported: @hourly, @daily, @weekly, 44 | # @monthly, and @yearly 45 | schedule: "0 * * * *" 46 | snapshotTemplate: 47 | # A set of labels can be added to each 48 | # VolumeSnapshot object 49 | labels: # optional 50 | mylabel: myvalue 51 | # The SnapshotClassName to use when creating the 52 | # snapshots. If omitted, the cluster default will 53 | # be used. 54 | snapshotClassName: ebs-csi # optional 55 | ``` 56 | 57 | ### Schedule cronspec 58 | 59 | The `spec.schedule` defines when snapshots are to be taken. This field follows 60 | the standard 5-tuple [cronspec 61 | format](https://en.wikipedia.org/wiki/Cron#Overview). While the schedule 62 | shortcuts of `@hourly`, `@daily`, `@weekly`, `@monthly`, and `@yearly` are 63 | supported, they are not recommended because they cause all schedules that use 64 | them to trigger simultaneously. For example, `@hourly` is equivalent to 65 | `0 * * * *`, so every schedule using `@hourly` would trigger at the top of the 66 | hour, potentially placing excessive load on the cluster or storage system. A 67 | better approach is to keep the same frequency but trigger at a unique offset, 68 | such as 17 minutes after the hour: `17 * * * *`. By choosing a different 69 | offset for each schedule, the load can be spread 70 | throughout the 71 | hour. 72 | 73 | Dates and times specified in the cronspec are relative to the UTC timezone 74 | (i.e., a schedule of `"0 5 * * *"` will create a snapshot once per day at 5:00 75 | AM UTC). 76 | 77 | ### Snapshot retention 78 | 79 | The `spec.retention` field permits specifying how long a snapshot should be 80 | retained. The retention can be specified either as a duration or as a maximum 81 | number of snapshots (per PVC) to keep. If both are specified, snapshots will 82 | be deleted according to the more restrictive of the two. For example, the hourly 83 | schedule shown above will keep a maximum of 10 snapshots: because new snapshots 84 | are taken hourly, that limit is more restrictive than the 168-hour expiration. 85 | 86 | ### Selecting PVCs 87 | 88 | The `spec.claimSelector` is an optional field that can be used to limit which PVCs 89 | are snapshotted according to the schedule. This field is a 90 | `metav1.LabelSelector`. Please see the Kubernetes documentation on [label 91 | selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) 92 | for a full explanation. 93 | 94 | The claim selector supports both the simple `matchLabels` as well as the newer 95 | set-based matching; an example of each is shown below. To use matchLabels, include the following: 96 | 97 | ```yaml 98 | spec: 99 | claimSelector: 100 | matchLabels: 101 | thislabel: that 102 | ``` 103 | 104 | Including the above in the schedule would limit the schedule to only PVCs that 105 | carry a label of `thislabel: that` in their `metadata.labels` list.
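Set-based matching uses `matchExpressions` instead. The following sketch (the label key and values are illustrative) selects PVCs whose `environment` label is either `production` or `staging`:

```yaml
spec:
  claimSelector:
    matchExpressions:
      - key: environment
        operator: In
        values:
          - production
          - staging
```

Valid operators are `In`, `NotIn`, `Exists`, and `DoesNotExist`, and multiple expressions are ANDed together.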
106 | 107 | ## Viewing schedules 108 | 109 | The existing schedules can be viewed by: 110 | 111 | ```console 112 | $ kubectl -n myns get snapshotschedules 113 | NAME SCHEDULE MAX AGE MAX NUM DISABLED NEXT SNAPSHOT 114 | hourly 0 * * * * 168h 10 2019-11-01T20:00:00Z 115 | ``` 116 | 117 | ## Snapshots 118 | 119 | The snapshots that are created by a schedule are named by the following 120 | template: `<pvcname>-<schedulename>-<timestamp>` 121 | 122 | The example below shows two snapshots of the PVC named `data` which were taken 123 | by the `hourly` schedule. The time of these two snapshots is visible in the 124 | `YYYYMMDDHHMM` format, UTC timezone. 125 | 126 | ```console 127 | $ kubectl -n myns get volumesnapshots 128 | NAME AGE 129 | data-hourly-201911011900 82m 130 | data-hourly-201911012000 22m 131 | ``` 132 | 133 | ## Quotas & Limiting resource usage 134 | 135 | Schedules that lack a retention policy can create a potentially unbounded number 136 | of snapshots. This has the potential to exhaust the underlying storage system or 137 | to overwhelm the Kubernetes etcd store with objects. To prevent this, it is 138 | suggested that ResourceQuotas be used to limit the total number of snapshots 139 | that can be created in a namespace. 140 | 141 | [Object count 142 | quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota) 143 | are the recommended way to limit Snapshot resources. For example, the following 144 | ResourceQuota object will limit the maximum number of snapshots in the specified 145 | namespace to 50: 146 | 147 | ```yaml 148 | apiVersion: v1 149 | kind: ResourceQuota 150 | metadata: 151 | name: snapshots 152 | namespace: default 153 | spec: 154 | hard: 155 | count/volumesnapshots.snapshot.storage.k8s.io: "50" 156 | ``` 157 | 158 | Remember to leave sufficient headroom for old snapshots to be expired 159 | asynchronously as well as for other snapshot use-cases.
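Current consumption against the quota can be checked with `kubectl describe`. A sketch of what that looks like for the quota above (the counts shown are illustrative):

```console
$ kubectl -n default describe resourcequota snapshots
Name:       snapshots
Namespace:  default
Resource                                       Used  Hard
--------                                       ----  ----
count/volumesnapshots.snapshot.storage.k8s.io  12    50
```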
160 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/backube/snapscheduler 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.3 6 | 7 | require ( 8 | github.com/go-logr/logr v1.4.3 9 | github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 10 | github.com/onsi/ginkgo/v2 v2.23.4 11 | github.com/onsi/gomega v1.37.0 12 | github.com/robfig/cron/v3 v3.0.1 13 | go.uber.org/zap v1.27.0 14 | k8s.io/api v0.33.1 15 | k8s.io/apimachinery v0.33.1 16 | k8s.io/client-go v0.33.1 17 | k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 18 | sigs.k8s.io/controller-runtime v0.21.0 19 | ) 20 | 21 | require ( 22 | github.com/beorn7/perks v1.0.1 // indirect 23 | github.com/blang/semver/v4 v4.0.0 // indirect 24 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 25 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 26 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 27 | github.com/evanphx/json-patch/v5 v5.9.11 // indirect 28 | github.com/fsnotify/fsnotify v1.7.0 // indirect 29 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 30 | github.com/go-logr/zapr v1.3.0 // indirect 31 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 32 | github.com/go-openapi/jsonreference v0.20.2 // indirect 33 | github.com/go-openapi/swag v0.23.0 // indirect 34 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 35 | github.com/gogo/protobuf v1.3.2 // indirect 36 | github.com/google/btree v1.1.3 // indirect 37 | github.com/google/gnostic-models v0.6.9 // indirect 38 | github.com/google/go-cmp v0.7.0 // indirect 39 | github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect 40 | github.com/google/uuid v1.6.0 // indirect 41 | github.com/josharian/intern v1.0.0 // indirect 42 | github.com/json-iterator/go v1.1.12 // indirect 43 | github.com/mailru/easyjson v0.7.7 // indirect 44 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 45 | github.com/modern-go/reflect2 v1.0.2 // indirect 46 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 47 | github.com/pkg/errors v0.9.1 // indirect 48 | github.com/prometheus/client_golang v1.22.0 // indirect 49 | github.com/prometheus/client_model v0.6.1 // indirect 50 | github.com/prometheus/common v0.62.0 // indirect 51 | github.com/prometheus/procfs v0.15.1 // indirect 52 | github.com/spf13/pflag v1.0.5 // indirect 53 | github.com/x448/float16 v0.8.4 // indirect 54 | go.uber.org/automaxprocs v1.6.0 // indirect 55 | go.uber.org/multierr v1.11.0 // indirect 56 | golang.org/x/net v0.38.0 // indirect 57 | golang.org/x/oauth2 v0.27.0 // indirect 58 | golang.org/x/sync v0.12.0 // indirect 59 | golang.org/x/sys v0.32.0 // indirect 60 | golang.org/x/term v0.30.0 // indirect 61 | golang.org/x/text v0.23.0 // indirect 62 | golang.org/x/time v0.9.0 // indirect 63 | golang.org/x/tools v0.31.0 // indirect 64 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 65 | google.golang.org/protobuf v1.36.5 // indirect 66 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 67 | gopkg.in/inf.v0 v0.9.1 // indirect 68 | gopkg.in/yaml.v3 v3.0.1 // indirect 69 | k8s.io/apiextensions-apiserver v0.33.0 // indirect 70 | k8s.io/klog/v2 v2.130.1 // indirect 71 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect 72 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect 73 | sigs.k8s.io/randfill v1.0.0 // indirect 74 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 
// indirect 75 | sigs.k8s.io/yaml v1.4.0 // indirect 76 | ) 77 | -------------------------------------------------------------------------------- /hack/crds/snapshot.storage.k8s.io_volumesnapshotclasses.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.8.0 7 | api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665" 8 | creationTimestamp: null 9 | name: volumesnapshotclasses.snapshot.storage.k8s.io 10 | spec: 11 | group: snapshot.storage.k8s.io 12 | names: 13 | kind: VolumeSnapshotClass 14 | listKind: VolumeSnapshotClassList 15 | plural: volumesnapshotclasses 16 | shortNames: 17 | - vsclass 18 | - vsclasses 19 | singular: volumesnapshotclass 20 | scope: Cluster 21 | versions: 22 | - additionalPrinterColumns: 23 | - jsonPath: .driver 24 | name: Driver 25 | type: string 26 | - description: Determines whether a VolumeSnapshotContent created through the 27 | VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. 28 | jsonPath: .deletionPolicy 29 | name: DeletionPolicy 30 | type: string 31 | - jsonPath: .metadata.creationTimestamp 32 | name: Age 33 | type: date 34 | name: v1 35 | schema: 36 | openAPIV3Schema: 37 | description: VolumeSnapshotClass specifies parameters that a underlying storage 38 | system uses when creating a volume snapshot. A specific VolumeSnapshotClass 39 | is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses 40 | are non-namespaced 41 | properties: 42 | apiVersion: 43 | description: 'APIVersion defines the versioned schema of this representation 44 | of an object. Servers should convert recognized schemas to the latest 45 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 46 | type: string 47 | deletionPolicy: 48 | description: deletionPolicy determines whether a VolumeSnapshotContent 49 | created through the VolumeSnapshotClass should be deleted when its bound 50 | VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". 51 | "Retain" means that the VolumeSnapshotContent and its physical snapshot 52 | on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent 53 | and its physical snapshot on underlying storage system are deleted. 54 | Required. 55 | enum: 56 | - Delete 57 | - Retain 58 | type: string 59 | driver: 60 | description: driver is the name of the storage driver that handles this 61 | VolumeSnapshotClass. Required. 62 | type: string 63 | kind: 64 | description: 'Kind is a string value representing the REST resource this 65 | object represents. Servers may infer this from the endpoint the client 66 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 67 | type: string 68 | parameters: 69 | additionalProperties: 70 | type: string 71 | description: parameters is a key-value map with storage driver specific 72 | parameters for creating snapshots. These values are opaque to Kubernetes. 
73 | type: object 74 | required: 75 | - deletionPolicy 76 | - driver 77 | type: object 78 | served: true 79 | storage: true 80 | subresources: {} 81 | status: 82 | acceptedNames: 83 | kind: "" 84 | plural: "" 85 | conditions: [] 86 | storedVersions: [] 87 | -------------------------------------------------------------------------------- /hack/run-in-kind.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | set -e -o pipefail 4 | 5 | # cd to top dir 6 | scriptdir="$(dirname "$(realpath "$0")")" 7 | cd "$scriptdir/.." 8 | 9 | make docker-build 10 | 11 | KIND_TAG=local-build 12 | IMAGE="quay.io/backube/snapscheduler" 13 | 14 | docker tag "${IMAGE}:latest" "${IMAGE}:${KIND_TAG}" 15 | kind load docker-image "${IMAGE}:${KIND_TAG}" 16 | 17 | docker pull busybox 18 | kind load docker-image busybox 19 | 20 | helm upgrade --install --create-namespace -n backube-snapscheduler \ 21 | --debug \ 22 | --set image.tagOverride=${KIND_TAG} \ 23 | --set metrics.disableAuth=true \ 24 | --wait --timeout=5m \ 25 | snapscheduler ./helm/snapscheduler 26 | -------------------------------------------------------------------------------- /hack/test-full.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | set -e -o pipefail 4 | 5 | SCRIPT_DIR="$(dirname "$(realpath "$0")")" 6 | 7 | START_TIME="$(date +%s)" 8 | 9 | # Ensure all utilities are installed/built fresh 10 | rm -f "${SCRIPT_DIR}/../bin/*" 11 | 12 | # Setup test cluster 13 | "${SCRIPT_DIR}/setup-kind-cluster.sh" 14 | 15 | CLUSTER_SETUP_DONE="$(date +%s)" 16 | 17 | # Start operator 18 | "${SCRIPT_DIR}/run-in-kind.sh" 19 | 20 | OPERATOR_SETUP_DONE="$(date +%s)" 21 | 22 | # Run all the tests 23 | make -C "${SCRIPT_DIR}/.." test test-e2e 24 | 25 | TESTS_DONE="$(date +%s)" 26 | 27 | kind delete cluster 28 | 29 | cat - <- 7 | An operator to take scheduled snapshots of Kubernetes persistent volumes 8 | type: application 9 | # Adding "-0" at the end of the version string permits pre-release kube versions 10 | # to match. See https://github.com/helm/helm/issues/6190 11 | kubeVersion: "^1.20.0-0" 12 | keywords: 13 | - csi 14 | - scheduler 15 | - snapshot 16 | - storage 17 | home: https://backube.github.io/snapscheduler/ 18 | sources: 19 | - https://github.com/backube/snapscheduler 20 | maintainers: 21 | - name: John Strunk 22 | email: jstrunk@redhat.com 23 | url: https://github.com/JohnStrunk 24 | icon: https://raw.githubusercontent.com/backube/snapscheduler/master/docs/media/snapscheduler.svg?sanitize=true 25 | # This is the version number of the application being deployed. 
26 | appVersion: "3.5.0" 27 | annotations: 28 | artifacthub.io/category: storage 29 | # https://artifacthub.io/docs/topics/annotations/helm/ 30 | # Changelog for current chart & app version 31 | # Supported kinds: added, changed, deprecated, removed, fixed, security 32 | artifacthub.io/changes: | 33 | - kind: added 34 | description: Ability to enable setting OwnerReferences on snapshots 35 | - kind: security 36 | description: Dependency updates 37 | artifacthub.io/containsSecurityUpdates: "true" 38 | artifacthub.io/crds: | 39 | - kind: SnapshotSchedule 40 | version: v1 41 | name: snapshotschedule.snapscheduler.backube 42 | displayName: Snapshot schedule 43 | description: Defines a schedule for automated volume snapshots 44 | artifacthub.io/crdsExamples: | 45 | - apiVersion: snapscheduler.backube/v1 46 | kind: SnapshotSchedule 47 | metadata: 48 | name: daily 49 | spec: 50 | retention: 51 | maxCount: 7 52 | schedule: "0 0 * * *" 53 | - apiVersion: snapscheduler.backube/v1 54 | kind: SnapshotSchedule 55 | metadata: 56 | name: weekly 57 | spec: 58 | retention: 59 | maxCount: 4 60 | schedule: "0 0 * * 0" 61 | artifacthub.io/license: AGPL-3.0-or-later 62 | artifacthub.io/operator: "true" 63 | artifacthub.io/operatorCapabilities: Seamless Upgrades 64 | artifacthub.io/signKey: | 65 | fingerprint: 74FC7E7EFD0FF009513CC9E39360133DBD1FF508 66 | url: https://keys.openpgp.org/vks/v1/by-fingerprint/74FC7E7EFD0FF009513CC9E39360133DBD1FF508 67 | -------------------------------------------------------------------------------- /helm/snapscheduler/README.md: -------------------------------------------------------------------------------- 1 | # SnapScheduler 2 | 3 | The SnapScheduler operator takes snapshots of Kubernetes CSI-based persistent 4 | volumes according to user-supplied schedules. 5 | 6 | ## About this operator 7 | 8 | The SnapScheduler operator takes snapshots of CSI-based PersistentVolumes 9 | according to a configurable 10 | [Cron-like](https://en.wikipedia.org/wiki/Cron#Overview) schedule. The schedules 11 | include configurable retention policies for snapshots as well as selectors to 12 | limit the volumes that are snapshotted. An example schedule could be: 13 | 14 | > *Snapshot **all volumes** in a namespace **daily at midnight**, retaining the 15 | > most recent **7** snapshots for each volume.* 16 | 17 | Multiple schedules can be combined to provide more elaborate protection schemes. 18 | For example, a given volume (or collection of volumes) could be protected with: 19 | 20 | - 6 hourly snapshots 21 | - 7 daily snapshots 22 | - 4 weekly snapshots 23 | - 12 monthly snapshots 24 | 25 | ### How it works 26 | 27 | The operator watches for `SnapshotSchedule` CRs in each namespace. When the 28 | current time matches the schedule's cronspec, the operator creates a 29 | `VolumeSnapshot` object for each `PersistentVolumeClaim` in the namespace (or 30 | subset thereof if a label selector is provided). The `VolumeSnapshot` objects 31 | are named according to the template: `<pvcname>-<schedulename>-<timestamp>`. 32 | After creating the new snapshots, the oldest snapshots are removed if necessary, 33 | according to the retention policy of the schedule. 34 | 35 | Please see the [full documentation](https://backube.github.io/snapscheduler/) 36 | for more information. 37 | 38 | ## Requirements 39 | 40 | - Kubernetes >= 1.20 41 | - CSI-based storage driver that supports snapshots (i.e., has the 42 | `CREATE_DELETE_SNAPSHOT` capability) 43 | 44 | ## Installation 45 | 46 | The snapscheduler operator is a "cluster-level" operator.
A single instance will 47 | watch `snapshotschedules` across all namespaces in the cluster. **Running more 48 | than one instance of the scheduler at a time is not supported.** 49 | 50 | ```console 51 | $ kubectl create ns backube-snapscheduler 52 | namespace/backube-snapscheduler created 53 | 54 | $ helm install --namespace backube-snapscheduler snapscheduler backube/snapscheduler 55 | NAME: snapscheduler 56 | LAST DEPLOYED: Mon Nov 25 17:38:26 2019 57 | NAMESPACE: backube-snapscheduler 58 | STATUS: deployed 59 | REVISION: 1 60 | TEST SUITE: None 61 | NOTES: 62 | Thank you for installing snapscheduler! 63 | 64 | The snapscheduler operator is now installed in the backube-snapscheduler 65 | namespace, and snapshotschedules should be enabled cluster-wide. 66 | 67 | See https://backube.github.io/snapscheduler/usage.html to get started. 68 | 69 | Schedules can be viewed via: 70 | $ kubectl -n <namespace> get snapshotschedules 71 | ... 72 | ``` 73 | 74 | ### ⚠️ Upgrade notice... ⚠️ 75 | 76 | If upgrading from v3.1.0 or earlier, manual steps are required: the 77 | `helm upgrade ...` command will fail with 78 | the following error: 79 | 80 | ```console 81 | Error: UPGRADE FAILED: rendered manifests contain a resource that already 82 | exists. Unable to continue with update: CustomResourceDefinition 83 | "snapshotschedules.snapscheduler.backube" in namespace "" exists and cannot be 84 | imported into the current release: invalid ownership metadata; label validation 85 | error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"; 86 | annotation validation error: missing key "meta.helm.sh/release-name": must be 87 | set to "snapscheduler"; annotation validation error: missing key 88 | "meta.helm.sh/release-namespace": must be set to "backube-snapscheduler" 89 | ``` 90 | 91 | The above error can be fixed by adding the required labels and annotations as 92 | mentioned in the error message: 93 | 94 | ```console 95 | $ kubectl label crd/snapshotschedules.snapscheduler.backube app.kubernetes.io/managed-by=Helm 96 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube labeled 97 | 98 | $ kubectl annotate crd/snapshotschedules.snapscheduler.backube meta.helm.sh/release-name=snapscheduler 99 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube annotated 100 | 101 | $ kubectl annotate crd/snapshotschedules.snapscheduler.backube meta.helm.sh/release-namespace=backube-snapscheduler 102 | customresourcedefinition.apiextensions.k8s.io/snapshotschedules.snapscheduler.backube annotated 103 | ``` 104 | 105 | ## Examples 106 | 107 | The schedule for snapshotting is controlled by the 108 | `snapshotschedules.snapscheduler.backube` Custom Resource. This is a namespaced 109 | resource that applies only to the PersistentVolumeClaims in its namespace.
110 | 111 | Below is a simple example that keeps 7 daily (taken at midnight) snapshots of 112 | all PVCs in a given namespace: 113 | 114 | ```yaml 115 | --- 116 | apiVersion: snapscheduler.backube/v1 117 | kind: SnapshotSchedule 118 | metadata: 119 | name: daily 120 | spec: 121 | retention: 122 | maxCount: 7 123 | schedule: "0 0 * * *" 124 | ``` 125 | 126 | See the [usage 127 | documentation](https://backube.github.io/snapscheduler/usage.html) for full 128 | details, including how to: 129 | 130 | - add label selectors to restrict which PVCs this schedule applies to 131 | - set the VolumeSnapshotClass used by the schedule 132 | - apply custom labels to the automatically created VolumeSnapshot objects 133 | 134 | ## Configuration 135 | 136 | The following optional parameters in the chart can be configured, either by 137 | using `--set` on the command line or via a `values.yaml` file; a short example 
follows the parameter list below. In the general 138 | case, the defaults, shown below, should be sufficient. 139 | 140 | - `replicaCount`: `1` 141 | - The number of replicas of the operator to run. Only one is active at a time 142 | via leader election. 143 | - `image.repository`: `quay.io/backube/snapscheduler` 144 | - The location of the operator container image 145 | - `image.image`: `""` 146 | - If set, it will override the `.repository` and `.tagOverride` fields to 147 | allow specifying a specific container and SHA to deploy 148 | - `image.tagOverride`: `""` 149 | - If set, it will override the operator container image tag. The default tag 150 | is set per chart version and can be viewed (as `appVersion`) via `helm show 151 | chart`. 152 | - `image.pullPolicy`: `IfNotPresent` 153 | - Overrides the container image pull policy 154 | - `imagePullSecrets`: none 155 | - May be set if pull secret(s) are needed to retrieve the operator image 156 | - `manageCRDs`: `true` 157 | - Whether the chart should automatically install, upgrade, or remove the 158 | SnapshotSchedule CRD 159 | - `enableOwnerReferences`: `false` 160 | - If set to `true`, owner references will be added to the VolumeSnapshot 161 | objects created by the operator. 162 | - `rbacProxy.image.repository`: `quay.io/brancz/kube-rbac-proxy` 163 | - Specifies the container image used for the RBAC proxy 164 | - `rbacProxy.image.tag`: (see values file for default tag) 165 | - Specifies the tag for the RBAC proxy container image 166 | - `rbacProxy.image.image`: `""` 167 | - If set, it will override the `.repository` and `.tag` fields to 168 | allow specifying a specific container and SHA to deploy 169 | - `rbacProxy.image.pullPolicy`: `IfNotPresent` 170 | - Specifies the RBAC proxy container image pull policy 171 | - `rbacProxy.resources`: requests for 10m CPU and 100Mi memory; no limits 172 | - Allows overriding the resource requests/limits for the kube-rbac-proxy 173 | container of the operator pod. 174 | - `serviceAccount.create`: `true` 175 | - Whether to create the ServiceAccount for the operator 176 | - `serviceAccount.name`: none 177 | - Override the name of the operator's ServiceAccount 178 | - `podSecurityContext`: none 179 | - Allows setting the security context for the operator pod 180 | - `securityContext`: none 181 | - Allows setting the operator container's security context 182 | - `resources`: requests for 10m CPU and 100Mi memory; no limits 183 | - Allows overriding the resource requests/limits for the manager 184 | container of the operator pod. 185 | - `nodeSelector`: `kubernetes.io/arch: amd64`, `kubernetes.io/os: linux` 186 | - Allows applying a node selector to the operator pod 187 | - `tolerations`: none 188 | - Allows applying tolerations to the operator pod 189 | - `topologySpreadConstraints`: none 190 | - Topology spread constraints rely on node labels to identify the 191 | topology domain(s) that each Node is in. 192 | - `affinity`: node-level anti-affinity 193 | - Allows setting the operator pod's affinity 194 | - `podLabels`: `{}` 195 | - Map of additional labels to add to pods 196 | - `podAnnotations`: `{}` 197 | - Map of additional annotations to add to pods
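As a sketch, a couple of these parameters could be overridden at install time (the chosen values are purely illustrative):

```console
$ helm install --namespace backube-snapscheduler \
    --set replicaCount=2 \
    --set image.pullPolicy=Always \
    snapscheduler backube/snapscheduler
```

The same overrides can be collected into a file and passed with `-f myvalues.yaml` instead.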
198 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing SnapScheduler! 2 | 3 | Chart version: {{ .Chart.Version }} 4 | SnapScheduler version: {{ default .Chart.AppVersion .Values.image.tagOverride }} 5 | 6 | The SnapScheduler operator is now installed in the {{ .Release.Namespace }} 7 | namespace, and snapshotschedules should be enabled cluster-wide. 8 | 9 | See https://backube.github.io/snapscheduler/usage.html to get started. 10 | 11 | Schedules can be viewed via: 12 | $ kubectl -n <namespace> get snapshotschedules 13 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "snapscheduler.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "snapscheduler.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "snapscheduler.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "snapscheduler.labels" -}} 38 | helm.sh/chart: {{ include "snapscheduler.chart" . }} 39 | {{ include "snapscheduler.selectorLabels" . }} 40 | {{- if .Chart.AppVersion }} 41 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 42 | {{- end }} 43 | app.kubernetes.io/managed-by: {{ .Release.Service }} 44 | {{- end -}} 45 | 46 | {{/* 47 | Selector labels 48 | */}} 49 | {{- define "snapscheduler.selectorLabels" -}} 50 | app.kubernetes.io/name: {{ include "snapscheduler.name" .
}} 51 | app.kubernetes.io/instance: {{ .Release.Name }} 52 | {{- end -}} 53 | 54 | {{/* 55 | Create the name of the service account to use 56 | */}} 57 | {{- define "snapscheduler.serviceAccountName" -}} 58 | {{- if .Values.serviceAccount.create -}} 59 | {{ default (include "snapscheduler.fullname" .) .Values.serviceAccount.name }} 60 | {{- else -}} 61 | {{ default "default" .Values.serviceAccount.name }} 62 | {{- end -}} 63 | {{- end -}} 64 | 65 | {{- define "snapscheduler.image" -}} 66 | {{- with .Values.image }} 67 | {{- if .image -}} 68 | {{ .image }} 69 | {{- else -}} 70 | {{ .repository }}:{{ default $.Chart.AppVersion .tagOverride }} 71 | {{- end -}} 72 | {{- end -}} 73 | {{- end }} 74 | 75 | {{- define "rbacproxy.image" -}} 76 | {{- with .Values.rbacProxy.image }} 77 | {{- if .image -}} 78 | {{ .image }} 79 | {{- else -}} 80 | {{ .repository }}:{{ .tag }} 81 | {{- end -}} 82 | {{- end -}} 83 | {{- end }} 84 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/clusterrole-metrics-reader.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.metrics.disableAuth }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ include "snapscheduler.fullname" . }}-metrics-reader 6 | labels: 7 | {{- include "snapscheduler.labels" . | nindent 4 }} 8 | rules: 9 | - nonResourceURLs: 10 | - /metrics 11 | verbs: 12 | - get 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/clusterrole-proxy.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.metrics.disableAuth }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ include "snapscheduler.fullname" . }}-proxy 6 | labels: 7 | {{- include "snapscheduler.labels" . | nindent 4 }} 8 | rules: 9 | - apiGroups: 10 | - authentication.k8s.io 11 | resources: 12 | - tokenreviews 13 | verbs: 14 | - create 15 | - apiGroups: 16 | - authorization.k8s.io 17 | resources: 18 | - subjectaccessreviews 19 | verbs: 20 | - create 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ include "snapscheduler.fullname" . }} 6 | labels: 7 | {{- include "snapscheduler.labels" . 
| nindent 4 }} 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - persistentvolumeclaims 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - snapscheduler.backube 19 | resources: 20 | - snapshotschedules 21 | verbs: 22 | - create 23 | - delete 24 | - get 25 | - list 26 | - patch 27 | - update 28 | - watch 29 | - apiGroups: 30 | - snapscheduler.backube 31 | resources: 32 | - snapshotschedules/finalizers 33 | verbs: 34 | - update 35 | - apiGroups: 36 | - snapscheduler.backube 37 | resources: 38 | - snapshotschedules/status 39 | verbs: 40 | - get 41 | - patch 42 | - update 43 | - apiGroups: 44 | - snapshot.storage.k8s.io 45 | resources: 46 | - volumesnapshots 47 | verbs: 48 | - create 49 | - delete 50 | - get 51 | - list 52 | - patch 53 | - update 54 | - watch 55 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "snapscheduler.fullname" . }} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | {{- include "snapscheduler.labels" . | nindent 4 }} 8 | spec: 9 | replicas: {{ .Values.replicaCount }} 10 | selector: 11 | matchLabels: 12 | {{- include "snapscheduler.selectorLabels" . | nindent 6 }} 13 | template: 14 | metadata: 15 | labels: 16 | backube/snapscheduler-affinity: manager 17 | {{- include "snapscheduler.selectorLabels" . | nindent 8 }} 18 | {{- with .Values.podLabels }} 19 | {{- toYaml . | nindent 8 }} 20 | {{- end }} 21 | {{- with .Values.podAnnotations }} 22 | annotations: 23 | {{- toYaml . | nindent 8 }} 24 | {{- end }} 25 | spec: 26 | {{- with .Values.imagePullSecrets }} 27 | imagePullSecrets: 28 | {{- toYaml . | nindent 8 }} 29 | {{- end }} 30 | serviceAccountName: {{ include "snapscheduler.serviceAccountName" . }} 31 | securityContext: 32 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 33 | containers: 34 | - args: 35 | - --secure-listen-address=0.0.0.0:8443 36 | - --upstream=http://127.0.0.1:8080/ 37 | - --logtostderr=true 38 | - --v=0 39 | {{- if .Values.metrics.disableAuth }} 40 | - --ignore-paths=/metrics 41 | {{- end }} 42 | image: {{ include "rbacproxy.image" . }} 43 | imagePullPolicy: {{ .Values.rbacProxy.image.pullPolicy }} 44 | name: kube-rbac-proxy 45 | ports: 46 | - containerPort: 8443 47 | name: https 48 | resources: 49 | {{- toYaml .Values.rbacProxy.resources | nindent 12 }} 50 | securityContext: 51 | {{- toYaml .Values.securityContext | nindent 12 }} 52 | - args: 53 | - --health-probe-bind-address=:8081 54 | - --metrics-bind-address=127.0.0.1:8080 55 | - --leader-elect 56 | {{- if .Values.enableOwnerReferences }} 57 | - --enable-owner-references 58 | {{- end }} 59 | command: 60 | - /manager 61 | image: {{ include "snapscheduler.image" . }} 62 | imagePullPolicy: {{ .Values.image.pullPolicy }} 63 | livenessProbe: 64 | httpGet: 65 | path: /healthz 66 | port: 8081 67 | initialDelaySeconds: 15 68 | periodSeconds: 20 69 | name: manager 70 | readinessProbe: 71 | httpGet: 72 | path: /readyz 73 | port: 8081 74 | initialDelaySeconds: 5 75 | periodSeconds: 10 76 | resources: 77 | {{- toYaml .Values.resources | nindent 12 }} 78 | securityContext: 79 | {{- toYaml .Values.securityContext | nindent 12 }} 80 | {{- with .Values.nodeSelector }} 81 | nodeSelector: 82 | {{- toYaml . | nindent 8 }} 83 | {{- end }} 84 | {{- with .Values.affinity }} 85 | affinity: 86 | {{- toYaml . 
| nindent 8 }} 87 | {{- end }} 88 | {{- with .Values.tolerations }} 89 | tolerations: 90 | {{- toYaml . | nindent 8 }} 91 | {{- end }} 92 | {{- with .Values.topologySpreadConstraints }} 93 | topologySpreadConstraints: 94 | {{- toYaml . | nindent 8 }} 95 | {{- end }} 96 | terminationGracePeriodSeconds: 10 97 | {{- if .Values.priorityClassName }} 98 | priorityClassName: {{ .Values.priorityClassName | quote }} 99 | {{- end }} 100 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/role-leader-election.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ include "snapscheduler.fullname" . }}-leader-election 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "snapscheduler.labels" . | nindent 4 }} 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - configmaps 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | - create 19 | - update 20 | - patch 21 | - delete 22 | - apiGroups: 23 | - coordination.k8s.io 24 | resources: 25 | - leases 26 | verbs: 27 | - get 28 | - list 29 | - watch 30 | - create 31 | - update 32 | - patch 33 | - delete 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - events 38 | verbs: 39 | - create 40 | - patch 41 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/rolebinding-leader-election.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ include "snapscheduler.fullname" . }}-leader-election 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "snapscheduler.labels" . | nindent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: {{ include "snapscheduler.fullname" . }}-leader-election 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ include "snapscheduler.serviceAccountName" . }} 16 | namespace: {{ .Release.Namespace }} 17 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/rolebinding-proxy.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.metrics.disableAuth }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ include "snapscheduler.fullname" . }}-proxy 6 | labels: 7 | {{- include "snapscheduler.labels" . | nindent 4 }} 8 | subjects: 9 | - kind: ServiceAccount 10 | name: {{ include "snapscheduler.serviceAccountName" . }} 11 | namespace: {{ .Release.Namespace }} 12 | roleRef: 13 | kind: ClusterRole 14 | name: {{ include "snapscheduler.fullname" . }}-proxy 15 | apiGroup: rbac.authorization.k8s.io 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: {{ include "snapscheduler.fullname" . }} 5 | labels: 6 | {{- include "snapscheduler.labels" . | nindent 4 }} 7 | subjects: 8 | - kind: ServiceAccount 9 | name: {{ include "snapscheduler.serviceAccountName" . 
}} 10 | namespace: {{ .Release.Namespace }} 11 | roleRef: 12 | kind: ClusterRole 13 | name: {{ include "snapscheduler.fullname" . }} 14 | apiGroup: rbac.authorization.k8s.io 15 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/service-metrics.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "snapscheduler.fullname" . }}-metrics 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "snapscheduler.labels" . | nindent 4 }} 9 | spec: 10 | ports: 11 | - name: https 12 | port: 8443 13 | targetPort: https 14 | selector: 15 | {{- include "snapscheduler.selectorLabels" . | nindent 4 }} 16 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "snapscheduler.serviceAccountName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "snapscheduler.labels" . | nindent 4 }} 9 | {{- end -}} 10 | -------------------------------------------------------------------------------- /helm/snapscheduler/templates/snapscheduler.backube_snapshotschedules.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.manageCRDs }} 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.18.0 8 | name: snapshotschedules.snapscheduler.backube 9 | spec: 10 | group: snapscheduler.backube 11 | names: 12 | kind: SnapshotSchedule 13 | listKind: SnapshotScheduleList 14 | plural: snapshotschedules 15 | singular: snapshotschedule 16 | scope: Namespaced 17 | versions: 18 | - additionalPrinterColumns: 19 | - jsonPath: .spec.schedule 20 | name: Schedule 21 | type: string 22 | - jsonPath: .spec.retention.expires 23 | name: Max age 24 | type: string 25 | - jsonPath: .spec.retention.maxCount 26 | name: Max num 27 | type: integer 28 | - jsonPath: .spec.disabled 29 | name: Disabled 30 | type: boolean 31 | - jsonPath: .status.nextSnapshotTime 32 | name: Next snapshot 33 | type: string 34 | name: v1 35 | schema: 36 | openAPIV3Schema: 37 | description: SnapshotSchedule defines a schedule for taking automated snapshots 38 | of PVC(s) 39 | properties: 40 | apiVersion: 41 | description: |- 42 | APIVersion defines the versioned schema of this representation of an object. 43 | Servers should convert recognized schemas to the latest internal value, and 44 | may reject unrecognized values. 45 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 46 | type: string 47 | kind: 48 | description: |- 49 | Kind is a string value representing the REST resource this object represents. 50 | Servers may infer this from the endpoint the client submits requests to. 51 | Cannot be updated. 52 | In CamelCase. 
53 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 54 | type: string 55 | metadata: 56 | type: object 57 | spec: 58 | description: SnapshotScheduleSpec defines the desired state of SnapshotSchedule 59 | properties: 60 | claimSelector: 61 | description: A filter to select which PVCs to snapshot via this schedule 62 | properties: 63 | matchExpressions: 64 | description: matchExpressions is a list of label selector requirements. 65 | The requirements are ANDed. 66 | items: 67 | description: |- 68 | A label selector requirement is a selector that contains values, a key, and an operator that 69 | relates the key and values. 70 | properties: 71 | key: 72 | description: key is the label key that the selector applies 73 | to. 74 | type: string 75 | operator: 76 | description: |- 77 | operator represents a key's relationship to a set of values. 78 | Valid operators are In, NotIn, Exists and DoesNotExist. 79 | type: string 80 | values: 81 | description: |- 82 | values is an array of string values. If the operator is In or NotIn, 83 | the values array must be non-empty. If the operator is Exists or DoesNotExist, 84 | the values array must be empty. This array is replaced during a strategic 85 | merge patch. 86 | items: 87 | type: string 88 | type: array 89 | x-kubernetes-list-type: atomic 90 | required: 91 | - key 92 | - operator 93 | type: object 94 | type: array 95 | x-kubernetes-list-type: atomic 96 | matchLabels: 97 | additionalProperties: 98 | type: string 99 | description: |- 100 | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 101 | map is equivalent to an element of matchExpressions, whose key field is "key", the 102 | operator is "In", and the values array contains only "value". The requirements are ANDed. 103 | type: object 104 | type: object 105 | x-kubernetes-map-type: atomic 106 | disabled: 107 | description: Indicates that this schedule should be temporarily disabled 108 | type: boolean 109 | retention: 110 | description: Retention determines how long this schedule's snapshots 111 | will be kept. 112 | properties: 113 | expires: 114 | description: |- 115 | The length of time (time.Duration) after which a given Snapshot will be 116 | deleted. 117 | pattern: ^\d+(h|m|s)$ 118 | type: string 119 | maxCount: 120 | description: The maximum number of snapshots to retain per PVC 121 | format: int32 122 | minimum: 1 123 | type: integer 124 | type: object 125 | schedule: 126 | description: |- 127 | Schedule is a Cronspec specifying when snapshots should be taken. See 128 | https://en.wikipedia.org/wiki/Cron for a description of the format. 129 | pattern: ^(@(annually|yearly|monthly|weekly|daily|hourly))|((((\d+,)*\d+|(\d+(\/|-)\d+)|\*(\/\d+)?)\s?){5})$ 130 | type: string 131 | snapshotTemplate: 132 | description: A template to customize the Snapshots. 133 | properties: 134 | labels: 135 | additionalProperties: 136 | type: string 137 | description: |- 138 | A list of labels that should be added to each Snapshot created by this 139 | schedule. 140 | type: object 141 | snapshotClassName: 142 | description: The name of the VolumeSnapshotClass to be used when 143 | creating Snapshots. 144 | type: string 145 | type: object 146 | type: object 147 | status: 148 | description: SnapshotScheduleStatus defines the observed state of SnapshotSchedule 149 | properties: 150 | conditions: 151 | description: Conditions is a list of conditions related to operator 152 | reconciliation. 
153 | items: 154 | description: Condition contains details for one aspect of the current 155 | state of this API Resource. 156 | properties: 157 | lastTransitionTime: 158 | description: |- 159 | lastTransitionTime is the last time the condition transitioned from one status to another. 160 | This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 161 | format: date-time 162 | type: string 163 | message: 164 | description: |- 165 | message is a human readable message indicating details about the transition. 166 | This may be an empty string. 167 | maxLength: 32768 168 | type: string 169 | observedGeneration: 170 | description: |- 171 | observedGeneration represents the .metadata.generation that the condition was set based upon. 172 | For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date 173 | with respect to the current state of the instance. 174 | format: int64 175 | minimum: 0 176 | type: integer 177 | reason: 178 | description: |- 179 | reason contains a programmatic identifier indicating the reason for the condition's last transition. 180 | Producers of specific condition types may define expected values and meanings for this field, 181 | and whether the values are considered a guaranteed API. 182 | The value should be a CamelCase string. 183 | This field may not be empty. 184 | maxLength: 1024 185 | minLength: 1 186 | pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ 187 | type: string 188 | status: 189 | description: status of the condition, one of True, False, Unknown. 190 | enum: 191 | - "True" 192 | - "False" 193 | - Unknown 194 | type: string 195 | type: 196 | description: type of condition in CamelCase or in foo.example.com/CamelCase. 197 | maxLength: 316 198 | pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ 199 | type: string 200 | required: 201 | - lastTransitionTime 202 | - message 203 | - reason 204 | - status 205 | - type 206 | type: object 207 | type: array 208 | lastSnapshotTime: 209 | description: The time of the most recent snapshot taken by this schedule 210 | format: date-time 211 | type: string 212 | nextSnapshotTime: 213 | description: The time of the next scheduled snapshot 214 | format: date-time 215 | type: string 216 | type: object 217 | type: object 218 | served: true 219 | storage: true 220 | subresources: 221 | status: {} 222 | {{- end }} 223 | -------------------------------------------------------------------------------- /helm/snapscheduler/values.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # replicaCount is the number of replicas of the snapscheduler operator to run. 3 | replicaCount: 1 4 | 5 | image: 6 | repository: quay.io/backube/snapscheduler 7 | tagOverride: "" 8 | image: "" 9 | pullPolicy: IfNotPresent 10 | 11 | imagePullSecrets: [] 12 | nameOverride: "" 13 | fullnameOverride: "" 14 | 15 | enableOwnerReferences: false 16 | 17 | rbacProxy: 18 | image: 19 | repository: quay.io/brancz/kube-rbac-proxy 20 | tag: v0.19.1@sha256:9f21034731c7c3228611b9d40807f3230ce8ed2b286b913bf2d1e760d8d866fc 21 | image: "" 22 | pullPolicy: IfNotPresent 23 | resources: 24 | requests: 25 | cpu: 10m 26 | memory: 100Mi 27 | 28 | serviceAccount: 29 | # Specifies whether a service account should be created 30 | create: true 31 | # The name of the service account to use. 
If not set and create is true, a 32 | # name is generated using the fullname template 33 | name: 34 | 35 | podSecurityContext: 36 | runAsNonRoot: true 37 | # Uncomment when we no longer support OCP 4.10 38 | # seccompProfile: 39 | # type: RuntimeDefault 40 | 41 | # additional annotations to add to pods 42 | podAnnotations: {} 43 | 44 | # additional labels to add to pods 45 | podLabels: {} 46 | 47 | securityContext: 48 | allowPrivilegeEscalation: false 49 | capabilities: 50 | drop: 51 | - "ALL" 52 | 53 | resources: 54 | requests: 55 | cpu: 10m 56 | memory: 100Mi 57 | 58 | nodeSelector: 59 | kubernetes.io/arch: amd64 60 | kubernetes.io/os: linux 61 | 62 | tolerations: [] 63 | 64 | topologySpreadConstraints: [] 65 | # - maxSkew: 1 66 | # topologyKey: topology.kubernetes.io/zone 67 | # whenUnsatisfiable: DoNotSchedule 68 | # labelSelector: 69 | # matchLabels: 70 | # app.kubernetes.io/instance: snapscheduler 71 | 72 | affinity: 73 | podAntiAffinity: 74 | preferredDuringSchedulingIgnoredDuringExecution: 75 | - weight: 100 76 | podAffinityTerm: 77 | labelSelector: 78 | matchExpressions: 79 | - key: backube/snapscheduler-affinity 80 | operator: In 81 | values: 82 | - manager 83 | topologyKey: kubernetes.io/hostname 84 | 85 | metrics: 86 | # Disable auth checks when scraping metrics (allow anyone to scrape) 87 | disableAuth: false 88 | 89 | manageCRDs: true 90 | 91 | # See https://kubernetes.io/blog/2023/01/12/ 92 | # protect-mission-critical-pods-priorityclass/ 93 | priorityClassName: "" 94 | -------------------------------------------------------------------------------- /internal/controller/snapshots_expire.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2019 The snapscheduler authors 3 | 4 | This program is free software: you can redistribute it and/or modify 5 | it under the terms of the GNU Affero General Public License as published 6 | by the Free Software Foundation, either version 3 of the License, or 7 | (at your option) any later version. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU Affero General Public License for more details. 13 | 14 | You should have received a copy of the GNU Affero General Public License 15 | along with this program. If not, see <https://www.gnu.org/licenses/>. 16 | */ 17 | 18 | package controller 19 | 20 | import ( 21 | "context" 22 | "errors" 23 | "sort" 24 | "time" 25 | 26 | "github.com/go-logr/logr" 27 | snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" 28 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 29 | "sigs.k8s.io/controller-runtime/pkg/client" 30 | 31 | snapschedulerv1 "github.com/backube/snapscheduler/api/v1" 32 | ) 33 | 34 | // expireByCount deletes the oldest snapshots until the number of snapshots for 35 | // a given PVC (created by the supplied schedule) is no more than the 36 | // schedule's maxCount. This function is the entry point for count-based 37 | // expiration of snapshots.
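// Worked example: with maxCount=3 and five snapshots for a PVC, the list is
// sorted oldest-first and then trimmed to its first len(list)-3 = 2 entries,
// so only the two oldest snapshots are deleted.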
38 | func expireByCount(ctx context.Context, schedule *snapschedulerv1.SnapshotSchedule, 39 | logger logr.Logger, c client.Client) error { 40 | if schedule.Spec.Retention.MaxCount == nil { 41 | // No count-based retention configured 42 | return nil 43 | } 44 | 45 | snapList, err := snapshotsFromSchedule(ctx, schedule, logger, c) 46 | if err != nil { 47 | logger.Error(err, "unable to retrieve list of snapshots") 48 | return err 49 | } 50 | 51 | grouped := groupSnapsByPVC(snapList) 52 | for _, list := range grouped { 53 | list = sortSnapsByTime(list) 54 | if len(list) > int(*schedule.Spec.Retention.MaxCount) { 55 | list = list[:len(list)-int(*schedule.Spec.Retention.MaxCount)] 56 | err := deleteSnapshots(ctx, list, logger, c) 57 | if err != nil { 58 | return err 59 | } 60 | } 61 | } 62 | 63 | return nil 64 | } 65 | 66 | // expireByTime deletes snapshots that are older than the retention time in the 67 | // specified schedule. It only affects snapshots that were created by the provided schedule. 68 | // This function is the entry point for the time-based expiration of snapshots 69 | func expireByTime(ctx context.Context, schedule *snapschedulerv1.SnapshotSchedule, 70 | now time.Time, logger logr.Logger, c client.Client) error { 71 | expiration, err := getExpirationTime(schedule, now, logger) 72 | if err != nil { 73 | logger.Error(err, "unable to determine snapshot expiration time") 74 | return err 75 | } 76 | if expiration == nil { 77 | // No time-based retention configured 78 | return nil 79 | } 80 | 81 | snapList, err := snapshotsFromSchedule(ctx, schedule, logger, c) 82 | if err != nil { 83 | logger.Error(err, "unable to retrieve list of snapshots") 84 | return err 85 | } 86 | 87 | expiredSnaps := filterExpiredSnaps(snapList, *expiration) 88 | 89 | logger.Info("deleting expired snapshots", "expiration", expiration.Format(time.RFC3339), 90 | "total", len(snapList), "expired", len(expiredSnaps)) 91 | err = deleteSnapshots(ctx, expiredSnaps, logger, c) 92 | return err 93 | } 94 | 95 | func deleteSnapshots(ctx context.Context, snapshots []snapv1.VolumeSnapshot, 96 | logger logr.Logger, c client.Client) error { 97 | for i := range snapshots { 98 | snap := snapshots[i] 99 | if err := c.Delete(ctx, &snap, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { 100 | logger.Error(err, "error deleting snapshot", "name", snap.Name) 101 | return err 102 | } 103 | } 104 | return nil 105 | } 106 | 107 | // getExpirationTime returns the cutoff Time for snapshots created with the 108 | // referenced schedule. Any snapshot created prior to the returned time should 109 | // be considered expired. 110 | func getExpirationTime(schedule *snapschedulerv1.SnapshotSchedule, 111 | now time.Time, logger logr.Logger) (*time.Time, error) { 112 | if schedule.Spec.Retention.Expires == "" { 113 | // No time-based retention configured 114 | return nil, nil 115 | } 116 | 117 | lifetime, err := time.ParseDuration(schedule.Spec.Retention.Expires) 118 | if err != nil { 119 | logger.Error(err, "unable to parse spec.retention.expires") 120 | return nil, err 121 | } 122 | 123 | if lifetime < 0 { 124 | err := errors.New("duration must be greater than 0") 125 | logger.Error(err, "invalid value for spec.retention.expires") 126 | return nil, err 127 | } 128 | 129 | expiration := now.Add(-lifetime).UTC() 130 | return &expiration, nil 131 | } 132 | 133 | // filterExpiredSnaps returns the set of expired snapshots from the provided list. 
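// A snapshot counts as expired when its CreationTimestamp is strictly before
// the cutoff returned by getExpirationTime.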
134 | func filterExpiredSnaps(snaps []snapv1.VolumeSnapshot, 135 | expiration time.Time) []snapv1.VolumeSnapshot { 136 | outList := make([]snapv1.VolumeSnapshot, 0) 137 | for _, snap := range snaps { 138 | if snap.CreationTimestamp.Time.Before(expiration) { 139 | outList = append(outList, snap) 140 | } 141 | } 142 | return outList 143 | } 144 | 145 | // snapshotsFromSchedule returns a list of snapshots that were created by the 146 | // supplied schedule 147 | func snapshotsFromSchedule(ctx context.Context, schedule *snapschedulerv1.SnapshotSchedule, 148 | logger logr.Logger, c client.Client) ([]snapv1.VolumeSnapshot, error) { 149 | labelSelector := &metav1.LabelSelector{ 150 | MatchLabels: map[string]string{ 151 | ScheduleKey: schedule.Name, 152 | }, 153 | } 154 | selector, err := metav1.LabelSelectorAsSelector(labelSelector) 155 | if err != nil { 156 | logger.Error(err, "unable to create label selector for snapshot expiration") 157 | return nil, err 158 | } 159 | 160 | listOpts := []client.ListOption{ 161 | client.InNamespace(schedule.Namespace), 162 | client.MatchingLabelsSelector{ 163 | Selector: selector, 164 | }, 165 | } 166 | var snapList snapv1.VolumeSnapshotList 167 | err = c.List(ctx, &snapList, listOpts...) 168 | if err != nil { 169 | logger.Error(err, "unable to retrieve list of snapshots") 170 | return nil, err 171 | } 172 | 173 | return snapList.Items, nil 174 | } 175 | 176 | // groupSnapsByPVC takes a list of snapshots and groups them by the PVC they 177 | // were created from 178 | func groupSnapsByPVC(snaps []snapv1.VolumeSnapshot) map[string][]snapv1.VolumeSnapshot { 179 | groupedSnaps := make(map[string][]snapv1.VolumeSnapshot) 180 | for _, snap := range snaps { 181 | pvcName := snap.Spec.Source.PersistentVolumeClaimName 182 | if pvcName != nil { 183 | if groupedSnaps[*pvcName] == nil { 184 | groupedSnaps[*pvcName] = []snapv1.VolumeSnapshot{} 185 | } 186 | groupedSnaps[*pvcName] = append(groupedSnaps[*pvcName], snap) 187 | } 188 | } 189 | 190 | return groupedSnaps 191 | } 192 | 193 | // sortSnapsByTime sorts the snapshots in order of ascending CreationTimestamp 194 | func sortSnapsByTime(snaps []snapv1.VolumeSnapshot) []snapv1.VolumeSnapshot { 195 | sorted := append([]snapv1.VolumeSnapshot(nil), snaps...) 196 | sort.Slice(sorted, func(i, j int) bool { 197 | return sorted[i].CreationTimestamp.Before(&sorted[j].CreationTimestamp) 198 | }) 199 | return sorted 200 | } 201 | -------------------------------------------------------------------------------- /internal/controller/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 The snapscheduler authors. 3 | 4 | This program is free software: you can redistribute it and/or modify 5 | it under the terms of the GNU Affero General Public License as published 6 | by the Free Software Foundation, either version 3 of the License, or 7 | (at your option) any later version. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU Affero General Public License for more details. 13 | 14 | You should have received a copy of the GNU Affero General Public License 15 | along with this program. If not, see <https://www.gnu.org/licenses/>.
16 | */ 17 | 18 | package controller 19 | 20 | import ( 21 | "context" 22 | "path/filepath" 23 | "testing" 24 | 25 | snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" 26 | //nolint:revive // Allow . import 27 | . "github.com/onsi/ginkgo/v2" 28 | //nolint:revive // Allow . import 29 | . "github.com/onsi/gomega" 30 | "k8s.io/client-go/kubernetes/scheme" 31 | "k8s.io/client-go/rest" 32 | ctrl "sigs.k8s.io/controller-runtime" 33 | "sigs.k8s.io/controller-runtime/pkg/client" 34 | "sigs.k8s.io/controller-runtime/pkg/envtest" 35 | logf "sigs.k8s.io/controller-runtime/pkg/log" 36 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 37 | ctrlMetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" 38 | 39 | snapschedulerv1 "github.com/backube/snapscheduler/api/v1" 40 | //+kubebuilder:scaffold:imports 41 | ) 42 | 43 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 44 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 45 | 46 | var cfg *rest.Config 47 | var k8sClient client.Client 48 | var testEnv *envtest.Environment 49 | var cancel context.CancelFunc 50 | 51 | func TestControllers(t *testing.T) { 52 | RegisterFailHandler(Fail) 53 | 54 | RunSpecs(t, "Controller Suite") 55 | } 56 | 57 | var _ = BeforeSuite(func() { 58 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 59 | 60 | var ctx context.Context 61 | ctx, cancel = context.WithCancel(context.TODO()) 62 | 63 | By("bootstrapping test environment") 64 | testEnv = &envtest.Environment{ 65 | CRDDirectoryPaths: []string{ 66 | filepath.Join("..", "..", "config", "crd", "bases"), 67 | filepath.Join("..", "..", "hack", "crds"), 68 | }, 69 | ErrorIfCRDPathMissing: true, 70 | } 71 | 72 | var err error 73 | cfg, err = testEnv.Start() 74 | Expect(err).NotTo(HaveOccurred()) 75 | Expect(cfg).NotTo(BeNil()) 76 | 77 | err = snapschedulerv1.AddToScheme(scheme.Scheme) 78 | Expect(err).NotTo(HaveOccurred()) 79 | err = snapv1.AddToScheme(scheme.Scheme) 80 | Expect(err).NotTo(HaveOccurred()) 81 | 82 | //+kubebuilder:scaffold:scheme 83 | 84 | // k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 85 | // Expect(err).NotTo(HaveOccurred()) 86 | // Expect(k8sClient).NotTo(BeNil()) 87 | 88 | k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ 89 | Scheme: scheme.Scheme, 90 | Metrics: ctrlMetrics.Options{ 91 | BindAddress: "0", 92 | }, 93 | }) 94 | Expect(err).NotTo(HaveOccurred()) 95 | 96 | go func() { 97 | defer GinkgoRecover() 98 | err = k8sManager.Start(ctx) 99 | Expect(err).NotTo(HaveOccurred()) 100 | }() 101 | 102 | k8sClient = k8sManager.GetClient() 103 | Expect(k8sClient).NotTo(BeNil()) 104 | }) 105 | 106 | var _ = AfterSuite(func() { 107 | cancel() 108 | By("tearing down the test environment") 109 | err := testEnv.Stop() 110 | Expect(err).NotTo(HaveOccurred()) 111 | }) 112 | -------------------------------------------------------------------------------- /release-checklist.md: -------------------------------------------------------------------------------- 1 | # Release checklist 2 | 3 | ## Create a release 4 | 5 | * Update [CHANGELOG.md](CHANGELOG.md) 6 | * Update Helm chart template 7 | * In Chart.yaml, update `version`, `appVersion`, and the 8 | `annotations.artifacthub.io/changes` annotation (base the list of 9 | changes on the changelog) 10 | * Update version compatibility matrix in [docs/index.md](docs/index.md) 11 | * Build OperatorHub bundle 12 | * Commit to `master` 13 | * Branch to a release branch 14 | * Tag a release
(vX.Y.Z) on that branch 15 | * Ensure the container becomes available on [Quay](https://quay.io/repository/backube/snapscheduler?tab=tags) 16 | * Edit [.github/workflows/periodic.yml](.github/workflows/periodic.yml) to 17 | enable periodic builds of release branch and tagged containers 18 | 19 | ## Release updated Helm chart 20 | 21 | * Package the Helm chart 22 | `$ helm package helm/snapscheduler` 23 | * Add it to the backube chart repo 24 | 25 | ## Release on OperatorHub 26 | 27 | * Create bundle for operatorhub 28 | `$ make bundle CHANNELS="candidate,stable" DEFAULT_CHANNEL=stable 29 | IMG=quay.io/backube/snapscheduler:2.0.0 VERSION=2.0.0` 30 | * `CHANNELS` is the list of channels that this bundle will be a part of 31 | * `VERSION` is the operator version (on operatorhub) 32 | * `DEFAULT_CHANNEL` is the channel that users will get by default 33 | * `IMG` is the container image + tag that will be deployed by the bundle 34 | * In the CSV, add the operator image: `.metadata.annotations.containerImage: quay.io/backube/snapscheduler:2.0.0` 35 | * In `bundle/metadata/annotations.yaml`, add the proper annotation to restrict 36 | which OpenShift version catalogs it will be added to: 37 | 38 | ```yaml 39 | # https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/bundle-directory/managing-openshift-versions 40 | # Single version means that version and greater 41 | # Single version preceded by "=" means ONLY that version 42 | # Range is also permitted 43 | com.redhat.openshift.versions: "v4.7" 44 | ``` 45 | 46 | * Add it to the [community 47 | repo](https://github.com/k8s-operatorhub/community-operators/tree/main/operators/snapscheduler) 48 | by copying the bundle directory in as a new subdir named after the version 49 | * Do the same for the [OpenShift 50 | repo](https://github.com/redhat-openshift-ecosystem/community-operators-prod/tree/main/operators/snapscheduler) 51 | -------------------------------------------------------------------------------- /test-kuttl/e2e/custom-snapclass/00-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: workload 6 | status: 7 | phase: Running 8 | -------------------------------------------------------------------------------- /test-kuttl/e2e/custom-snapclass/00-create-workload.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: datavol 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | 13 | --- 14 | kind: Pod 15 | apiVersion: v1 16 | metadata: 17 | name: workload 18 | spec: 19 | containers: 20 | - name: workload 21 | image: gcr.io/distroless/static:debug-nonroot 22 | command: ["sh", "-c"] 23 | args: ["echo 'data' > /mnt/datafile; sleep 99999"] 24 | securityContext: 25 | allowPrivilegeEscalation: false 26 | capabilities: 27 | drop: ["ALL"] 28 | readOnlyRootFilesystem: true 29 | # seccompProfile: 30 | # type: RuntimeDefault 31 | volumeMounts: 32 | - name: data 33 | mountPath: "/mnt" 34 | securityContext: 35 | runAsNonRoot: true 36 | terminationGracePeriodSeconds: 2 37 | volumes: 38 | - name: data 39 | persistentVolumeClaim: 40 | claimName: datavol 41 | -------------------------------------------------------------------------------- /test-kuttl/e2e/custom-snapclass/05-create-schedule.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: snapscheduler.backube/v1 3 | kind: SnapshotSchedule 4 | metadata: 5 | name: withclass 6 | spec: 7 | schedule: "* * * * *" 8 | snapshotTemplate: 9 | snapshotClassName: my-custom-class 10 | -------------------------------------------------------------------------------- /test-kuttl/e2e/custom-snapclass/10-waitfor-snapshot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - timeout: 90 6 | script: | 7 | set -e -o pipefail 8 | 9 | echo "Waiting for snapshot" 10 | while [[ $(kubectl -n "$NAMESPACE" get volumesnapshots -oname) == '' ]]; do 11 | sleep 1 12 | done 13 | 14 | sname=$(kubectl -n "$NAMESPACE" get volumesnapshots -oname) 15 | 16 | echo "Verifying usage of custom snapshotclass" 17 | kubectl -n "$NAMESPACE" get $sname -otemplate='{{.spec.volumeSnapshotClassName}}' | grep -q 'my-custom-class' 18 | -------------------------------------------------------------------------------- /test-kuttl/e2e/custom-snapclass/9999-delete-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - command: kubectl delete --wait=false ns ${NAMESPACE} 6 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-equality/00-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: workload 6 | status: 7 | phase: Running 8 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-equality/00-create-workload.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: datavol 6 | labels: 7 | snap: nope 8 | whatever: zzz 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | 16 | --- 17 | kind: PersistentVolumeClaim 18 | apiVersion: v1 19 | metadata: 20 | name: voldata 21 | labels: 22 | snap: me 23 | whatever: zzz 24 | spec: 25 | accessModes: 26 | - ReadWriteOnce 27 | resources: 28 | requests: 29 | storage: 1Gi 30 | 31 | --- 32 | kind: Pod 33 | apiVersion: v1 34 | metadata: 35 | name: workload 36 | spec: 37 | containers: 38 | - name: workload 39 | image: gcr.io/distroless/static:debug-nonroot 40 | command: ["sh", "-c"] 41 | args: ["echo 'data' > /mnt/datafile; sleep 99999"] 42 | securityContext: 43 | allowPrivilegeEscalation: false 44 | capabilities: 45 | drop: ["ALL"] 46 | readOnlyRootFilesystem: true 47 | # seccompProfile: 48 | # type: RuntimeDefault 49 | volumeMounts: 50 | - name: data 51 | mountPath: "/mnt" 52 | - name: data2 53 | mountPath: "/mnt2" 54 | securityContext: 55 | runAsNonRoot: true 56 | terminationGracePeriodSeconds: 2 57 | volumes: 58 | - name: data 59 | persistentVolumeClaim: 60 | claimName: datavol 61 | - name: data2 62 | persistentVolumeClaim: 63 | claimName: voldata 64 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-equality/05-create-schedule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: snapscheduler.backube/v1 3 | kind: SnapshotSchedule 4 | metadata: 5 | name: selector 6 
| spec: 7 | schedule: "* * * * *" 8 | claimSelector: 9 | matchLabels: 10 | snap: me 11 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-equality/10-waitfor-snapshot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - timeout: 90 6 | script: | 7 | set -e -o pipefail 8 | 9 | echo "Waiting for snapshot" 10 | while [[ $(kubectl -n "$NAMESPACE" get volumesnapshots -oname | wc -l) == 0 ]]; do 11 | sleep 1 12 | done 13 | 14 | sname=$(kubectl -n "$NAMESPACE" get volumesnapshots -oname) 15 | 16 | echo "Verifying correct PVC was snapshotted" 17 | echo "$sname" | grep -q "voldata" 18 | echo "Verifying there is only 1 snapshot" 19 | echo "$sname" 20 | [[ $(echo "$sname" | wc -l) == 1 ]] 21 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-equality/9999-delete-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - command: kubectl delete --wait=false ns ${NAMESPACE} 6 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-set/00-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: workload 6 | status: 7 | phase: Running 8 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-set/00-create-workload.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: datavol 6 | labels: 7 | snap: nope 8 | whatever: zzz 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | 16 | --- 17 | kind: PersistentVolumeClaim 18 | apiVersion: v1 19 | metadata: 20 | name: voldata 21 | labels: 22 | snap: me 23 | whatever: zzz 24 | spec: 25 | accessModes: 26 | - ReadWriteOnce 27 | resources: 28 | requests: 29 | storage: 1Gi 30 | 31 | --- 32 | kind: Pod 33 | apiVersion: v1 34 | metadata: 35 | name: workload 36 | spec: 37 | containers: 38 | - name: workload 39 | image: gcr.io/distroless/static:debug-nonroot 40 | command: ["sh", "-c"] 41 | args: ["echo 'data' > /mnt/datafile; sleep 99999"] 42 | securityContext: 43 | allowPrivilegeEscalation: false 44 | capabilities: 45 | drop: ["ALL"] 46 | readOnlyRootFilesystem: true 47 | # seccompProfile: 48 | # type: RuntimeDefault 49 | volumeMounts: 50 | - name: data 51 | mountPath: "/mnt" 52 | - name: data2 53 | mountPath: "/mnt2" 54 | securityContext: 55 | runAsNonRoot: true 56 | terminationGracePeriodSeconds: 2 57 | volumes: 58 | - name: data 59 | persistentVolumeClaim: 60 | claimName: datavol 61 | - name: data2 62 | persistentVolumeClaim: 63 | claimName: voldata 64 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-set/05-create-schedule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: snapscheduler.backube/v1 3 | kind: SnapshotSchedule 4 | metadata: 5 | name: selector 6 | spec: 7 | schedule: "* * * * *" 8 | claimSelector: 9 | matchExpressions: 10 | - key: snap 11 | operator: In 12 | values: 13 | - me 14 | 
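# This set-based selector is equivalent to the matchLabels form used in the
# label-selector-equality test: only PVCs whose "snap" label equals "me"
# (the "voldata" PVC above) are snapshotted.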
-------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-set/10-waitfor-snapshot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - timeout: 90 6 | script: | 7 | set -e -o pipefail 8 | 9 | echo "Waiting for snapshot" 10 | while [[ $(kubectl -n "$NAMESPACE" get volumesnapshots -oname | wc -l) == 0 ]]; do 11 | sleep 1 12 | done 13 | 14 | sname=$(kubectl -n "$NAMESPACE" get volumesnapshots -oname) 15 | 16 | echo "Verifying correct PVC was snapshotted" 17 | echo "$sname" | grep -q "voldata" 18 | echo "Verifying there is only 1 snapshot" 19 | echo "$sname" 20 | [[ $(echo "$sname" | wc -l) == 1 ]] 21 | -------------------------------------------------------------------------------- /test-kuttl/e2e/label-selector-set/9999-delete-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - command: kubectl delete --wait=false ns ${NAMESPACE} 6 | -------------------------------------------------------------------------------- /test-kuttl/e2e/metrics/00-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 120 5 | collectors: 6 | # Operator logs 7 | - selector: app.kubernetes.io/name=snapscheduler 8 | namespace: backube-snapscheduler 9 | # Resources in the test namespace 10 | - type: command 11 | command: kubectl -n $NAMESPACE get all,pvc,snapshotschedule,volumesnapshot,volumesnapshotcontent -oyaml 12 | # Logs from the job 13 | - type: command 14 | command: kubectl -n $NAMESPACE logs --prefix --all-containers job/check-metrics 15 | 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: check-metrics 21 | status: 22 | phase: Succeeded 23 | -------------------------------------------------------------------------------- /test-kuttl/e2e/metrics/00-check-metrics.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: check-metrics 6 | spec: 7 | containers: 8 | - name: workload 9 | image: fedora:latest 10 | command: ["sh", "-c"] 11 | args: ["curl --insecure https://snapscheduler-metrics.backube-snapscheduler.svc.cluster.local:8443/metrics | grep 'workqueue_work_duration_seconds_count{controller=\"snapshotschedule\",name=\"snapshotschedule\"}'"] 12 | securityContext: 13 | allowPrivilegeEscalation: false 14 | capabilities: 15 | drop: ["ALL"] 16 | runAsUser: 1000 17 | runAsGroup: 1000 18 | readOnlyRootFilesystem: true 19 | restartPolicy: OnFailure 20 | securityContext: 21 | runAsNonRoot: true 22 | terminationGracePeriodSeconds: 2 23 | -------------------------------------------------------------------------------- /test-kuttl/e2e/metrics/9999-delete-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - command: kubectl delete --wait=false ns ${NAMESPACE} 6 | -------------------------------------------------------------------------------- /test-kuttl/e2e/minimal-schedule/00-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: workload 6 | status: 7 | phase: Running 8 | 
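# kuttl treats this file as the assertion for step 00: the step passes only
# once the workload Pod reports phase Running.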
-------------------------------------------------------------------------------- /test-kuttl/e2e/minimal-schedule/00-create-workload.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: datavol 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | 13 | --- 14 | kind: Pod 15 | apiVersion: v1 16 | metadata: 17 | name: workload 18 | spec: 19 | containers: 20 | - name: workload 21 | image: gcr.io/distroless/static:debug-nonroot 22 | command: ["sh", "-c"] 23 | args: ["echo 'data' > /mnt/datafile; sleep 99999"] 24 | securityContext: 25 | allowPrivilegeEscalation: false 26 | capabilities: 27 | drop: ["ALL"] 28 | readOnlyRootFilesystem: true 29 | # seccompProfile: 30 | # type: RuntimeDefault 31 | volumeMounts: 32 | - name: data 33 | mountPath: "/mnt" 34 | securityContext: 35 | runAsNonRoot: true 36 | terminationGracePeriodSeconds: 2 37 | volumes: 38 | - name: data 39 | persistentVolumeClaim: 40 | claimName: datavol 41 | -------------------------------------------------------------------------------- /test-kuttl/e2e/minimal-schedule/05-create-schedule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: snapscheduler.backube/v1 3 | kind: SnapshotSchedule 4 | metadata: 5 | name: minimal 6 | spec: 7 | schedule: "* * * * *" 8 | -------------------------------------------------------------------------------- /test-kuttl/e2e/minimal-schedule/10-waitfor-snapshot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - timeout: 300 6 | script: | 7 | set -e -o pipefail 8 | 9 | echo "Waiting for snapshot" 10 | while [[ $(kubectl -n "$NAMESPACE" get volumesnapshots -oname) == '' ]]; do 11 | sleep 1 12 | done 13 | 14 | sname=$(kubectl -n "$NAMESPACE" get volumesnapshots -oname) 15 | 16 | echo "Waiting for snapshot to be ready: $sname" 17 | while [[ $(kubectl -n "$NAMESPACE" get "$sname" -otemplate="{{.status.readyToUse}}") != "true" ]]; do 18 | kubectl -n "$NAMESPACE" get "$sname" 19 | sleep 5 20 | done 21 | -------------------------------------------------------------------------------- /test-kuttl/e2e/minimal-schedule/9999-delete-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - command: kubectl delete --wait=false ns ${NAMESPACE} 6 | -------------------------------------------------------------------------------- /test-kuttl/e2e/multi-pvc/00-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: workload 6 | status: 7 | phase: Running 8 | -------------------------------------------------------------------------------- /test-kuttl/e2e/multi-pvc/00-create-workload.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: datavol 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | 13 | --- 14 | kind: PersistentVolumeClaim 15 | apiVersion: v1 16 | metadata: 17 | name: voldata 18 | spec: 19 | accessModes: 20 | - ReadWriteOnce 21 | resources: 22 | requests: 23 | storage: 1Gi 24 | 25 | --- 26 | kind: Pod 27 | 
apiVersion: v1 28 | metadata: 29 | name: workload 30 | spec: 31 | containers: 32 | - name: workload 33 | image: gcr.io/distroless/static:debug-nonroot 34 | command: ["sh", "-c"] 35 | args: ["echo 'data' > /mnt/datafile; sleep 99999"] 36 | securityContext: 37 | allowPrivilegeEscalation: false 38 | capabilities: 39 | drop: ["ALL"] 40 | readOnlyRootFilesystem: true 41 | # seccompProfile: 42 | # type: RuntimeDefault 43 | volumeMounts: 44 | - name: data 45 | mountPath: "/mnt" 46 | - name: data2 47 | mountPath: "/mnt2" 48 | securityContext: 49 | runAsNonRoot: true 50 | terminationGracePeriodSeconds: 2 51 | volumes: 52 | - name: data 53 | persistentVolumeClaim: 54 | claimName: datavol 55 | - name: data2 56 | persistentVolumeClaim: 57 | claimName: voldata 58 | -------------------------------------------------------------------------------- /test-kuttl/e2e/multi-pvc/05-create-schedule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: snapscheduler.backube/v1 3 | kind: SnapshotSchedule 4 | metadata: 5 | name: multi 6 | spec: 7 | schedule: "* * * * *" 8 | -------------------------------------------------------------------------------- /test-kuttl/e2e/multi-pvc/10-waitfor-snapshot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - timeout: 180 6 | script: | 7 | set -e -o pipefail 8 | 9 | for n in datavol voldata; do 10 | echo "Waiting for snapshot of $n" 11 | while [[ $(kubectl -n "$NAMESPACE" get volumesnapshots -oname | grep -c "$n") -lt 1 ]]; do 12 | sleep 1 13 | done 14 | done 15 | -------------------------------------------------------------------------------- /test-kuttl/e2e/multi-pvc/9999-delete-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - command: kubectl delete --wait=false ns ${NAMESPACE} 6 | -------------------------------------------------------------------------------- /test-kuttl/e2e/template-labels/00-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: workload 6 | status: 7 | phase: Running 8 | -------------------------------------------------------------------------------- /test-kuttl/e2e/template-labels/00-create-workload.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: datavol 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | 13 | --- 14 | kind: Pod 15 | apiVersion: v1 16 | metadata: 17 | name: workload 18 | spec: 19 | containers: 20 | - name: workload 21 | image: gcr.io/distroless/static:debug-nonroot 22 | command: ["sh", "-c"] 23 | args: ["echo 'data' > /mnt/datafile; sleep 99999"] 24 | securityContext: 25 | allowPrivilegeEscalation: false 26 | capabilities: 27 | drop: ["ALL"] 28 | readOnlyRootFilesystem: true 29 | # seccompProfile: 30 | # type: RuntimeDefault 31 | volumeMounts: 32 | - name: data 33 | mountPath: "/mnt" 34 | securityContext: 35 | runAsNonRoot: true 36 | terminationGracePeriodSeconds: 2 37 | volumes: 38 | - name: data 39 | persistentVolumeClaim: 40 | claimName: datavol 41 | --------------------------------------------------------------------------------
/test-kuttl/e2e/template-labels/05-create-schedule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: snapscheduler.backube/v1 3 | kind: SnapshotSchedule 4 | metadata: 5 | name: withlabels 6 | spec: 7 | schedule: "* * * * *" 8 | snapshotTemplate: 9 | labels: 10 | mysnaplabel: myval 11 | label2: v2 12 | -------------------------------------------------------------------------------- /test-kuttl/e2e/template-labels/10-waitfor-snapshot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - timeout: 90 6 | script: | 7 | set -e -o pipefail 8 | 9 | echo "Waiting for snapshot" 10 | while [[ $(kubectl -n "$NAMESPACE" get volumesnapshots -oname) == '' ]]; do 11 | sleep 1 12 | done 13 | 14 | sname=$(kubectl -n "$NAMESPACE" get volumesnapshots -oname) 15 | 16 | echo "Verifying presence of labels on snapshot" 17 | kubectl -n "$NAMESPACE" get $sname -otemplate='{{.metadata.labels}}' | grep -q 'mysnaplabel:myval' 18 | kubectl -n "$NAMESPACE" get $sname -otemplate='{{.metadata.labels}}' | grep -q 'label2:v2' 19 | -------------------------------------------------------------------------------- /test-kuttl/e2e/template-labels/9999-delete-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - command: kubectl delete --wait=false ns ${NAMESPACE} 6 | -------------------------------------------------------------------------------- /test-kuttl/kuttl-test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestSuite 4 | testDirs: 5 | - ./e2e 6 | timeout: 60 7 | --------------------------------------------------------------------------------
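For reference, the pieces above compose as follows. This is a minimal sketch of a complete SnapshotSchedule, assuming a namespace and VolumeSnapshotClass that already exist in the target cluster; the metadata values are illustrative only, while the selector and template values are borrowed from the kuttl tests above:

```yaml
---
apiVersion: snapscheduler.backube/v1
kind: SnapshotSchedule
metadata:
  name: daily        # illustrative name
  namespace: myapp   # illustrative; a schedule snapshots PVCs in its own namespace
spec:
  schedule: "30 2 * * *"   # cronspec: every day at 02:30
  claimSelector:           # optional; omit to snapshot every PVC in the namespace
    matchLabels:
      snap: me
  retention:
    expires: "168h"        # must match ^\d+(h|m|s)$; snapshots older than 7 days expire
    maxCount: 7            # keep at most 7 snapshots per PVC
  snapshotTemplate:
    snapshotClassName: my-custom-class   # assumed VolumeSnapshotClass
    labels:
      mysnaplabel: myval
```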