├── .all-contributorsrc
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_template.md
│   │   └── feature_request.md
│   ├── config
│   │   ├── ct.yaml
│   │   └── lintconf.yaml
│   └── workflows
│       ├── ci.yml
│       ├── helm-publish.yml
│       ├── helm-test.yml
│       ├── main.yaml
│       ├── osv-scanner-pr.yml
│       └── scorecard.yml
├── .gitignore
├── .golangci.yaml
├── ADOPTERS.md
├── CONTRIBUTING.md
├── DEVELOPMENT.md
├── Dockerfile
├── LICENSE
├── Makefile
├── PROJECT
├── README.md
├── api
│   └── v1alpha1
│       ├── cleaner_types.go
│       ├── groupversion_info.go
│       ├── report_types.go
│       └── zz_generated.deepcopy.go
├── assets
│   ├── discord_logo.png
│   ├── logo.png
│   ├── slack_logo.png
│   ├── smtp_logo.png
│   ├── teams_logo.svg
│   ├── telegram_logo.png
│   └── webex_logo.png
├── charts
│   └── k8s-cleaner
│       ├── .helmignore
│       ├── Chart.yaml
│       ├── README.md
│       ├── README.md.gotmpl
│       ├── crd
│       │   ├── apps.projectsveltos.io_cleaners.yaml
│       │   └── apps.projectsveltos.io_reports.yaml
│       ├── templates
│       │   ├── _helpers.tpl
│       │   ├── crds.tpl
│       │   ├── deployment.yaml
│       │   ├── extra-manifests.yaml
│       │   ├── rbac.yaml
│       │   ├── service.yaml
│       │   ├── serviceaccount.yaml
│       │   └── servicemonitor.yaml
│       └── values.yaml
├── cmd
│   └── main.go
├── config
│   ├── crd
│   │   ├── bases
│   │   │   ├── apps.projectsveltos.io_cleaners.yaml
│   │   │   └── apps.projectsveltos.io_reports.yaml
│   │   ├── kustomization.yaml
│   │   ├── kustomizeconfig.yaml
│   │   └── patches
│   │       ├── cainjection_in_pruners.yaml
│   │       ├── cainjection_in_reports.yaml
│   │       ├── webhook_in_pruners.yaml
│   │       └── webhook_in_reports.yaml
│   ├── default
│   │   ├── kustomization.yaml
│   │   ├── manager_auth_proxy_patch.yaml
│   │   ├── manager_image_patch.yaml
│   │   └── manager_pull_policy.yaml
│   ├── manager
│   │   ├── kustomization.yaml
│   │   └── manager.yaml
│   ├── prometheus
│   │   ├── kustomization.yaml
│   │   └── monitor.yaml
│   ├── rbac
│   │   ├── kustomization.yaml
│   │   ├── pruner_editor_role.yaml
│   │   ├── pruner_viewer_role.yaml
│   │   ├── report_editor_role.yaml
│   │   ├── report_viewer_role.yaml
│   │   ├── role.yaml
│   │   ├── role_binding.yaml
│   │   └── service_account.yaml
│   └── samples
│       ├── apps_v1alpha1_pruner.yaml
│       ├── apps_v1alpha1_report.yaml
│       └── kustomization.yaml
├── docs
│   ├── assets
│   │   ├── logo.png
│   │   ├── sveltos-multi-cluster.gif
│   │   ├── sveltos_and_cleaner.gif
│   │   └── sveltos_roomba.gif
│   ├── getting_started
│   │   ├── examples
│   │   │   ├── unhealthy_resources
│   │   │   │   ├── deployment.md
│   │   │   │   ├── more_examples.md
│   │   │   │   ├── outdated_secret_data.md
│   │   │   │   └── pod_expired_certs.md
│   │   │   └── unused_resources
│   │   │       ├── configmap.md
│   │   │       ├── jobs.md
│   │   │       ├── more_examples.md
│   │   │       ├── persistent_volume.md
│   │   │       ├── persistent_volume_claims.md
│   │   │       └── secret.md
│   │   ├── features
│   │   │   ├── automated_operations
│   │   │   │   └── scale_up_down_resources.md
│   │   │   ├── dryrun
│   │   │   │   └── dryrun.md
│   │   │   ├── label_filters
│   │   │   │   └── label_filters.md
│   │   │   ├── resourceselector
│   │   │   │   └── resourceselector.md
│   │   │   ├── schedule
│   │   │   │   └── schedule.md
│   │   │   ├── store_resources
│   │   │   │   └── store_resource_yaml.md
│   │   │   └── update_resources
│   │   │       └── update_resources.md
│   │   └── install
│   │       ├── install.md
│   │       ├── install_on_multiple_cluster.md
│   │       └── telemetry.md
│   ├── index.md
│   ├── javascripts
│   │   └── extra.js
│   ├── notifications
│   │   └── notifications.md
│   ├── reports
│   │   └── k8s-cleaner_reports.md
│   ├── resources
│   │   └── blogs.md
│   └── stylesheets
│       └── extra.css
├── examples-automated-operations
│   └── scheduled-scaling
│       ├── pause.yaml
│       └── resume.yaml
├── examples-unhealthy-resources
│   ├── deployments
│   │   ├── scale-down-high-restart-deployment.yaml
│   │   └── too-many-restarts.yaml
│   ├── object-references
│   │   ├── deployment-referencing-non-existent-resources.yaml
│   │   └── ingress-referencing-non-existent-service.yaml
│   ├── pod-with-evicted-state
│   │   └── pod-with-evicted-state.yml
│   ├── pod-with-expired-certificates
│   │   └── pods-with-expired-certificates.yaml
│   ├── pod-with-outdated-secrets
│   │   └── pods-with-outdated-secret-data.yaml
│   └── pod-with-terminating-state
│       └── pod-with-terminating-state.yaml
├── examples-unused-resources
│   ├── clusterroles
│   │   └── unused_clusterroles.yaml
│   ├── configmaps
│   │   └── orphaned_configmaps.yaml
│   ├── deployments
│   │   ├── deployment_with_no_autoscaler.yaml
│   │   ├── deployment_with_replica_zero.yaml
│   │   └── orphaned_deployment.yaml
│   ├── horizontal-pod-autoscalers
│   │   └── unused-hpas.yaml
│   ├── ingresses
│   │   └── unused_ingresses.yaml
│   ├── jobs
│   │   ├── completed_jobs.yaml
│   │   └── long-running-pods.yaml
│   ├── kustomize_configmap_generator
│   │   └── left_over_configmap.yaml
│   ├── persistent-volume-claims
│   │   └── unused_persistent-volume-claims.yaml
│   ├── persistent-volumes
│   │   └── unbound_persistent-volumes.yaml
│   ├── pod-disruption-budgets
│   │   └── unused_pod-disruption-budgets.yaml
│   ├── pods
│   │   └── completed_pods.yaml
│   ├── roles
│   │   └── unused_roles.yaml
│   ├── secrets
│   │   └── orphaned_secrets.yaml
│   ├── service-accounts
│   │   └── unused_service-accounts.yaml
│   ├── stateful-sets
│   │   ├── statefulset_with_no_autoscaler.yaml
│   │   └── statefulset_with_no_replicas.yaml
│   └── time_based_delete
│       ├── delete_resource_based_on_expire_date.yaml
│       ├── delete_resource_based_on_ttl_annotation.yaml
│       └── delete_resources_older_than_24hours.yaml
├── go.mod
├── go.sum
├── hack
│   ├── boilerplate.go.txt
│   └── tools
│       ├── get-golangci-lint.sh
│       ├── get-govulncheck.sh
│       ├── go.mod
│       ├── go.sum
│       ├── go_install.sh
│       └── tools.go
├── internal
│   ├── controller
│   │   ├── cleaner_controller.go
│   │   ├── cleaner_controller_test.go
│   │   ├── executor
│   │   │   ├── client.go
│   │   │   ├── client_test.go
│   │   │   ├── executor_suite_test.go
│   │   │   ├── export_test.go
│   │   │   ├── notification_test.go
│   │   │   ├── notifications.go
│   │   │   ├── store.go
│   │   │   ├── validate_aggregatedselection
│   │   │   │   ├── README.md
│   │   │   │   ├── deployment_with_autoscaler
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── left_over_configmap_by_ kustomize_configmapgenerator
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── orphaned_configmaps
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── orphaned_deployments
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── orphaned_persistent-volume-claims
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── orphaned_secrets
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── unused_clusterrole.yaml
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── unused_horizontal-pod-autoscalers
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── unused_ingresses
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── unused_poddisruptionbudget
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   ├── unused_roles
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── resources.yaml
│   │   │   │   └── unused_service-accounts
│   │   │   │       ├── cleaner.yaml
│   │   │   │       ├── matching.yaml
│   │   │   │       └── resources.yaml
│   │   │   ├── validate_resourceselector
│   │   │   │   ├── README.md
│   │   │   │   ├── completed_jobs
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── non-matching.yaml
│   │   │   │   ├── deleted_pods
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── non-matching.yaml
│   │   │   │   ├── deployment_with_zero_replicas
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── non-matching.yaml
│   │   │   │   ├── https_service
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── non-matching.yaml
│   │   │   │   ├── peristent-volumes
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── non-matching.yaml
│   │   │   │   ├── statefulset_with_zero_replicas
│   │   │   │   │   ├── cleaner.yaml
│   │   │   │   │   ├── matching.yaml
│   │   │   │   │   └── non-matching.yaml
│   │   │   │   └── time_based_delete
│   │   │   │       ├── cleaner.yaml
│   │   │   │       └── matching.yaml
│   │   │   ├── validate_test.go
│   │   │   ├── validate_transform
│   │   │   │   ├── README.md
│   │   │   │   └── service_selector
│   │   │   │       ├── cleaner.yaml
│   │   │   │       ├── matching.yaml
│   │   │   │       └── updated.yaml
│   │   │   ├── worker.go
│   │   │   └── worker_test.go
│   │   ├── export_test.go
│   │   └── suite_test.go
│   └── telemetry
│       └── report.go
├── manifest
│   └── manifest.yaml
├── mkdocs.yml
├── pkg
│   └── scope
│       └── pruner.go
├── renovate.json
└── test
    ├── fv
    │   ├── aggregated_test.go
    │   ├── delete_test.go
    │   ├── fv_suite_test.go
    │   ├── namespace_selector_test.go
    │   └── transform_test.go
    └── kind-cluster.yaml
/.all-contributorsrc:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "README.md"
4 | ],
5 | "imageSize": 100,
6 | "commit": false,
7 | "commitType": "docs",
8 | "commitConvention": "angular",
9 | "contributors": [
10 | {
11 | "login": "gianlucam76",
12 | "name": "Gianluca Mardente",
13 | "avatar_url": "https://avatars.githubusercontent.com/u/52940363?v=4",
14 | "profile": "https://projectsveltos.github.io/sveltos/",
15 | "contributions": [
16 | "code"
17 | ]
18 | },
19 | {
20 | "login": "oliverbaehler",
21 | "name": "Oliver Bähler",
22 | "avatar_url": "https://avatars.githubusercontent.com/u/26610571?v=4",
23 | "profile": "https://keybase.io/oliverbaehler",
24 | "contributions": [
25 | "code"
26 | ]
27 | },
28 | {
29 | "login": "egrosdou01",
30 | "name": "Eleni Grosdouli",
31 | "avatar_url": "https://avatars.githubusercontent.com/u/147995681?v=4",
32 | "profile": "https://github.com/egrosdou01",
33 | "contributions": [
34 | "doc"
35 | ]
36 | },
37 | {
38 | "login": "colinjlacy",
39 | "name": "Colin J Lacy",
40 | "avatar_url": "https://avatars.githubusercontent.com/u/4993605?v=4",
41 | "profile": "https://github.com/colinjlacy",
42 | "contributions": [
43 | "code"
44 | ]
45 | },
46 | {
47 | "login": "aminmr",
48 | "name": "Amin Mohammadian",
49 | "avatar_url": "https://avatars.githubusercontent.com/u/61911987?v=4",
50 | "profile": "https://github.com/aminmr",
51 | "contributions": [
52 | "doc"
53 | ]
54 | }
55 | ],
56 | "contributorsPerLine": 7,
57 | "skipCi": true,
58 | "repoType": "github",
59 | "repoHost": "https://github.com",
60 | "projectName": "k8s-cleaner",
61 | "projectOwner": "gianlucam76"
62 | }
63 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: gianlucam76
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_template.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug issue
3 | about: Use this template for tracking a bug.
4 | title: "BUG: [bug subject here]"
5 | labels: bug, untriaged
6 | ---
7 |
8 | # Problem Description
9 |
10 | _INSTRUCTIONS: Describe the problem in as much detail as possible, including the actions which caused the problem, and any other information you believe would be helpful in reproducing the problem._
11 |
12 | ``
13 | [insert problem description here]
14 | ``
15 |
16 | # System Information
17 |
18 | _INSTRUCTIONS: Provide the system and application information below._
19 |
20 | CLUSTERAPI VERSION: ` [e.g. v1.2.4 etc.] `
21 | SVELTOS VERSION: ` [e.g. PRODUCT version X.Y.Z] `
22 | KUBERNETES VERSION: ` [e.g. v1.25 etc.] `
23 |
24 | # Logs
25 |
26 | _INSTRUCTIONS: Provide any additional information you think would be helpful below. Large files, logs, etc. can be attached to this issue so long as they meet the GitHub attachment guidelines described here: https://help.github.com/en/github/managing-your-work-on-github/file-attachments-on-issues-and-pull-requests_
27 |
28 | ``
29 | [insert additional information here]
30 | ``
31 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/config/ct.yaml:
--------------------------------------------------------------------------------
1 | remote: origin
2 | target-branch: main
3 | chart-dirs:
4 | - charts
5 | validate-chart-schema: false
6 | validate-maintainers: true
7 | validate-yaml: false
8 |
--------------------------------------------------------------------------------
/.github/config/lintconf.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | rules:
3 | braces:
4 | min-spaces-inside: 0
5 | max-spaces-inside: 0
6 | min-spaces-inside-empty: -1
7 | max-spaces-inside-empty: -1
8 | brackets:
9 | min-spaces-inside: 0
10 | max-spaces-inside: 0
11 | min-spaces-inside-empty: -1
12 | max-spaces-inside-empty: -1
13 | colons:
14 | max-spaces-before: 0
15 | max-spaces-after: 1
16 | commas:
17 | max-spaces-before: 0
18 | min-spaces-after: 1
19 | max-spaces-after: 1
20 | comments:
21 | require-starting-space: true
22 | min-spaces-from-content: 1
23 | document-end: disable
24 | document-start: disable # No --- to start a file
25 | empty-lines:
26 | max: 2
27 | max-start: 0
28 | max-end: 0
29 | hyphens:
30 | max-spaces-after: 1
31 | indentation:
32 | spaces: consistent
 33 |       indent-sequences: whatever # accept both indented and non-indented list sequences
34 | check-multi-line-strings: false
35 | key-duplicates: enable
36 | line-length: disable # Lines can be any length
37 | new-line-at-end-of-file: enable
38 | new-lines:
39 | type: unix
40 | trailing-spaces: enable
41 | truthy:
42 | level: warning
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: ci
2 | on:
3 | push:
4 | branches:
5 | - main
6 | permissions:
7 | contents: write
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
13 | - uses: actions/setup-python@v5
14 | with:
15 | python-version: 3.x
16 | - run: pip install mkdocs-material
17 | - run: mkdocs gh-deploy --force
--------------------------------------------------------------------------------
/.github/workflows/helm-publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish Helm
2 | permissions: read-all
3 | on:
4 | push:
5 | tags:
6 | - 'v*'
7 |
8 | concurrency:
9 | group: ${{ github.workflow }}-${{ github.ref }}
10 | cancel-in-progress: true
11 |
12 | jobs:
13 | publish-helm-oci:
14 | runs-on: ubuntu-24.04
15 | permissions:
16 | contents: write
17 | id-token: write
18 | packages: write
19 | outputs:
20 | chart-digest: ${{ steps.helm_publish.outputs.digest }}
21 | steps:
22 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
23 | - uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
24 | - name: "Extract Version"
25 | id: extract_version
26 | run: |
27 | GIT_TAG=${GITHUB_REF##*/}
28 | VERSION=${GIT_TAG##*v}
29 | echo "version=$(echo $VERSION)" >> $GITHUB_OUTPUT
30 | - name: Helm | Publish
31 | id: helm_publish
32 | uses: peak-scale/github-actions/helm-oci-chart@a441cca016861c546ab7e065277e40ce41a3eb84 # v0.2.0
33 | with:
34 | registry: ghcr.io
35 | repository: ${{ github.repository_owner }}/charts
36 | name: "k8s-cleaner"
37 | version: ${{ steps.extract_version.outputs.version }}
 38 |           # Uncomment this once the docker image is released by this workflow
39 | #app-version: ${{ steps.extract_version.outputs.version }}
40 | registry-username: ${{ github.actor }}
41 | registry-password: ${{ secrets.GITHUB_TOKEN }}
42 | update-dependencies: 'true' # Defaults to false
43 | sign-image: 'true'
44 | signature-repository: ghcr.io/${{ github.repository_owner }}/signatures
45 | helm-provenance:
46 | needs: publish-helm-oci
47 | permissions:
48 | id-token: write # To sign the provenance.
49 | packages: write # To upload assets to release.
50 | actions: read # To read the workflow path.
51 | uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
52 | with:
53 | image: ghcr.io/${{ github.repository_owner }}/charts/k8s-cleaner
54 | digest: "${{ needs.publish-helm-oci.outputs.chart-digest }}"
55 | registry-username: ${{ github.actor }}
56 | secrets:
57 | registry-password: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/helm-test.yml:
--------------------------------------------------------------------------------
1 | name: Test Chart
2 | permissions: {}
3 | on:
4 | pull_request:
5 | branches: [ "*" ]
6 | jobs:
7 | lint:
8 | runs-on: ubuntu-24.04
9 | steps:
10 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
11 | with:
12 | fetch-depth: 0
13 | - uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4
14 | - name: Setup Chart Linting
15 | id: lint
16 | uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
17 | - name: Run chart-testing (list-changed)
18 | id: list-changed
19 | run: |
20 | changed=$(ct list-changed --config ./.github/config/ct.yaml)
21 | if [[ -n "$changed" ]]; then
22 | echo "::set-output name=changed::true"
23 | fi
24 | - name: Run chart-testing (lint)
25 | run: make helm-lint
26 | - name: Run docs-testing (helm-docs)
27 | id: helm-docs
28 | run: |
29 | make helm-docs
30 | if [[ $(git diff --stat) != '' ]]; then
31 | echo -e '\033[0;31mDocumentation outdated! (Run make helm-docs locally and commit)\033[0m ❌'
32 | git diff --color
33 | exit 1
34 | else
35 | echo -e '\033[0;32mDocumentation up to date\033[0m ✔'
36 | fi
37 | - name: Run chart-testing (install)
38 | run: make helm-test
39 | if: steps.list-changed.outputs.changed == 'true'
--------------------------------------------------------------------------------
/.github/workflows/main.yaml:
--------------------------------------------------------------------------------
1 | name: main
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - 'main'
7 | - 'dev'
8 | pull_request:
9 | types: [opened, edited, synchronize, reopened]
10 |
11 | permissions: {}
12 |
13 | jobs:
14 | build-static-test:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: checkout
18 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
19 | - name: Set up Go
20 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
21 | with:
22 | go-version: 1.24.3
23 | - name: Build
24 | run: make build
25 | - name: FMT
26 | run: make fmt
27 | - name: VET
28 | run: make vet
29 | - name: LINT
30 | run: make lint
31 | env:
32 | LINT: true
33 | build-ut:
34 | runs-on: ubuntu-latest
35 | steps:
36 | - name: checkout
37 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
38 | - name: Set up Go
39 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
40 | with:
41 | go-version: 1.24.3
42 | - name: ut
43 | run: make test
44 | env:
45 | UT: true
46 | FV:
47 | runs-on: ubuntu-latest
48 | steps:
49 | - name: checkout
50 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
51 | - name: Set up Go
52 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
53 | with:
54 | go-version: 1.24.3
55 | - name: fv
56 | run: make create-cluster fv
57 | env:
58 | FV: true
--------------------------------------------------------------------------------
/.github/workflows/osv-scanner-pr.yml:
--------------------------------------------------------------------------------
1 | name: OSV-Scanner PR Scan
2 |
3 | on:
4 | pull_request:
5 | branches: [main]
6 | merge_group:
7 | branches: [main]
8 |
9 | permissions:
10 | # Required to upload SARIF file to CodeQL. See: https://github.com/github/codeql-action/issues/2117
11 | actions: read
12 | # Require writing security events to upload SARIF file to security tab
13 | security-events: write
14 | # Only need to read contents
15 | contents: read
16 |
17 | jobs:
18 | scan-pr:
19 | uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v2.0.2"
20 |
--------------------------------------------------------------------------------
/.github/workflows/scorecard.yml:
--------------------------------------------------------------------------------
1 | name: Scorecard supply-chain security
2 | permissions: {}
3 |
4 | on:
5 | schedule:
6 | - cron: '0 0 * * 5'
7 | push:
8 | branches:
9 | - main
10 |
11 | jobs:
12 | analysis:
13 | name: Scorecard analysis
14 | runs-on: ubuntu-latest
15 | permissions:
16 | security-events: write
17 | id-token: write
18 |
19 | steps:
20 | - name: "Checkout code"
21 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
22 | with:
23 | persist-credentials: false
24 |
25 | - name: "Run analysis"
26 | uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
27 | with:
28 | results_file: results.sarif
29 | results_format: sarif
30 | repo_token: ${{ secrets.SCORECARD_TOKEN }}
31 | publish_results: true
32 |
33 | - name: "Upload artifact"
34 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
35 | with:
36 | name: SARIF file
37 | path: results.sarif
38 | retention-days: 5
39 |
40 | - name: "Upload to code-scanning"
41 | uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
42 | with:
43 | sarif_file: results.sarif
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # tmp files
9 | *.tmp
10 | tmp/
11 |
12 | # vim files
13 | *.swp
14 |
15 | # Test binary, built with `go test -c`
16 | *.test
17 |
18 | # Output of the go coverage tool, specifically when used with LiteIDE
19 | *.out
20 | out.json
21 |
22 | # Needed tools
23 | hack/tools/bin/
24 |
25 | # Needed by fv
26 | test/fv/workload_kubeconfig
27 | bin/manager
28 |
29 | manager_image_patch.yaml-e
30 | manager_pull_policy.yaml-e
31 | manager_auth_proxy_patch.yaml-e
32 |
33 | version.txt
34 |
--------------------------------------------------------------------------------
/ADOPTERS.md:
--------------------------------------------------------------------------------
1 | # Adopters
2 |
3 | This is a list of companies that have adopted k8s-cleaner.
 4 | Feel free to open a pull request to get yours listed.
5 |
6 | ### Adopter list (alphabetically)
7 |
8 | | Type | Name | Since | Website | Use-Case |
9 | |:-|:-|:-|:-|:-|
--------------------------------------------------------------------------------
/DEVELOPMENT.md:
--------------------------------------------------------------------------------
1 | # Development
2 |
 3 | Our Makefile helps you develop new changes and fixes. [Have a look at it](./Makefile), since not all targets are documented here.
4 |
5 | ## Building
6 |
7 | You can build the docker image locally
8 |
9 | ```bash
10 | make docker-build
11 | ```
12 |
 13 | This builds the image and adds it to your local Docker image store.
14 |
15 | ## Lint
16 |
17 | Execute lint testing:
18 |
19 | ```bash
20 | make lint
21 | ```
22 |
23 | ## Test
24 |
25 | Execute unit testing:
26 |
27 | ```bash
28 | make test
29 | ```
30 |
31 | ## E2E
32 |
33 | Execute functional verification:
34 |
35 | ```bash
36 | make kind-test
37 | ```
38 |
 39 | This will create a local *KinD* cluster, build the k8s-cleaner Docker image, load it into the cluster, and run the tests.
40 |
41 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build the manager binary
2 | FROM golang:1.24.3 AS builder
3 |
4 | ARG BUILDOS
5 | ARG TARGETARCH
6 |
7 | WORKDIR /workspace
8 | # Copy the Go Modules manifests
9 | COPY go.mod go.mod
10 | COPY go.sum go.sum
11 | # cache deps before building and copying source so that we don't need to re-download as much
12 | # and so that source changes don't invalidate our downloaded layer
13 | RUN go mod download
14 |
15 | # Copy the go source
16 | COPY cmd/main.go cmd/main.go
17 | COPY api/ api/
18 | COPY internal/controller/ internal/controller/
19 | COPY internal/telemetry/ internal/telemetry/
20 | COPY pkg/ pkg/
21 |
22 | RUN CGO_ENABLED=0 GOOS=$BUILDOS GOARCH=$TARGETARCH go build -a -o manager cmd/main.go
23 |
24 | # Use distroless as minimal base image to package the manager binary
25 | # Refer to https://github.com/GoogleContainerTools/distroless for more details
26 | FROM gcr.io/distroless/static:nonroot
27 | WORKDIR /
28 | COPY --from=builder /workspace/manager .
29 | USER 65532:65532
30 |
31 | ENTRYPOINT ["/manager"]
32 |
--------------------------------------------------------------------------------
/PROJECT:
--------------------------------------------------------------------------------
1 | # Code generated by tool. DO NOT EDIT.
2 | # This file is used to track the info used to scaffold your project
 3 | # and allow the plugins to work properly.
4 | # More info: https://book.kubebuilder.io/reference/project-config.html
5 | domain: projectsveltos.io
6 | layout:
7 | - go.kubebuilder.io/v4
8 | projectName: k8s-cleaner
9 | repo: gianlucam76/k8s-cleaner
10 | resources:
11 | - api:
12 | crdVersion: v1
13 | namespaced: true
14 | controller: true
15 | domain: projectsveltos.io
16 | group: apps
17 | kind: Cleaner
18 | path: gianlucam76/k8s-cleaner/api/v1alpha1
19 | version: v1alpha1
20 | - api:
21 | crdVersion: v1
22 | namespaced: true
23 | domain: projectsveltos.io
24 | group: apps
25 | kind: Report
26 | path: gianlucam76/k8s-cleaner/api/v1alpha1
27 | version: v1alpha1
28 | version: "3"
29 |
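The two APIs tracked above, `Cleaner` and `Report`, are defined under `api/v1alpha1`. As a rough point of reference, a minimal Cleaner object might look like the sketch below; the spec field names follow the upstream examples, since `cleaner_types.go` (where `CleanerSpec` is defined) is not reproduced in this dump.

```yaml
apiVersion: apps.projectsveltos.io/v1alpha1
kind: Cleaner
metadata:
  name: cleaner-sample
spec:
  schedule: "* 0 * * *"        # cron-style schedule
  action: Delete               # what to do with matching resources
  resourcePolicySet:
    resourceSelectors:
    - group: ""                # core API group
      version: v1
      kind: Pod
```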
--------------------------------------------------------------------------------
/api/v1alpha1/groupversion_info.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023. projectsveltos.io. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | // Package v1alpha1 contains API Schema definitions for the apps v1alpha1 API group
18 | // +kubebuilder:object:generate=true
19 | // +groupName=apps.projectsveltos.io
20 | package v1alpha1
21 |
22 | import (
23 | "k8s.io/apimachinery/pkg/runtime/schema"
24 | "sigs.k8s.io/controller-runtime/pkg/scheme"
25 | )
26 |
27 | var (
28 | // GroupVersion is group version used to register these objects
29 | GroupVersion = schema.GroupVersion{Group: "apps.projectsveltos.io", Version: "v1alpha1"}
30 |
31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme
32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
33 |
34 | // AddToScheme adds the types in this group-version to the given scheme.
35 | AddToScheme = SchemeBuilder.AddToScheme
36 | )
37 |
--------------------------------------------------------------------------------
/api/v1alpha1/report_types.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023. projectsveltos.io. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package v1alpha1
18 |
19 | import (
20 | corev1 "k8s.io/api/core/v1"
21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
22 | )
23 |
24 | type ResourceInfo struct {
 25 | // Resource identifies a Kubernetes resource
 26 | Resource corev1.ObjectReference `json:"resource,omitempty"`
 27 | 
 28 | // FullResource contains the full resource before
 29 | // Cleaner took an action on it
30 | // +optional
31 | FullResource []byte `json:"fullResource,omitempty"`
32 |
33 | // Message is an optional field.
34 | // +optional
35 | Message string `json:"message,omitempty"`
36 | }
37 |
38 | // ReportSpec defines the desired state of Report
39 | type ReportSpec struct {
 40 | // ResourceInfo identifies a set of Kubernetes resources
41 | ResourceInfo []ResourceInfo `json:"resourceInfo"`
42 |
43 | // Action indicates the action to take on selected object.
44 | Action Action `json:"action"`
45 | }
46 |
47 | //+kubebuilder:object:root=true
48 | //+kubebuilder:resource:path=reports,scope=Cluster
49 |
50 | // Report is the Schema for the reports API
51 | type Report struct {
52 | metav1.TypeMeta `json:",inline"`
53 | metav1.ObjectMeta `json:"metadata,omitempty"`
54 |
55 | Spec ReportSpec `json:"spec,omitempty"`
56 | }
57 |
58 | //+kubebuilder:object:root=true
59 |
60 | // ReportList contains a list of Report
61 | type ReportList struct {
62 | metav1.TypeMeta `json:",inline"`
63 | metav1.ListMeta `json:"metadata,omitempty"`
64 | Items []Report `json:"items"`
65 | }
66 |
67 | func init() {
68 | SchemeBuilder.Register(&Report{}, &ReportList{})
69 | }
70 |
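Putting the types above together, a Report object might look like the following sketch. The `action` value is an assumption here, since the `Action` type lives in `cleaner_types.go`, which is not reproduced in this dump; `resource` is a standard `corev1.ObjectReference`.

```yaml
apiVersion: apps.projectsveltos.io/v1alpha1
kind: Report
metadata:
  name: cleaner-sample        # cluster-scoped, per the kubebuilder marker above
spec:
  action: Delete              # assumed Action value
  resourceInfo:
  - resource:
      apiVersion: v1
      kind: Pod
      namespace: default
      name: completed-pod
    message: "Pod deleted by Cleaner instance cleaner-sample"
```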
--------------------------------------------------------------------------------
/assets/discord_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/assets/discord_logo.png
--------------------------------------------------------------------------------
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/assets/logo.png
--------------------------------------------------------------------------------
/assets/slack_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/assets/slack_logo.png
--------------------------------------------------------------------------------
/assets/smtp_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/assets/smtp_logo.png
--------------------------------------------------------------------------------
/assets/teams_logo.svg:
--------------------------------------------------------------------------------
(Microsoft Teams logo; SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/assets/telegram_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/assets/telegram_logo.png
--------------------------------------------------------------------------------
/assets/webex_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/assets/webex_logo.png
--------------------------------------------------------------------------------
/charts/k8s-cleaner/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/charts/k8s-cleaner/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: k8s-cleaner
3 | description: "Cleaner identifies, removes, or updates stale/orphaned or unhealthy resources to maintain a clean and efficient Kubernetes cluster"
4 | type: application
5 | # Note: The version is overwritten by the release workflow.
6 | version: 1.5.0
7 | # Note: The version is overwritten by the release workflow.
8 | appVersion: 0.14.1
9 | home: https://github.com/gianlucam76/k8s-cleaner
10 | icon: https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/main/assets/logo.png
11 | keywords:
12 | - kubernetes
13 | - operator
14 | - garbage-collection
15 | - gc
16 | - cleaner
17 | maintainers:
18 | - name: gianlucam76
19 | - name: oliverbaehler
20 |
--------------------------------------------------------------------------------
/charts/k8s-cleaner/README.md.gotmpl:
--------------------------------------------------------------------------------
1 | # K8s-cleaner
2 | {{ template "chart.deprecationWarning" . }}
3 |
4 | {{ template "chart.typeBadge" . }}
5 |
6 | {{ template "chart.description" . }}
7 |
8 | The chart is under active development and may contain bugs/unfinished documentation. Any testing/contributions are welcome! :)
9 |
10 | {{ template "chart.homepageLine" . }}
11 |
12 | {{/*
13 | Chart Maintainers
14 | */}}
15 | {{ template "chart.maintainersSection" . }}
16 |
17 | {{/*
18 | Chart Sources
19 | */}}
20 | {{ template "chart.sourcesSection" . }}
21 |
22 | {{/*
23 | Chart Requirements
24 | */}}
25 | {{ template "chart.requirementsSection" . }}
26 |
27 | # Major Changes
28 |
 29 | Major changes to chart functionality are documented together with the chart version that introduced them. **Before upgrading to a new chart version, check this section!**
30 |
31 | | **Change** | **Chart Version** | **Description** | **Commits/PRs** |
32 | | :--------- | :---------------- | :-------------- | :-------------- |
33 | |||||
34 |
35 | {{/*
36 | Chart Values
37 | */}}
38 | {{ template "chart.valuesSection" . }}
39 |
--------------------------------------------------------------------------------
/charts/k8s-cleaner/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "k8s-cleaner.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "k8s-cleaner.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "k8s-cleaner.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "k8s-cleaner.labels" -}}
37 | helm.sh/chart: {{ include "k8s-cleaner.chart" . }}
38 | {{ include "k8s-cleaner.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "k8s-cleaner.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "k8s-cleaner.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Create the name of the service account to use
55 | */}}
56 | {{- define "k8s-cleaner.serviceAccountName" -}}
57 | {{- if .Values.serviceAccount.create }}
58 | {{- default (include "k8s-cleaner.fullname" .) .Values.serviceAccount.name }}
59 | {{- else }}
60 | {{- default "default" .Values.serviceAccount.name }}
61 | {{- end }}
62 | {{- end }}
63 |
 64 | 
 65 | {{/*
 66 | Render a value that may itself be a template: $.tpl (a string or an object)
 67 | is evaluated with tpl against the context $.ctx, and any literal "+|" is
 68 | replaced with a newline.
 69 | */}}
 70 | {{- define "k8s-cleaner.template" -}}
 71 | {{- if $.ctx }}
 72 | {{- if typeIs "string" $.tpl }}
 73 | {{- tpl $.tpl $.ctx | replace "+|" "\n" }}
 74 | {{- else }}
 75 | {{- tpl ($.tpl | toYaml) $.ctx | replace "+|" "\n" }}
 76 | {{- end }}
 77 | {{- end }}
 78 | {{- end -}}
--------------------------------------------------------------------------------
/charts/k8s-cleaner/templates/crds.tpl:
--------------------------------------------------------------------------------
1 | {{/* CustomResources Lifecycle */}}
2 | {{- if $.Values.crds.install }}
3 | {{ range $path, $_ := .Files.Glob "crd/**" }}
4 | {{- with $ }}
5 | {{- $content := (tpl (.Files.Get $path) .) -}}
6 | {{- $p := (fromYaml $content) -}}
7 |
 8 | {{/* Add Common Labels */}}
9 | {{- $_ := set $p.metadata "labels" (mergeOverwrite (default dict (get $p.metadata "labels")) (fromYaml (include "k8s-cleaner.labels" $))) -}}
10 |
11 | {{/* Add Keep annotation to CRDs */}}
12 | {{- if $.Values.crds.keep }}
13 | {{- $_ := set $p.metadata.annotations "helm.sh/resource-policy" "keep" -}}
14 | {{- end }}
15 |
16 | {{- if $p }}
17 | {{- printf "---\n%s" (toYaml $p) | nindent 0 }}
18 | {{- end }}
19 | {{ end }}
20 | {{- end }}
21 | {{- end }}
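The template reads two flags from the chart values; the corresponding snippet in `values.yaml` (not reproduced in this dump) would look like:

```yaml
crds:
  install: true   # render the CRDs shipped under charts/k8s-cleaner/crd/
  keep: true      # annotate them with helm.sh/resource-policy: keep
```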
--------------------------------------------------------------------------------
/charts/k8s-cleaner/templates/extra-manifests.yaml:
--------------------------------------------------------------------------------
1 | {{- range .Values.extraObjects }}
2 | ---
3 | {{- tpl (toYaml . ) $ }}
4 | {{- end }}
5 |
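Because every entry is passed through `tpl`, string fields inside `extraObjects` may themselves contain chart templates. A hypothetical values snippet:

```yaml
extraObjects:
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: '{{ include "k8s-cleaner.fullname" . }}-extra'
  data:
    foo: bar
```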
--------------------------------------------------------------------------------
/charts/k8s-cleaner/templates/rbac.yaml:
--------------------------------------------------------------------------------
1 | {{- if $.Values.rbac.create }}
2 | ---
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: ClusterRole
5 | metadata:
6 | labels:
7 | {{- include "k8s-cleaner.labels" . | nindent 4 }}
8 | name: {{ include "k8s-cleaner.fullname" . }}-controller-role
9 | rules:
10 | - apiGroups:
11 | - '*'
12 | resources:
13 | - '*'
14 | verbs:
15 | - '*'
16 | - delete
17 | - get
18 | - list
19 | - watch
20 | - apiGroups:
21 | - apps.projectsveltos.io
22 | resources:
23 | - cleaners
24 | verbs:
25 | - create
26 | - delete
27 | - get
28 | - list
29 | - patch
30 | - update
31 | - watch
32 | - apiGroups:
33 | - apps.projectsveltos.io
34 | resources:
35 | - cleaners/finalizers
36 | verbs:
37 | - update
38 | - apiGroups:
39 | - apps.projectsveltos.io
40 | resources:
41 | - cleaners/status
42 | verbs:
43 | - get
44 | - patch
45 | - update
46 | - apiGroups:
47 | - apps.projectsveltos.io
48 | resources:
49 | - reports
50 | verbs:
51 | - '*'
52 | ---
53 | apiVersion: rbac.authorization.k8s.io/v1
54 | kind: ClusterRole
55 | metadata:
56 | labels:
57 | {{- include "k8s-cleaner.labels" . | nindent 4 }}
58 | name: {{ include "k8s-cleaner.fullname" . }}-metrics-reader
59 | rules:
60 | - nonResourceURLs:
61 | - /metrics
62 | verbs:
63 | - get
64 | ---
65 | apiVersion: rbac.authorization.k8s.io/v1
66 | kind: ClusterRole
67 | metadata:
68 | labels:
69 | {{- include "k8s-cleaner.labels" . | nindent 4 }}
70 | name: {{ include "k8s-cleaner.fullname" . }}-proxy-role
71 | rules:
72 | - apiGroups:
73 | - authentication.k8s.io
74 | resources:
75 | - tokenreviews
76 | verbs:
77 | - create
78 | - apiGroups:
79 | - authorization.k8s.io
80 | resources:
81 | - subjectaccessreviews
82 | verbs:
83 | - create
84 | ---
85 | apiVersion: rbac.authorization.k8s.io/v1
86 | kind: ClusterRoleBinding
87 | metadata:
88 | labels:
89 | {{- include "k8s-cleaner.labels" . | nindent 4 }}
90 | name: {{ include "k8s-cleaner.fullname" . }}-controller-rolebinding
91 | roleRef:
92 | apiGroup: rbac.authorization.k8s.io
93 | kind: ClusterRole
94 | name: {{ include "k8s-cleaner.fullname" . }}-controller-role
95 | subjects:
96 | - kind: ServiceAccount
97 | name: {{ include "k8s-cleaner.serviceAccountName" . }}
98 | namespace: {{ $.Release.Namespace}}
99 | ---
100 | apiVersion: rbac.authorization.k8s.io/v1
101 | kind: ClusterRoleBinding
102 | metadata:
103 | labels:
104 | {{- include "k8s-cleaner.labels" . | nindent 4 }}
105 | name: {{ include "k8s-cleaner.fullname" . }}-proxy-rolebinding
106 | roleRef:
107 | apiGroup: rbac.authorization.k8s.io
108 | kind: ClusterRole
109 | name: {{ include "k8s-cleaner.fullname" . }}-proxy-role
110 | subjects:
111 | - kind: ServiceAccount
112 | name: {{ include "k8s-cleaner.serviceAccountName" . }}
113 | namespace: {{ $.Release.Namespace }}
114 | {{- end }}
--------------------------------------------------------------------------------
/charts/k8s-cleaner/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "k8s-cleaner.fullname" . }}-metrics
5 | labels:
6 | {{- include "k8s-cleaner.labels" . | nindent 4 }}
7 | spec:
8 | type: ClusterIP
9 | ports:
10 | - port: 8081
11 | targetPort: metrics
12 | protocol: TCP
13 | name: metrics
14 | selector:
15 | {{- include "k8s-cleaner.selectorLabels" . | nindent 4 }}
16 |
--------------------------------------------------------------------------------
/charts/k8s-cleaner/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "k8s-cleaner.serviceAccountName" . }}
6 | labels:
7 | {{- include "k8s-cleaner.labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
13 | {{- end }}
14 |
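The values consumed by this template (defaults live in `values.yaml`, which is not reproduced in this dump) would be along these lines:

```yaml
serviceAccount:
  create: true
  name: ""          # empty means: fall back to the k8s-cleaner.fullname template
  annotations: {}
  automount: true
```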
--------------------------------------------------------------------------------
/charts/k8s-cleaner/templates/servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceMonitor.enabled }}
2 | apiVersion: monitoring.coreos.com/v1
3 | kind: ServiceMonitor
4 | metadata:
5 | name: {{ include "k8s-cleaner.fullname" . }}
6 | namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
7 | labels:
8 | {{- include "k8s-cleaner.labels" . | nindent 4 }}
9 | {{- with .Values.serviceMonitor.labels }}
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- with .Values.serviceMonitor.annotations }}
13 | annotations:
14 | {{- toYaml . | nindent 4 }}
15 | {{- end }}
16 | spec:
17 | endpoints:
18 | {{- with .Values.serviceMonitor.endpoint }}
19 | - interval: {{ .interval }}
20 | port: metrics
21 | path: /metrics
22 | scheme: https
23 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
24 | {{- with .tlsConfig }}
25 | tlsConfig:
26 | {{- toYaml . | nindent 6 }}
27 | {{- end }}
28 | {{- with .scrapeTimeout }}
29 | scrapeTimeout: {{ . }}
30 | {{- end }}
31 | {{- with .metricRelabelings }}
32 | metricRelabelings: {{- toYaml . | nindent 6 }}
33 | {{- end }}
34 | {{- with .relabelings }}
35 | relabelings: {{- toYaml . | nindent 6 }}
36 | {{- end }}
37 | {{- end }}
38 | {{- with .Values.serviceMonitor.jobLabel }}
39 | jobLabel: {{- toYaml . | nindent 4 }}
40 | {{- end }}
41 | {{- with .Values.serviceMonitor.targetLabels }}
42 | targetLabels: {{- toYaml . | nindent 4 }}
43 | {{- end }}
44 | selector:
45 | matchLabels:
46 | {{- if .Values.serviceMonitor.matchLabels }}
47 | {{- toYaml .Values.serviceMonitor.matchLabels | nindent 6 }}
48 | {{- else }}
49 | {{- include "k8s-cleaner.selectorLabels" . | nindent 6 }}
50 | {{- end }}
51 | namespaceSelector:
52 | matchNames:
53 | - {{ .Release.Namespace }}
54 | {{- end }}
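A sketch of the values this template consumes, assuming defaults similar to those in `values.yaml` (not reproduced in this dump):

```yaml
serviceMonitor:
  enabled: true
  namespace: ""        # empty means: use the release namespace
  labels: {}
  annotations: {}
  endpoint:
    interval: 30s
    scrapeTimeout: 10s
    tlsConfig:
      insecureSkipVerify: true
```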
--------------------------------------------------------------------------------
/config/crd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # This kustomization.yaml is not intended to be run by itself,
2 | # since it depends on service name and namespace that are out of this kustomize package.
3 | # It should be run by config/default
4 | resources:
5 | - bases/apps.projectsveltos.io_cleaners.yaml
6 | - bases/apps.projectsveltos.io_reports.yaml
7 | #+kubebuilder:scaffold:crdkustomizeresource
8 |
9 | patches:
10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
11 | # patches here are for enabling the conversion webhook for each CRD
12 | #- path: patches/webhook_in_cleaners.yaml
13 | #- path: patches/webhook_in_reports.yaml
14 | #+kubebuilder:scaffold:crdkustomizewebhookpatch
15 |
16 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
17 | # patches here are for enabling the CA injection for each CRD
18 | #- path: patches/cainjection_in_cleaners.yaml
19 | #- path: patches/cainjection_in_reports.yaml
20 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch
21 |
22 | # the following config is for teaching kustomize how to do kustomization for CRDs.
23 | configurations:
24 | - kustomizeconfig.yaml
25 |
--------------------------------------------------------------------------------
/config/crd/kustomizeconfig.yaml:
--------------------------------------------------------------------------------
 1 | # This file is for teaching kustomize how to substitute name and namespace references in CRDs
2 | nameReference:
3 | - kind: Service
4 | version: v1
5 | fieldSpecs:
6 | - kind: CustomResourceDefinition
7 | version: v1
8 | group: apiextensions.k8s.io
9 | path: spec/conversion/webhook/clientConfig/service/name
10 |
11 | namespace:
12 | - kind: CustomResourceDefinition
13 | version: v1
14 | group: apiextensions.k8s.io
15 | path: spec/conversion/webhook/clientConfig/service/namespace
16 | create: false
17 |
18 | varReference:
19 | - path: metadata/annotations
20 |
--------------------------------------------------------------------------------
/config/crd/patches/cainjection_in_pruners.yaml:
--------------------------------------------------------------------------------
1 | # The following patch adds a directive for certmanager to inject CA into the CRD
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | annotations:
6 | cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME
7 | name: cleaners.apps.projectsveltos.io
8 |
--------------------------------------------------------------------------------
/config/crd/patches/cainjection_in_reports.yaml:
--------------------------------------------------------------------------------
1 | # The following patch adds a directive for certmanager to inject CA into the CRD
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | annotations:
6 | cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME
7 | name: reports.apps.projectsveltos.io
8 |
--------------------------------------------------------------------------------
/config/crd/patches/webhook_in_pruners.yaml:
--------------------------------------------------------------------------------
1 | # The following patch enables a conversion webhook for the CRD
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: cleaners.apps.projectsveltos.io
6 | spec:
7 | conversion:
8 | strategy: Webhook
9 | webhook:
10 | clientConfig:
11 | service:
12 | namespace: projectsveltos
13 | name: webhook-service
14 | path: /convert
15 | conversionReviewVersions:
16 | - v1
17 |
--------------------------------------------------------------------------------
/config/crd/patches/webhook_in_reports.yaml:
--------------------------------------------------------------------------------
1 | # The following patch enables a conversion webhook for the CRD
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: reports.apps.projectsveltos.io
6 | spec:
7 | conversion:
8 | strategy: Webhook
9 | webhook:
10 | clientConfig:
11 | service:
12 | namespace: system
13 | name: webhook-service
14 | path: /convert
15 | conversionReviewVersions:
16 | - v1
17 |
--------------------------------------------------------------------------------
/config/default/manager_auth_proxy_patch.yaml:
--------------------------------------------------------------------------------
 1 | # This patch injects a sidecar container which is an HTTP proxy for the
2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 | name: controller
7 | namespace: projectsveltos
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: controller
13 | args:
14 | - "--diagnostics-address=:8443"
15 | - "--version=main"
16 |
17 |
--------------------------------------------------------------------------------
/config/default/manager_image_patch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: controller
5 | namespace: projectsveltos
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | # Change the value of image field below to your controller image URL
11 | - image: docker.io/projectsveltos/k8s-cleaner:main
12 | name: controller
13 |
--------------------------------------------------------------------------------
/config/default/manager_pull_policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: controller
5 | namespace: projectsveltos
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: controller
11 | imagePullPolicy:
12 |
--------------------------------------------------------------------------------
/config/manager/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - manager.yaml
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | images:
6 | - name: controller
7 | newName: controller
8 | newTag: latest
9 |
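The `images` transformer above rewrites the `controller:latest` placeholder in `manager.yaml` at build time. For example, after running `kustomize edit set image controller=docker.io/projectsveltos/k8s-cleaner:v0.14.1` (a hypothetical tag) in this directory, the entry would read:

```yaml
images:
- name: controller
  newName: docker.io/projectsveltos/k8s-cleaner
  newTag: v0.14.1
```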
--------------------------------------------------------------------------------
/config/manager/manager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: projectsveltos
5 | ---
6 | apiVersion: apps/v1
7 | kind: Deployment
8 | metadata:
9 | name: controller
10 | namespace: projectsveltos
11 | labels:
12 | control-plane: k8s-cleaner
13 | app.kubernetes.io/name: deployment
14 | app.kubernetes.io/instance: controller-manager
15 | app.kubernetes.io/component: manager
16 | app.kubernetes.io/created-by: k8s-cleaner
17 | app.kubernetes.io/part-of: k8s-cleaner
18 | app.kubernetes.io/managed-by: kustomize
19 | spec:
20 | selector:
21 | matchLabels:
22 | control-plane: k8s-cleaner
23 | replicas: 1
24 | template:
25 | metadata:
26 | annotations:
27 | kubectl.kubernetes.io/default-container: controller
28 | labels:
29 | control-plane: k8s-cleaner
30 | spec:
31 | # TODO(user): Uncomment the following code to configure the nodeAffinity expression
32 | # according to the platforms which are supported by your solution.
33 | # It is considered best practice to support multiple architectures. You can
34 | # build your manager image using the makefile target docker-buildx.
35 | # affinity:
36 | # nodeAffinity:
37 | # requiredDuringSchedulingIgnoredDuringExecution:
38 | # nodeSelectorTerms:
39 | # - matchExpressions:
40 | # - key: kubernetes.io/arch
41 | # operator: In
42 | # values:
43 | # - amd64
44 | # - arm64
45 | # - ppc64le
46 | # - s390x
47 | # - key: kubernetes.io/os
48 | # operator: In
49 | # values:
50 | # - linux
51 | securityContext:
52 | runAsNonRoot: true
53 | # TODO(user): For common cases that do not require escalating privileges
54 | # it is recommended to ensure that all your Pods/Containers are restrictive.
55 | # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
56 | # Please uncomment the following code if your project does NOT have to work on old Kubernetes
57 | # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
58 | # seccompProfile:
59 | # type: RuntimeDefault
60 | containers:
61 | - command:
62 | - /manager
63 | args:
64 | image: controller:latest
65 | name: controller
66 | ports:
67 | - containerPort: 8443
68 | name: metrics
69 | protocol: TCP
70 | - containerPort: 9440
71 | name: healthz
72 | protocol: TCP
73 | securityContext:
74 | allowPrivilegeEscalation: false
75 | capabilities:
76 | drop:
77 | - "ALL"
78 | livenessProbe:
79 | failureThreshold: 3
80 | httpGet:
81 | path: /healthz
82 | port: healthz
83 | scheme: HTTP
84 | initialDelaySeconds: 15
85 | periodSeconds: 20
86 | readinessProbe:
87 | failureThreshold: 3
88 | httpGet:
89 | path: /readyz
90 | port: healthz
91 | scheme: HTTP
92 | initialDelaySeconds: 5
93 | periodSeconds: 10
94 | serviceAccountName: controller
95 | terminationGracePeriodSeconds: 10
96 |
--------------------------------------------------------------------------------
/config/prometheus/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - monitor.yaml
3 |
--------------------------------------------------------------------------------
/config/prometheus/monitor.yaml:
--------------------------------------------------------------------------------
1 |
2 | # Prometheus Monitor Service (Metrics)
3 | apiVersion: monitoring.coreos.com/v1
4 | kind: ServiceMonitor
5 | metadata:
6 | labels:
7 | control-plane: k8s-cleaner
8 | app.kubernetes.io/name: servicemonitor
9 | app.kubernetes.io/instance: controller-manager-metrics-monitor
10 | app.kubernetes.io/component: metrics
11 | app.kubernetes.io/created-by: k8s-cleaner
12 | app.kubernetes.io/part-of: k8s-cleaner
13 | app.kubernetes.io/managed-by: kustomize
14 | name: controller-metrics-monitor
15 | namespace: projectsveltos
16 | spec:
17 | endpoints:
18 | - path: /metrics
19 | port: https
20 | scheme: https
21 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
22 | tlsConfig:
23 | insecureSkipVerify: true
24 | selector:
25 | matchLabels:
26 | control-plane: k8s-cleaner
27 |
--------------------------------------------------------------------------------
/config/rbac/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | # All RBAC will be applied under this service account in
3 | # the deployment namespace. You may comment out this resource
4 | # if your manager will use a service account that exists at
5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding
6 | # subjects if changing service account names.
7 | - service_account.yaml
8 | - role.yaml
9 | - role_binding.yaml
10 |
11 |
--------------------------------------------------------------------------------
/config/rbac/pruner_editor_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to edit cleaners.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: clusterrole
7 | app.kubernetes.io/instance: cleaner-editor-role
8 | app.kubernetes.io/component: rbac
9 | app.kubernetes.io/created-by: k8s-cleaner
10 | app.kubernetes.io/part-of: k8s-cleaner
11 | app.kubernetes.io/managed-by: kustomize
12 | name: cleaner-editor-role
13 | rules:
14 | - apiGroups:
15 | - apps.projectsveltos.io
16 | resources:
17 | - cleaners
18 | verbs:
19 | - create
20 | - delete
21 | - get
22 | - list
23 | - patch
24 | - update
25 | - watch
26 | - apiGroups:
27 | - apps.projectsveltos.io
28 | resources:
29 | - cleaners/status
30 | verbs:
31 | - get
32 |
--------------------------------------------------------------------------------
/config/rbac/pruner_viewer_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to view cleaners.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: clusterrole
7 | app.kubernetes.io/instance: cleaner-viewer-role
8 | app.kubernetes.io/component: rbac
9 | app.kubernetes.io/created-by: k8s-cleaner
10 | app.kubernetes.io/part-of: k8s-cleaner
11 | app.kubernetes.io/managed-by: kustomize
12 | name: cleaner-viewer-role
13 | rules:
14 | - apiGroups:
15 | - apps.projectsveltos.io
16 | resources:
17 | - cleaners
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 | - apiGroups:
23 | - apps.projectsveltos.io
24 | resources:
25 | - cleaners/status
26 | verbs:
27 | - get
28 |
--------------------------------------------------------------------------------
/config/rbac/report_editor_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to edit reports.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: clusterrole
7 | app.kubernetes.io/instance: report-editor-role
8 | app.kubernetes.io/component: rbac
9 | app.kubernetes.io/created-by: k8s-cleaner
10 | app.kubernetes.io/part-of: k8s-cleaner
11 | app.kubernetes.io/managed-by: kustomize
12 | name: report-editor-role
13 | rules:
14 | - apiGroups:
15 | - apps.projectsveltos.io
16 | resources:
17 | - reports
18 | verbs:
19 | - create
20 | - delete
21 | - get
22 | - list
23 | - patch
24 | - update
25 | - watch
26 | - apiGroups:
27 | - apps.projectsveltos.io
28 | resources:
29 | - reports/status
30 | verbs:
31 | - get
32 |
--------------------------------------------------------------------------------
/config/rbac/report_viewer_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to view reports.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: clusterrole
7 | app.kubernetes.io/instance: report-viewer-role
8 | app.kubernetes.io/component: rbac
9 | app.kubernetes.io/created-by: k8s-cleaner
10 | app.kubernetes.io/part-of: k8s-cleaner
11 | app.kubernetes.io/managed-by: kustomize
12 | name: report-viewer-role
13 | rules:
14 | - apiGroups:
15 | - apps.projectsveltos.io
16 | resources:
17 | - reports
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 | - apiGroups:
23 | - apps.projectsveltos.io
24 | resources:
25 | - reports/status
26 | verbs:
27 | - get
28 |
--------------------------------------------------------------------------------
/config/rbac/role.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: controller-role
6 | rules:
7 | - apiGroups:
8 | - '*'
9 | resources:
10 | - '*'
11 | verbs:
12 | - '*'
13 | - apiGroups:
14 | - apps.projectsveltos.io
15 | resources:
16 | - cleaners
17 | verbs:
18 | - get
19 | - list
20 | - patch
21 | - watch
22 | - apiGroups:
23 | - apps.projectsveltos.io
24 | resources:
25 | - cleaners/finalizers
26 | verbs:
27 | - update
28 | - apiGroups:
29 | - apps.projectsveltos.io
30 | resources:
31 | - cleaners/status
32 | verbs:
33 | - get
34 | - patch
35 | - update
36 | - apiGroups:
37 | - authentication.k8s.io
38 | resources:
39 | - tokenreviews
40 | verbs:
41 | - create
42 | - apiGroups:
43 | - authorization.k8s.io
44 | resources:
45 | - subjectaccessreviews
46 | verbs:
47 | - create
48 |
--------------------------------------------------------------------------------
/config/rbac/role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: clusterrolebinding
6 | app.kubernetes.io/instance: manager-rolebinding
7 | app.kubernetes.io/component: rbac
8 | app.kubernetes.io/created-by: k8s-cleaner
9 | app.kubernetes.io/part-of: k8s-cleaner
10 | app.kubernetes.io/managed-by: kustomize
11 | name: controller-rolebinding
12 | roleRef:
13 | apiGroup: rbac.authorization.k8s.io
14 | kind: ClusterRole
15 | name: controller-role
16 | subjects:
17 | - kind: ServiceAccount
18 | name: controller
19 | namespace: projectsveltos
20 |
--------------------------------------------------------------------------------
/config/rbac/service_account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: serviceaccount
6 | app.kubernetes.io/instance: controller-manager-sa
7 | app.kubernetes.io/component: rbac
8 | app.kubernetes.io/created-by: k8s-cleaner
9 | app.kubernetes.io/part-of: k8s-cleaner
10 | app.kubernetes.io/managed-by: kustomize
11 | name: controller
12 | namespace: projectsveltos
13 |
--------------------------------------------------------------------------------
/config/samples/apps_v1alpha1_pruner.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps.projectsveltos.io/v1alpha1
2 | kind: Cleaner
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: cleaner
6 | app.kubernetes.io/instance: cleaner-sample
7 | app.kubernetes.io/part-of: k8s-cleaner
8 | app.kubernetes.io/managed-by: kustomize
9 | app.kubernetes.io/created-by: k8s-cleaner
10 | name: cleaner-sample
11 | spec:
12 | # TODO(user): Add fields here
13 |
--------------------------------------------------------------------------------
/config/samples/apps_v1alpha1_report.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps.projectsveltos.io/v1alpha1
2 | kind: Report
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: report
6 | app.kubernetes.io/instance: report-sample
7 | app.kubernetes.io/part-of: k8s-cleaner
8 | app.kubernetes.io/managed-by: kustomize
9 | app.kubernetes.io/created-by: k8s-cleaner
10 | name: report-sample
11 | spec:
12 | # TODO(user): Add fields here
13 |
--------------------------------------------------------------------------------
/config/samples/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ## Append samples of your project ##
2 | resources:
3 | - apps_v1alpha1_pruner.yaml
4 | - apps_v1alpha1_report.yaml
5 | #+kubebuilder:scaffold:manifestskustomizesamples
6 |
--------------------------------------------------------------------------------
/docs/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/docs/assets/logo.png
--------------------------------------------------------------------------------
/docs/assets/sveltos-multi-cluster.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/docs/assets/sveltos-multi-cluster.gif
--------------------------------------------------------------------------------
/docs/assets/sveltos_and_cleaner.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/docs/assets/sveltos_and_cleaner.gif
--------------------------------------------------------------------------------
/docs/assets/sveltos_roomba.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/20805d30d24ad19fae97b933a41e46579f7a54c5/docs/assets/sveltos_roomba.gif
--------------------------------------------------------------------------------
/docs/getting_started/examples/unhealthy_resources/more_examples.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Cleanup unused more examples
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## More Examples
16 |
17 | The k8s-cleaner has many more examples to explore [here](https://github.com/gianlucam76/k8s-cleaner/tree/main/examples-unhealthy-resources).
18 |
19 | ## Contribute to k8s-cleaner Examples
20 |
21 | We encourage all users to contribute to the examples directory by adding their own Cleaner configurations💡. This helps the community benefit from shared expertise and builds a stronger knowledge base of Cleaner use cases.
22 | 
23 | To add an example, create a new file in the examples directory with a descriptive name and include your Cleaner configuration. Once the example is added, feel free to submit a Pull Request (PR) to share it with the community.
24 |
--------------------------------------------------------------------------------
/docs/getting_started/examples/unused_resources/more_examples.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Cleanup unused more examples
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## More Examples
16 |
17 | The k8s-cleaner has many more examples to explore [here](https://github.com/gianlucam76/k8s-cleaner/tree/main/examples-unused-resources).
18 |
19 | ## Contribute to k8s-cleaner Examples
20 |
21 | We encourage all users to contribute to the examples directory by adding their own Cleaner configurations💡. This helps the community benefit from shared expertise and builds a stronger knowledge base of Cleaner use cases.
22 | 
23 | To add an example, create a new file in the examples directory with a descriptive name and include your Cleaner configuration. Once the example is added, feel free to submit a Pull Request (PR) to share it with the community.
24 |
--------------------------------------------------------------------------------
/docs/getting_started/examples/unused_resources/persistent_volume.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Cleanup unused PersistentVolumes
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## Introduction
16 |
17 | The k8s-cleaner is able to delete unused `PersistentVolumes`. The below Cleaner instance will find any `PersistentVolume` resource whose phase is set to anything **but** `Bound`.
18 |
19 | ## Example - Cleaner Instance
20 |
21 | !!! example ""
22 |
23 | ```yaml
24 | ---
25 | apiVersion: apps.projectsveltos.io/v1alpha1
26 | kind: Cleaner
27 | metadata:
28 | name: unbound-persistent-volumes
29 | spec:
30 | schedule: "* 0 * * *"
31 | resourcePolicySet:
32 | resourceSelectors:
33 | - kind: PersistentVolume
34 | group: ""
35 | version: v1
36 | evaluate: |
37 | function evaluate()
38 | hs = {}
39 | hs.matching = false
40 | if obj.status ~= nil and obj.status.phase ~= "Bound" then
41 | hs.matching = true
42 | end
43 | return hs
44 | end
45 | action: Delete
46 | ```
47 |
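48 | For a rough preview of what this Cleaner would match, a plain kubectl check can help (a sketch; it simply filters out lines containing `Bound`):
49 | 
50 | ```bash
51 | kubectl get pv --no-headers | grep -v Bound
52 | ```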
--------------------------------------------------------------------------------
/docs/getting_started/examples/unused_resources/persistent_volume_claims.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Cleanup unused PersistentVolumeClaims
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## Introduction
16 |
17 | The k8s-cleaner is able to delete unused `PersistentVolumeClaim` resources. The below Cleaner instance will find any `PersistentVolumeClaim` resources not currently used by any **Pod**. The example considers **all** namespaces.
18 |
19 | ## Example - Cleaner Instance
20 |
21 | !!! example ""
22 |
23 | ```yaml
24 | ---
25 | apiVersion: apps.projectsveltos.io/v1alpha1
26 | kind: Cleaner
27 | metadata:
28 | name: stale-persistent-volume-claim
29 | spec:
30 | schedule: "* 0 * * *"
31 | action: Delete # Delete matching resources
32 | resourcePolicySet:
33 | resourceSelectors:
34 | - kind: Pod
35 | group: ""
36 | version: v1
37 | - kind: PersistentVolumeClaim
38 | group: ""
39 | version: v1
40 | aggregatedSelection: |
41 | function isUsed(pvc, pods)
42 | if pods == nil then
43 | return false
44 | end
45 | for _, pod in ipairs(pods) do
46 | if pod.spec.volumes ~= nil then
47 | for _,volume in ipairs(pod.spec.volumes) do
48 | if volume.persistentVolumeClaim ~= nil and volume.persistentVolumeClaim.claimName == pvc.metadata.name then
49 | return true
50 | end
51 | end
52 | end
53 | end
54 | return false
55 | end
56 |
57 | function evaluate()
58 | local hs = {}
59 | hs.message = ""
60 |
61 | local pods = {}
62 | local pvcs = {}
63 | local unusedPVCs = {}
64 |
65 | -- Separate pods and pvcs from the resources
66 | -- Group those by namespace
67 | for _, resource in ipairs(resources) do
68 | local kind = resource.kind
69 | if kind == "Pod" then
70 | if not pods[resource.metadata.namespace] then
71 | pods[resource.metadata.namespace] = {}
72 | end
73 | table.insert(pods[resource.metadata.namespace], resource)
74 | elseif kind == "PersistentVolumeClaim" then
75 | if not pvcs[resource.metadata.namespace] then
76 | pvcs[resource.metadata.namespace] = {}
77 | end
78 | table.insert(pvcs[resource.metadata.namespace], resource)
79 | end
80 | end
81 |
82 | -- Iterate through each namespace and identify unused PVCs
83 | for namespace, perNamespacePVCs in pairs(pvcs) do
84 | for _, pvc in ipairs(perNamespacePVCs) do
85 | if not isUsed(pvc, pods[namespace]) then
86 | table.insert(unusedPVCs, {resource = pvc})
87 | end
88 | end
89 | end
90 |
91 | if #unusedPVCs > 0 then
92 | hs.resources = unusedPVCs
93 | end
94 | return hs
95 | end
96 | ```
97 |
--------------------------------------------------------------------------------
/docs/getting_started/features/dryrun/dryrun.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Dry Run
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## Introduction to Dry Run
16 |
17 | To preview which resources match the Cleaner's criteria, set the **Action** field to **Scan**. The k8s-cleaner will still execute its logic but will **not** delete and/or update any resources.
18 |
19 | To identify matching resources, you can then either ask k8s-cleaner to generate a [Report](../../../reports/k8s-cleaner_reports.md) or search the controller logs for the message `resource is a match for cleaner` (see the command sketch at the end of this page).
20 |
21 |
22 | ## Example - Dry Run
23 |
24 | The example below provides a definition for eliminating **Deployments** in the `test` namespace that carry the label `serving=api` and whose `environment` label differs from `production`.
25 | !!! example ""
26 |
27 | ```yaml
28 | ---
29 | apiVersion: apps.projectsveltos.io/v1alpha1
30 | kind: Cleaner
31 | metadata:
32 | name: cleaner-sample1
33 | spec:
34 | schedule: "* 0 * * *" # Runs every day at midnight
35 | resourcePolicySet:
36 | resourceSelectors:
37 | - namespace: test
38 | kind: Deployment
39 | group: "apps"
40 | version: v1
41 | labelFilters:
42 | - key: serving
43 | operation: Equal
44 | value: api # Match deployments with the "serving" label set to "api"
45 | - key: environment
46 | operation: Different
47 | value: production # Match deployments with the "environment" label different from "production"
48 | action: Scan
49 | ```
50 |
51 | By setting the **Action** field to **Scan**, we can safely test the Cleaner's filtering logic without affecting the actual deployment configurations. Once we are confident in the filtering criteria, we can set the **Action** to **Delete** or **Transform**.
52 |
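53 | To check the controller logs for the message above, something like the following can be used (deployment name and namespace match the install shown elsewhere in these docs; adjust to your setup):
54 | 
55 | ```bash
56 | kubectl logs -n projectsveltos deploy/k8s-cleaner-controller | grep "resource is a match for cleaner"
57 | ```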
--------------------------------------------------------------------------------
/docs/getting_started/features/label_filters/label_filters.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Label Filters
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## Introduction to Label Filters
16 |
17 | The k8s-cleaner has the ability to select resources based on a label. This capability allows precise resource management.
18 |
19 |
20 | ## Example - Label Filters
21 |
22 | The example below provides a definition for eliminating **Deployments** in the `test` namespace that carry the label `serving=api` and whose `environment` label differs from `production`.
23 | !!! example ""
24 |
25 | ```yaml
26 | ---
27 | apiVersion: apps.projectsveltos.io/v1alpha1
28 | kind: Cleaner
29 | metadata:
30 | name: cleaner-sample1
31 | spec:
32 | schedule: "* 0 * * *" # Executes every day at midnight
33 | resourcePolicySet:
34 | resourceSelectors:
35 | - namespace: test
36 | kind: Deployment
37 | group: "apps"
38 | version: v1
39 | labelFilters:
40 | - key: serving
41 | operation: Equal
42 | value: api # Identifies Deployments with "serving" label set to "api"
43 | - key: environment
44 | operation: Different
45 | value: production # Identifies Deployments with "environment" label different from "production"
46 | action: Delete # Deletes matching Deployments
47 | ```
48 |
49 | By utilising the label filters capability, we can refine the scope of resource management, ensuring that only specific resources are targeted for removal and/or update.
50 |
51 | This approach helps maintain a **clean** and **organised** Kubernetes environment without affecting unintended resources.
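52 | 
53 | To sanity-check which Deployments carry the targeted labels before applying the Cleaner, plain kubectl label selectors can be used (a sketch):
54 | 
55 | ```bash
56 | kubectl get deployments -n test -l 'serving=api,environment!=production'
57 | ```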
--------------------------------------------------------------------------------
/docs/getting_started/features/resourceselector/resourceselector.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: resourceSelector
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## Introduction to resourceSelector
16 |
17 | There may be cases where operators need to examine resources of distinct types simultaneously.
18 | 
19 | Let's assume we would like to eliminate **all** Deployment instances that are not backed by an Autoscaler instance. The k8s-cleaner allows this. By employing the `resourceSelector`, we can select **all** `Deployment` and `Autoscaler` instances.
20 | 
21 | As a next step, we define the `aggregatedSelection`. `AggregatedSelection` is given all instances collected by the Cleaner through the `resourceSelector`; in this example, all Deployment and Autoscaler instances in the **foo** namespace.
22 |
23 | ## Example - Deployment not Backed-up by Autoscaler
24 |
25 | ```yaml
26 | ---
27 | # Find all Deployments not backed up by an Autoscaler. Those are a match.
28 | apiVersion: apps.projectsveltos.io/v1alpha1
29 | kind: Cleaner
30 | metadata:
31 | name: cleaner-sample3
32 | spec:
33 | schedule: "* 0 * * *"
34 | action: Delete # Delete matching resources
35 | resourcePolicySet:
36 | resourceSelectors:
37 | - namespace: foo
38 | kind: Deployment
39 | group: "apps"
40 | version: v1
41 | - namespace: foo
42 | kind: HorizontalPodAutoscaler
43 | group: "autoscaling"
44 | version: v2
45 | aggregatedSelection: |
46 | function evaluate()
47 | local hs = {}
48 | hs.valid = true
49 | hs.message = ""
50 |
51 | local deployments = {}
52 | local autoscalers = {}
53 | local deploymentWithNoAutoscaler = {}
54 |
55 | -- Separate deployments and services from the resources
56 | for _, resource in ipairs(resources) do
57 | local kind = resource.kind
58 | if kind == "Deployment" then
59 | table.insert(deployments, resource)
60 | elseif kind == "HorizontalPodAutoscaler" then
61 | table.insert(autoscalers, resource)
62 | end
63 | end
64 |
65 | -- Check for each deployment if there is a matching HorizontalPodAutoscaler
66 | for _, deployment in ipairs(deployments) do
67 | local deploymentName = deployment.metadata.name
68 | local matchingAutoscaler = false
69 |
70 | for _, autoscaler in ipairs(autoscalers) do
71 | if autoscaler.spec.scaleTargetRef.name == deployment.metadata.name then
72 | matchingAutoscaler = true
73 | break
74 | end
75 | end
76 |
77 | if not matchingAutoscaler then
78 | table.insert(deploymentWithNoAutoscaler, {resource = deployment})
79 | -- no break: keep checking the remaining deployments
80 | end
81 | end
82 |
83 | if #deploymentWithNoAutoscaler > 0 then
84 | hs.resources = deploymentWithNoAutoscaler
85 | end
86 | return hs
87 | end
88 | ```
--------------------------------------------------------------------------------
/docs/getting_started/features/store_resources/store_resource_yaml.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Store Resource Yaml
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## Store Resource Yaml
16 |
17 | This section describes how to store resources before the k8s-cleaner deletes or modifies them. The k8s-cleaner has an optional field called `StoreResourcePath`.
18 | 
19 | When this option is set, the k8s-cleaner will dump all the matching resources before any modification (update and/or deletion) is performed.
20 | 
21 | The matching resources will be stored in the below directory structure.
22 | 
23 | ```bash
24 | /<__StoreResourcePath__ value>/<cleaner name>/<namespace>/<resource kind>/<resource name>.yaml
25 | ```
26 | ## Example - Unused ConfigMap
27 |
28 | ### Step 1 - Create PersistentVolumeClaim
29 | !!! example "PersistentVolumeClaim"
30 |
31 | ```yaml
32 | apiVersion: v1
33 | kind: PersistentVolumeClaim
34 | metadata:
35 | name: cleaner-pvc
36 | namespace: projectsveltos
37 | labels:
38 | app: k8s-cleaner
39 | spec:
40 | storageClassName: standard
41 | accessModes:
42 | - ReadWriteOnce
43 | resources:
44 | requests:
45 | storage: 2Gi
46 | ```
47 |
48 | The above YAML definition will create a `PersistentVolumeClaim` of 2Gi. In case more storage is required, simply update the YAML definition.
49 |
50 | ```bash
51 | $ kubectl apply -f "pvc.yaml"
52 | ```
53 |
54 | ### Step 2 - Update k8s-cleaner-controller Deployment
55 |
56 | The next step is to update the `k8s-cleaner-controller` deployment located in the `projectsveltos` namespace. There, we will define the `PersistentVolumeClaim` as a volume and the actual storage location as a volume mount.
57 |
58 | ```bash
59 | $ kubectl get deploy -n projectsveltos
60 | NAME READY UP-TO-DATE AVAILABLE AGE
61 | k8s-cleaner-controller 1/1 1 1 10m
62 | $ kubectl edit deploy k8s-cleaner-controller -n projectsveltos
63 | ```
64 |
65 | !!! example "k8s-cleaner-controller"
66 |
67 | ```yaml
68 | volumes:
69 | - name: volume
70 | persistentVolumeClaim:
71 | claimName: cleaner-pvc
72 |
73 | volumeMounts:
74 | - mountPath: /pvc/
75 | name: volume
76 | ```
77 |
78 | The YAML definition files will be stored under `/pvc/`.
79 |
80 | ### Step 3 - Cleaner Resource Creation
81 |
82 | In step 3, we will create a Cleaner resource that deletes any unused `ConfigMap` resources on a cron schedule. To store the resources before performing any deletions, we will add the field `storeResourcePath: "/pvc/"` so the resources are saved inside the `/pvc/` directory.
83 | 
84 | For instance, take this [Cleaner instance](https://raw.githubusercontent.com/gianlucam76/k8s-cleaner/refs/heads/main/examples-unused-resources/configmaps/orphaned_configmaps.yaml) that finds unused ConfigMaps. Set __spec.storeResourcePath: "/pvc/"__ (and, optionally, set __spec.action: Scan__ first for a dry run).
85 | 
86 | When the k8s-cleaner finds an unused `ConfigMap`, it will first store the resource definition and then delete the actual resource.
87 |
88 | ### Validation
89 |
90 | ```bash
91 | docker exec -i cleaner-management-worker ls /var/local-path-provisioner/pvc-8314c600-dc54-4e23-a796-06b73080f589_projectsveltos_cleaner-pvc
92 | unused-configmaps
93 |
94 | /var/local-path-provisioner/pvc-8314c600-dc54-4e23-a796-06b73080f589_projectsveltos_cleaner-pvc/unused-configmaps/test/ConfigMap:
95 | kube-root-ca.crt.yaml
96 | my-configmap.yaml
97 | ```
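98 | 
99 | Cross-checking against the path structure described above: with `storeResourcePath: "/pvc/"`, the Cleaner named `unused-configmaps` dumps the unused ConfigMap from namespace `test` at a path like:
100 | 
101 | ```bash
102 | /pvc/unused-configmaps/test/ConfigMap/my-configmap.yaml
103 | ```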
--------------------------------------------------------------------------------
/docs/getting_started/features/update_resources/update_resources.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Resource Update
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## Introduction to Resource Update
16 |
17 | Beyond removing **stale** resources, the k8s-cleaner can also facilitate the dynamic update of existing resource configurations.
18 | 
19 | This capability allows users to modify resource specifications based on **specific criteria**, ensuring alignment with evolving requirements and maintaining resource consistency.
20 |
21 |
22 | ## Example - Resource Update
23 |
24 | Consider the scenario where we want to update **Service** objects in the `foo` namespace to use **version2** apps.
25 |
26 | 1. The **evaluate** function allows users to select the resources: Services in the `foo` namespace pointing to **version1** apps.
27 | 2. The **transform** function will change the matching resources by updating `obj.spec.selector["app"]` to **version2**.
28 |
29 | !!! example ""
30 |
31 | ```yaml
32 | ---
33 | apiVersion: apps.projectsveltos.io/v1alpha1
34 | kind: Cleaner
35 | metadata:
36 | name: cleaner-sample3
37 | spec:
38 | schedule: "* 0 * * *"
39 | resourcePolicySet:
40 | resourceSelectors:
41 | - namespace: foo
42 | kind: Service
43 | group: ""
44 | version: v1
45 | evaluate: |
46 | -- Define how resources will be selected
47 | function evaluate()
48 | hs = {}
49 | hs.matching = false
50 | if obj.spec.selector ~= nil then
51 | if obj.spec.selector["app"] == "version1" then
52 | hs.matching = true
53 | end
54 | end
55 | return hs
56 | end
57 | action: Transform # Update matching resources
58 | transform: |
59 | -- Define how resources will be updated
60 | function transform()
61 | hs = {}
62 | obj.spec.selector["app"] = "version2"
63 | hs.resource = obj
64 | return hs
65 | end
66 | ```
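67 | 
68 | After the Cleaner runs, the updated selectors can be verified with a quick check (a sketch using kubectl custom columns):
69 | 
70 | ```bash
71 | kubectl get services -n foo -o custom-columns='NAME:.metadata.name,APP:.spec.selector.app'
72 | ```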
--------------------------------------------------------------------------------
/docs/getting_started/install/telemetry.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Welcome to the k8s-cleaner installation page
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Gianluca Mardente
13 | ---
14 |
15 | ## Telemetry
16 |
17 | As an open-source project, k8s-cleaner relies on user insights to guide its development. Telemetry data helps us:
18 |
19 | - **Prioritize Features**: Identify the most commonly used features and focus on enhancing them.
20 | - **Improve Performance**: Analyze usage patterns to optimize Sveltos' performance and resource utilization.
21 | - **Make Informed Decisions**: Use data-driven insights to shape the future of Sveltos.
22 |
23 | By choosing to participate in telemetry, users contribute to the ongoing improvement of Sveltos and help ensure it meets the needs of the community.
24 |
25 | ## What Data Do We Collect?
26 |
27 | We collect minimal, anonymized data about Sveltos usage:
28 |
29 | - **Version Information**: To track the distribution of different k8s-cleaner versions.
30 | - **Cluster Management Data**: To understand the scale and complexity of Sveltos deployments. This includes:
31 | 1. Number of Cleaner instances
32 | 1. Number of nodes
33 |
34 | ## How We Protect Your Privacy
35 |
36 | - **Anonymized Data**: All data is collected and processed anonymously, without any personally identifiable information.
37 | - **Secure Storage**: Telemetry data is stored securely and access is strictly controlled by the Sveltos maintainers.
38 |
39 | ## Opting-Out of Telemetry
40 |
41 | To **opt out** of telemetry, set the `--disable-telemetry=true` flag in the k8s-cleaner deployment (a patch sketch is shown at the end of this page).
42 |
43 | ### Requesting Data Erasure
44 |
45 | You have the right to request the erasure of your data under certain circumstances. To initiate a data erasure request, please contact us at [support@projectsveltos.io](mailto:support@projectsveltos.io).
46 |
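47 | As a sketch, the opt-out flag above can be appended with a JSON patch (the container index and the presence of an `args` list are assumptions; verify them against your deployment first):
48 | 
49 | ```bash
50 | kubectl patch deployment k8s-cleaner-controller -n projectsveltos --type=json \
51 |   -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--disable-telemetry=true"}]'
52 | ```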
--------------------------------------------------------------------------------
/docs/javascripts/extra.js:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
--------------------------------------------------------------------------------
/docs/reports/k8s-cleaner_reports.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Welcome to the k8s-cleaner reports page
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 | ## Introduction to k8s-cleaner Reports
16 |
17 | Users have the ability to instruct the k8s-cleaner to generate a report with all the resources deleted or modified.
18 |
19 | The k8s-cleaner will create a Report instance with the same name as the Cleaner instance.
20 |
21 | ## Example - Report Definition
22 |
23 | !!! example "Cleaner Definition with Notifications set to type CleanerReport"
24 |
25 | ```yaml
26 | ---
27 | apiVersion: apps.projectsveltos.io/v1alpha1
28 | kind: Cleaner
29 | metadata:
30 | name: cleaner-with-report
31 | spec:
32 | schedule: "0 * * * *"
33 | action: Delete # Delete matching resources
34 | resourcePolicySet:
35 | resourceSelectors:
36 | - namespace: test
37 | kind: Deployment
38 | group: "apps"
39 | version: v1
40 | notifications:
41 | - name: report
42 | type: CleanerReport
43 | ```
44 |
45 | ### Validation
46 |
47 | ```bash
48 | $ kubectl get report
49 | NAME AGE
50 | cleaner-with-report   51m
51 | ```
52 |
53 | ### Report Output
54 |
55 | ```yaml
56 | apiVersion: apps.projectsveltos.io/v1alpha1
57 | kind: Report
58 | metadata:
59 | creationTimestamp: "2023-12-17T17:05:00Z"
60 | generation: 2
61 | name: cleaner-with-report
62 | resourceVersion: "1625"
63 | uid: dda9a231-9a51-4133-aeb5-f0520feb8746
64 | spec:
65 | action: Delete
66 | message: 'time: 2023-12-17 17:07:00.394736089 +0000 UTC m=+129.172023518'
67 | resources:
68 | - apiVersion: apps/v1
69 | kind: Deployment
70 | name: my-nginx-deployment
71 | namespace: test
72 | ```
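73 | 
74 | The full report shown above can be retrieved at any time with:
75 | 
76 | ```bash
77 | kubectl get report cleaner-with-report -o yaml
78 | ```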
--------------------------------------------------------------------------------
/docs/resources/blogs.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: k8s-cleaner - Kubernetes Controller that identifies, removes, or updates stale/orphaned or unhealthy resources
3 | description: Welcome to the blogs page
4 | tags:
5 | - Kubernetes
6 | - Controller
7 | - Kubernetes Resources
8 | - Identify
9 | - Update
10 | - Remove
11 | authors:
12 | - Eleni Grosdouli
13 | ---
14 |
15 |
16 |
17 | ## k8s-cleaner Available Articles
18 |
19 | 1. [Automated Kubernetes Cluster Cleanup at Scale](https://itnext.io/automated-kubernetes-cluster-cleanup-at-scale-b8bb6dd48e7e)
20 |
21 |
--------------------------------------------------------------------------------
/docs/stylesheets/extra.css:
--------------------------------------------------------------------------------
1 | @keyframes heart {
2 | 0%, 40%, 80%, 100% {
3 | transform: scale(1);
4 | }
5 | 20%, 60% {
6 | transform: scale(1.15);
7 | }
8 | }
9 | .heart {
10 | animation: heart 1000ms infinite;
11 | }
12 |
13 | /* Update the logo size on the banner */
14 | .md-logo img {
15 | height: 50px !important;
16 | width: auto !important;
17 | }
--------------------------------------------------------------------------------
/examples-automated-operations/scheduled-scaling/pause.yaml:
--------------------------------------------------------------------------------
1 | # This cleaner:
2 | # - runs at 8PM every day
3 | # - finds all Deployments/StatefulSet/DaemonSet with
4 | # annotation "pause-resume"
5 | #
6 | # For any such resource:
7 | # - store current replicas in the annotation "previous-replicas"
8 | # - set their replicas to zero (scale down and pause)
9 | #
10 | apiVersion: apps.projectsveltos.io/v1alpha1
11 | kind: Cleaner
12 | metadata:
13 | name: scale-down-deployment-statefulset-daemonset
14 | spec:
15 | schedule: "* 20 * * *"
16 | action: Transform
17 | transform: |
18 | -- Set replicas to 0
19 | function transform()
20 | hs = {}
21 |
22 | if obj.metadata.annotations == nil then
23 | obj.metadata.annotations = {}
24 | end
25 | -- store in the annotation current replicas value
26 | obj.metadata.annotations["previous-replicas"] = tostring(obj.spec.replicas)
27 |
28 | -- reset replicas to 0
29 | obj.spec.replicas = 0
30 |
31 | hs.resource = obj
32 | return hs
33 | end
34 | resourcePolicySet:
35 | resourceSelectors:
36 | - kind: Deployment
37 | group: apps
38 | version: v1
39 | - kind: StatefulSet
40 | group: "apps"
41 | version: v1
42 | - kind: DaemonSet
43 | group: "apps"
44 | version: v1
45 | aggregatedSelection: |
46 | function evaluate()
47 | local hs = {}
48 |
49 | -- returns true if object has annotation "pause-resume"
50 | function hasPauseAnnotation(obj)
51 | if obj.metadata.annotations ~= nil then
52 | if obj.metadata.annotations["pause-resume"] then
53 | return true
54 | end
55 |
56 | return false
57 | end
58 |
59 | return false
60 | end
61 |
62 | local resourceToPause = {}
63 |
64 | for _, resource in ipairs(resources) do
65 | if hasPauseAnnotation(resource) then
66 | table.insert(resourceToPause, {resource = resource})
67 | end
68 | end
69 |
70 | if #resourceToPause > 0 then
71 | hs.resources = resourceToPause
72 | end
73 | return hs
74 | end
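75 | # To opt a workload into this schedule, add the annotation, e.g. (hypothetical workload name):
76 | #   kubectl annotate deployment/my-app pause-resume=true
77 | # The companion resume.yaml Cleaner reads "previous-replicas" back and scales up at 8AM.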
--------------------------------------------------------------------------------
/examples-automated-operations/scheduled-scaling/resume.yaml:
--------------------------------------------------------------------------------
1 | # This cleaner:
2 | # - runs at 8AM every day
3 | # - finds all Deployments/StatefulSet/DaemonSet with
4 | # annotation "pause-resume"
5 | #
6 | # For any such resource:
7 | # - read the previous replicas value from the annotation "previous-replicas"
8 | # - restore replicas to that value (scale deployment/statefulset/daemonset up)
9 | #
10 | apiVersion: apps.projectsveltos.io/v1alpha1
11 | kind: Cleaner
12 | metadata:
13 | name: scale-up-deployment-statefulset-daemonset
14 | spec:
15 | schedule: "* 8 * * *"
16 | action: Transform
17 | transform: |
18 | -- Restore replicas from the "previous-replicas" annotation
19 | function transform()
20 | hs = {}
21 | if obj.metadata.annotations == nil then
22 | return
23 | end
24 | if not obj.metadata.annotations["previous-replicas"] then
25 | return
26 | end
27 | -- reset replicas
28 | obj.spec.replicas = tonumber(obj.metadata.annotations["previous-replicas"])
29 | hs.resource = obj
30 | return hs
31 | end
32 | resourcePolicySet:
33 | resourceSelectors:
34 | - kind: Deployment
35 | group: apps
36 | version: v1
37 | - kind: StatefulSet
38 | group: "apps"
39 | version: v1
40 | - kind: DaemonSet
41 | group: "apps"
42 | version: v1
43 | aggregatedSelection: |
44 | function evaluate()
45 | local hs = {}
46 |
47 | -- returns true if object has annotation "pause-resume"
48 | function hasPauseAnnotation(obj)
49 | if obj.metadata.annotations ~= nil then
50 | if obj.metadata.annotations["pause-resume"] then
51 | return true
52 | end
53 |
54 | return false
55 | end
56 |
57 | return false
58 | end
59 |
60 | local resourceToUnPause = {}
61 |
62 | for _, resource in ipairs(resources) do
63 | if hasPauseAnnotation(resource) then
64 | table.insert(resourceToUnPause, {resource = resource})
65 | end
66 | end
67 |
68 | if #resourceToUnPause > 0 then
69 | hs.resources = resourceToUnPause
70 | end
71 | return hs
72 | end
--------------------------------------------------------------------------------
/examples-unhealthy-resources/pod-with-evicted-state/pod-with-evicted-state.yml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance is designed to find all evicted Pod instances in all namespaces.
2 | # It evaluates pods that have failed with the reason "Evicted" and marks them for deletion.
3 | # The cleaner runs on a scheduled interval every 5 minutes, ensuring that evicted pods
4 | # are promptly removed, freeing up cluster resources and maintaining a clean environment.
5 | # By automatically deleting evicted pods, this Cleaner helps improve resource management
6 | # and cluster performance.
7 | apiVersion: apps.projectsveltos.io/v1alpha1
8 | kind: Cleaner
9 | metadata:
10 | name: evicted-pods
11 | spec:
12 | schedule: "*/5 * * * *"
13 | resourcePolicySet:
14 | resourceSelectors:
15 | - kind: Pod
16 | group: ""
17 | version: v1
18 | excludeDeleted: false
19 | evaluate: |
20 | function evaluate()
21 | hs = {}
22 | hs.matching = false
23 |
24 | -- Check if the pod's status is Failed and the reason is Evicted
25 | if obj.status.phase == "Failed" and obj.status.reason == "Evicted" then
26 | -- If the pod is evicted, mark it for cleaning
27 | hs.matching = true
28 | end
29 |
30 | return hs
31 | end
32 | action: Delete
33 |
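34 | # Tip: to preview matches without deleting anything, change "action" above to "Scan";
35 | # matching Pods are then reported in the controller logs ("resource is a match for cleaner").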
--------------------------------------------------------------------------------
/examples-unhealthy-resources/pod-with-terminating-state/pod-with-terminating-state.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps.projectsveltos.io/v1alpha1
2 | kind: Cleaner
3 | metadata:
4 | name: terminating-pods
5 | spec:
6 | schedule: "*/5 * * * *"
7 | resourcePolicySet:
8 | resourceSelectors:
9 | - kind: Pod
10 | group: ""
11 | version: v1
12 | excludeDeleted: false
13 | evaluate: |
14 | function evaluate()
15 | hs = {}
16 | hs.matching = false
17 |
18 | -- Check if the pod has a deletionTimestamp field (i.e., pod is terminating)
19 | if obj.metadata.deletionTimestamp ~= nil then
20 | -- If deletionTimestamp has a value, the pod is terminating
21 | hs.matching = true
22 | end
23 |
24 | return hs
25 | end
26 | action: Delete
27 | deleteOptions:
28 | gracePeriodSeconds: 0
29 | propagationPolicy: Background
30 |
--------------------------------------------------------------------------------
/examples-unused-resources/clusterroles/unused_clusterroles.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all unused ClusterRole instances.
2 | # An unused ClusterRole is an instance that is not referenced
3 | # by any ClusterRoleBinding or RoleBinding
4 | apiVersion: apps.projectsveltos.io/v1alpha1
5 | kind: Cleaner
6 | metadata:
7 | name: unused-roles
8 | spec:
9 | schedule: "* 0 * * *"
10 | action: Delete
11 | resourcePolicySet:
12 | resourceSelectors:
13 | - kind: ClusterRole
14 | group: "rbac.authorization.k8s.io"
15 | version: v1
16 | - kind: ClusterRoleBinding
17 | group: "rbac.authorization.k8s.io"
18 | version: v1
19 | - kind: RoleBinding
20 | group: "rbac.authorization.k8s.io"
21 | version: v1
22 | aggregatedSelection: |
23 | function evaluate()
24 | local hs = {}
25 | hs.message = ""
26 |
27 | -- Contains list of existing ClusterRoles
28 | local existingClusterRoles = {}
29 | -- Contains list of ClusterRoles currently referenced by
30 | -- roleBindings or ClusterRoleBindings
31 | local usedClusterRoles = {}
32 |
33 | local unusedClusterRoles = {}
34 |
35 | -- Create list of existingClusterRoles and usedClusterRoles
36 | for _, resource in ipairs(resources) do
37 | local kind = resource.kind
38 | if kind == "ClusterRole" then
39 | table.insert(existingClusterRoles, resource)
40 | elseif kind == "ClusterRoleBinding" then
41 | if resource.roleRef.kind == "ClusterRole" then
42 | usedClusterRoles[resource.roleRef.name] = true
43 | end
44 | elseif kind == "RoleBinding" then
45 | if resource.roleRef.kind == "ClusterRole" then
46 | usedClusterRoles[resource.roleRef.name] = true
47 | end
48 | end
49 | end
50 |
51 | -- Iterate over existing ClusterRoles and find those no longer used
52 | for _,clusterRole in ipairs(existingClusterRoles) do
53 | if not usedClusterRoles[clusterRole.metadata.name] then
54 | table.insert(unusedClusterRoles, {resource = clusterRole})
55 | end
56 | end
57 |
58 | if #unusedClusterRoles > 0 then
59 | hs.resources = unusedClusterRoles
60 | end
61 | return hs
62 | end
--------------------------------------------------------------------------------
/examples-unused-resources/deployments/deployment_with_no_autoscaler.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance will find any Deployment in all namespaces
2 | # that has no associated HorizontalPodAutoscaler and instructs Cleaner to
3 | # delete those instances
4 | apiVersion: apps.projectsveltos.io/v1alpha1
5 | kind: Cleaner
6 | metadata:
7 | name: deployment-with-no-autoscaler
8 | spec:
9 | schedule: "* 0 * * *"
10 | action: Delete # Delete matching resources
11 | resourcePolicySet:
12 | resourceSelectors:
13 | - kind: Deployment
14 | group: "apps"
15 | version: v1
16 | - kind: HorizontalPodAutoscaler
17 | group: "autoscaling"
18 | version: v2
19 | aggregatedSelection: |
20 | function evaluate()
21 | local hs = {}
22 | hs.message = ""
23 |
24 | local deployments = {}
25 | local autoscalers = {}
26 | local deploymentWithNoAutoscaler = {}
27 |
28 | -- Separate deployments and services from the resources
29 | for _, resource in ipairs(resources) do
30 | local kind = resource.kind
31 | if kind == "Deployment" then
32 | table.insert(deployments, resource)
33 | elseif kind == "HorizontalPodAutoscaler" then
34 | if resource.spec.scaleTargetRef.kind == "Deployment" then
35 | table.insert(autoscalers, resource)
36 | end
37 | end
38 | end
39 |
40 | -- Check for each deployment if there is a matching HorizontalPodAutoscaler
41 | for _, deployment in ipairs(deployments) do
42 | local deploymentName = deployment.metadata.name
43 | local matchingAutoscaler = false
44 |
45 | for _, autoscaler in ipairs(autoscalers) do
46 | if autoscaler.metadata.namespace == deployment.metadata.namespace then
47 | if autoscaler.spec.scaleTargetRef.name == deployment.metadata.name then
48 | matchingAutoscaler = true
49 | break
50 | end
51 | end
52 | end
53 |
54 | if not matchingAutoscaler then
55 | table.insert(deploymentWithNoAutoscaler, {resource = deployment})
56 | -- no break: keep checking the remaining deployments
57 | end
58 | end
59 |
60 | if #deploymentWithNoAutoscaler > 0 then
61 | hs.resources = deploymentWithNoAutoscaler
62 | end
63 | return hs
64 | end
--------------------------------------------------------------------------------
/examples-unused-resources/deployments/deployment_with_replica_zero.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance will find any Deployment in any namespace
2 | # with spec.replicas set to 0 and delete those instances
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: deployment-with-zero-replicas
7 | spec:
8 | schedule: "* 0 * * *"
9 | resourcePolicySet:
10 | resourceSelectors:
11 | - kind: Deployment
12 | group: "apps"
13 | version: v1
14 | evaluate: |
15 | function evaluate()
16 | hs = {}
17 | hs.matching = false
18 | if obj.spec.replicas == 0 then
19 | hs.matching = true
20 | end
21 | return hs
22 | end
23 | action: Delete
--------------------------------------------------------------------------------
/examples-unused-resources/deployments/orphaned_deployment.yaml:
--------------------------------------------------------------------------------
1 | # Selects Deployment, Pod, and Service resources, and then filters the results to
2 | # identify deployments that have no pods or services associated with them.
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: orphaned-deployments
7 | spec:
8 | schedule: "* 0 * * *"
9 | action: Delete # Delete matching resources
10 | resourcePolicySet:
11 | resourceSelectors:
12 | - kind: Deployment
13 | group: "apps"
14 | version: v1
15 | - kind: Pod
16 | group: ""
17 | version: v1
18 | - kind: Service
19 | group: ""
20 | version: v1
21 | aggregatedSelection: |
22 | function table_equal(t1, t2)
23 | local metatable = {}
24 | metatable.__eq = function(t1, t2)
25 | if type(t1) ~= "table" or type(t2) ~= "table" then
26 | return false
27 | end
28 |
29 | local keys = {}
30 | for k in pairs(t1) do
31 | keys[k] = true
32 | end
33 |
34 | for k in pairs(t2) do
35 | if not keys[k] then
36 | return false
37 | end
38 | end
39 |
40 | for k, v in pairs(t1) do
41 | if t2[k] ~= v then
42 | return false
43 | end
44 | end
45 |
46 | return true
47 | end
48 |
49 | setmetatable(t1, metatable)
50 | setmetatable(t2, metatable)
51 |
52 | return t1 == t2
53 | end
54 |
55 | function evaluate()
56 | local hs = {}
57 | hs.message = ""
58 |
59 | local deployments = {}
60 | local pods = {}
61 | local services = {}
62 | local orphanedDeployments = {}
63 |
64 | -- Separate deployments, pods, and services from the resources
65 | for _, resource in ipairs(resources) do
66 | local kind = resource.kind
67 | if kind == "Deployment" then
68 | table.insert(deployments, resource)
69 | elseif kind == "Pod" then
70 | table.insert(pods, resource)
71 | elseif kind == "Service" then
72 | table.insert(services, resource)
73 | end
74 | end
75 |
76 | -- Identify deployments that have no pods or services associated with them
77 | for _, deployment in ipairs(deployments) do
78 | local deploymentName = deployment.metadata.name
79 | local hasPod = false
80 | local hasService = false
81 |
82 | for _, pod in ipairs(pods) do
83 | if pod.metadata.namespace == deployment.metadata.namespace then
84 | for _, owner in ipairs(pod.metadata.ownerReferences or {}) do
85 | if owner.name == deploymentName then
86 | hasPod = true
87 | break
88 | end
89 | end
90 | end
91 | end
92 |
93 | for _, service in ipairs(services) do
94 | if service.metadata.namespace == deployment.metadata.namespace then
95 | if table_equal(service.spec.selector, deployment.metadata.labels) then
96 | hasService = true
97 | break
98 | end
99 | end
100 | end
101 |
102 | if not hasPod and not hasService then
103 | table.insert(orphanedDeployments, {resource = deployment})
104 | -- no break: keep checking the remaining deployments
105 | end
106 | end
107 |
108 | if #orphanedDeployments > 0 then
109 | hs.resources = orphanedDeployments
110 | end
111 | return hs
112 | end
--------------------------------------------------------------------------------
/examples-unused-resources/horizontal-pod-autoscalers/unused-hpas.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any HorizontalPodAutoscaler instance
2 | # matching no Deployment or StatefulSet and deletes those instances
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: unused-horizontal-pod-autoscalers
7 | spec:
8 | schedule: "* 0 * * *"
9 | action: Delete # Delete matching resources
10 | resourcePolicySet:
11 | resourceSelectors:
12 | - kind: Deployment
13 | group: "apps"
14 | version: v1
15 | - kind: StatefulSet
16 | group: "apps"
17 | version: v1
18 | - kind: HorizontalPodAutoscaler
19 | group: "autoscaling"
20 | version: v2
21 | aggregatedSelection: |
22 | function getKey(namespace, name)
23 | return namespace .. ":" .. name
24 | end
25 |
26 | function evaluate()
27 | local hs = {}
28 | hs.message = ""
29 |
30 | local deployments = {}
31 | local statefulSets = {}
32 | local autoscalers = {}
33 | local unusedAutoscalers = {}
34 |
35 | for _, resource in ipairs(resources) do
36 | local kind = resource.kind
37 | if kind == "Deployment" then
38 | key = getKey(resource.metadata.namespace, resource.metadata.name)
39 | deployments[key] = true
40 | elseif kind == "StatefulSet" then
41 | key = getKey(resource.metadata.namespace, resource.metadata.name)
42 | statefulSets[key] = true
43 | elseif kind == "HorizontalPodAutoscaler" then
44 | table.insert(autoscalers, resource)
45 | end
46 | end
47 |
48 | -- Check for each horizontalPodAutoscaler if there is a matching Deployment or StatefulSet
49 | for _,hpa in ipairs(autoscalers) do
50 | key = getKey(hpa.metadata.namespace, hpa.spec.scaleTargetRef.name)
51 | if hpa.spec.scaleTargetRef.kind == "Deployment" then
52 | if not deployments[key] then
53 | table.insert(unusedAutoscalers, {resource = hpa})
54 | end
55 | elseif hpa.spec.scaleTargetRef.kind == "StatefulSet" then
56 | if not statefulSets[key] then
57 | table.insert(unusedAutoscalers, {resource = hpa})
58 | end
59 | end
60 | end
61 |
62 | if #unusedAutoscalers > 0 then
63 | hs.resources = unusedAutoscalers
64 | end
65 | return hs
66 | end
--------------------------------------------------------------------------------
/examples-unused-resources/ingresses/unused_ingresses.yaml:
--------------------------------------------------------------------------------
1 | # Find all unused Ingress instances.
2 | # An Ingress instance is considered unused if:
3 | # - the default backend is not defined or does not exist
4 | # - none of the referenced services (via spec.rules) exist
5 | #
6 | # This does not take the Resource field (*v1.TypedLocalObjectReference) into account
7 | apiVersion: apps.projectsveltos.io/v1alpha1
8 | kind: Cleaner
9 | metadata:
10 | name: stale-ingresses
11 | spec:
12 | schedule: "* 0 * * *"
13 | action: Delete # Delete matching resources
14 | resourcePolicySet:
15 | resourceSelectors:
16 | - kind: Ingress
17 | group: "networking.k8s.io"
18 | version: v1
19 | - kind: Service
20 | group: ""
21 | version: v1
22 | aggregatedSelection: |
23 | function getKey(namespace, name)
24 | return namespace .. ":" .. name
25 | end
26 |
27 | -- check default backend: if default backend is configured, return true
28 | -- if currently exists
29 | function isDefaultBackendValid(ingress, services)
30 | if ingress.spec.defaultBackend ~= nil then
31 | if ingress.spec.defaultBackend.service ~= nil then
32 | key = getKey(ingress.metadata.namespace, ingress.spec.defaultBackend.service.name)
33 | if services[key] then
34 | return true
35 | end
36 | end
37 | end
38 | return false
39 | end
40 |
41 | -- check if any referenced service (via rules) currently exists
42 | -- returns false only if none of the referenced services exists
43 | function isAnyReferencedServiceValid(ingress, services)
44 | if ingress.spec.rules ~= nil then
45 | for _,rule in ipairs(ingress.spec.rules) do
46 | if rule.http ~= nil and rule.http.paths ~= nil then
47 | for _,path in ipairs(rule.http.paths) do
48 | if path.backend.service ~= nil then
49 | key = getKey(ingress.metadata.namespace, path.backend.service.name)
50 | if services[key] then
51 | return true
52 | end
53 | end
54 | end
55 | end
56 | end
57 | end
58 | return false
59 | end
60 |
61 |
62 | function evaluate()
63 | local hs = {}
64 | hs.valid = true
65 | hs.message = ""
66 |
67 | local services = {}
68 | local ingresses = {}
69 | local unusedIngresses = {}
70 |
71 | -- Separate ingresses and services from the resources
72 | -- Store existing services in a map like struct
73 | for _, resource in ipairs(resources) do
74 | local kind = resource.kind
75 | if kind == "Ingress" then
76 | table.insert(ingresses, resource)
77 | elseif kind == "Service" then
78 | key = getKey(resource.metadata.namespace,resource.metadata.name)
79 |
80 | services[key] = true
81 | end
82 | end
83 |
84 | for _,ingress in ipairs(ingresses) do
85 | local used = false
86 | key = getKey(ingress.metadata.namespace, ingress.metadata.name)
87 | if isDefaultBackendValid(ingress, services) then
88 | used = true
89 | elseif isAnyReferencedServiceValid(ingress, services) then
90 | used = true
91 | end
92 |
93 | if not used then
94 | table.insert(unusedIngresses, {resource = ingress})
95 | end
96 | end
97 |
98 | if #unusedIngresses > 0 then
99 | hs.resources = unusedIngresses
100 | end
101 | return hs
102 | end
--------------------------------------------------------------------------------
/examples-unused-resources/jobs/completed_jobs.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any Job that:
2 | # - has status.completionTime set
3 | # - has status.succeeded set to a value greater than zero
4 | # (note: pod status is not checked; only the Job's own status is)
5 | # and instructs Cleaner to delete that Job.
6 | apiVersion: apps.projectsveltos.io/v1alpha1
7 | kind: Cleaner
8 | metadata:
9 | name: completed-jobs
10 | spec:
11 | schedule: "* 0 * * *"
12 | resourcePolicySet:
13 | resourceSelectors:
14 | - kind: Job
15 | group: "batch"
16 | version: v1
17 | evaluate: |
18 | function evaluate()
19 | hs = {}
20 | hs.matching = false
21 | if obj.status ~= nil then
22 | if obj.status.completionTime ~= nil and obj.status.succeeded > 0 then
23 | hs.matching = true
24 | end
25 | end
26 | return hs
27 | end
28 | action: Delete
--------------------------------------------------------------------------------
/examples-unused-resources/jobs/long-running-pods.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any Pod that:
2 | # - has been running for longer than one hour (3600 seconds)
3 | # - was created by a Job
4 | # and instructs Cleaner to delete this Pod.
5 | # This does not delete the Job.
6 | apiVersion: apps.projectsveltos.io/v1alpha1
7 | kind: Cleaner
8 | metadata:
9 | name: pods-from-job
10 | spec:
11 | schedule: "* 0 * * *"
12 | resourcePolicySet:
13 | resourceSelectors:
14 | - kind: Pod
15 | group: ""
16 | version: v1
17 | evaluate: |
18 | -- Convert creationTimestamp "2023-12-12T09:35:56Z"
19 | function convertTimestampString(timestampStr)
20 | local convertedTimestamp = string.gsub(
21 | timestampStr,
22 | '(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z',
23 | function(y, mon, d, h, mi, s)
24 | return os.time({
25 | year = tonumber(y),
26 | month = tonumber(mon),
27 | day = tonumber(d),
28 | hour = tonumber(h),
29 | min = tonumber(mi),
30 | sec = tonumber(s)
31 | })
32 | end
33 | )
34 | return convertedTimestamp
35 | end
36 |
37 | function evaluate()
38 | hs = {}
39 | hs.matching = false
40 |
41 | currentTime = os.time()
42 |
43 | creationTimestamp = convertTimestampString(obj.metadata.creationTimestamp)
44 |
45 | hs.message = creationTimestamp
46 | print('creationTimestamp: ' .. creationTimestamp)
47 | print('currentTime: ' .. currentTime)
48 |
49 | timeDifference = os.difftime(currentTime, tonumber(creationTimestamp))
50 |
51 | print('timeDifference: ' .. timeDifference)
52 |
53 | -- if pod has been running for over an hour
54 | if timeDifference > 3600 then
55 | if obj.metadata.ownerReferences ~= nil then
56 | for _, owner in ipairs(obj.metadata.ownerReferences) do
57 | if owner.kind == "Job" and owner.apiVersion == "batch/v1" then
58 | hs.matching = true
59 | end
60 | end
61 | end
62 | end
63 |
64 |
65 | return hs
66 | end
67 | action: Delete
68 |
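
convertTimestampString leans on string.gsub with a function replacement: the six captured fields are fed to os.time, and the resulting epoch seconds replace the matched timestamp. Note that os.time interprets the fields in the host's local timezone even though the input is UTC, a quirk the helper above shares. A standalone sketch (the sample timestamp is arbitrary):

    -- Convert an RFC3339-style timestamp to epoch seconds (same pattern as above);
    -- the extra parentheses drop gsub's second return value (the match count)
    local function toEpoch(timestampStr)
      return tonumber((string.gsub(
        timestampStr,
        '(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z',
        function(y, mon, d, h, mi, s)
          return os.time({
            year = tonumber(y), month = tonumber(mon), day = tonumber(d),
            hour = tonumber(h), min = tonumber(mi), sec = tonumber(s)
          })
        end)))
    end

    local created = toEpoch("2023-12-12T09:35:56Z")
    print(os.difftime(os.time(), created) > 3600)  -- true once older than one hour
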
--------------------------------------------------------------------------------
/examples-unused-resources/kustomize_configmap_generator/left_over_configmap.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all ConfigMaps in a given namespace "test" that start with the same prefix "dynamic-config-properties".
2 | # Those ConfigMaps were all generated by the kustomize ConfigMapGenerator.
3 | # Cleaner will delete all such instances, leaving only the one that was created last (by looking at the creation timestamp).
4 | apiVersion: apps.projectsveltos.io/v1alpha1
5 | kind: Cleaner
6 | metadata:
7 | name: unused-configmaps
8 | spec:
9 | schedule: "* 0 * * *"
10 | action: Delete
11 | resourcePolicySet:
12 | resourceSelectors:
13 | - kind: ConfigMap
14 | group: ""
15 | version: v1
16 | namespace: test # namespace where to look for ConfigMaps
17 | aggregatedSelection: |
18 | namePrefix = "dynamic-config-properties-" -- all ConfigMap with this name prefix
19 |
20 | -- This function returns true if name starts with prefix
21 | function hasPrefix(name, prefix)
22 | local prefixLength = string.len(prefix) -- Get the length of the prefix
23 | return string.sub(name, 1, prefixLength) == prefix
24 | end
25 |
26 | -- Convert creationTimestamp (e.g. "2023-12-12T09:35:56Z") to epoch seconds
27 | function convertTimestampString(timestampStr)
28 | local convertedTimestamp = string.gsub(
29 | timestampStr,
30 | '(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z',
31 | function(y, mon, d, h, mi, s)
32 | return os.time({
33 | year = tonumber(y),
34 | month = tonumber(mon),
35 | day = tonumber(d),
36 | hour = tonumber(h),
37 | min = tonumber(mi),
38 | sec = tonumber(s)
39 | })
40 | end
41 | )
42 | return convertedTimestamp
43 | end
44 |
45 | function evaluate()
46 | local hs = {}
47 | hs.message = ""
48 |
49 | local configMaps = {}
50 | local duplicateConfigMaps = {}
51 |
52 | -- Find all ConfigMap with namePrefix
53 | for _, resource in ipairs(resources) do
54 | if hasPrefix(resource.metadata.name, namePrefix) then
55 | table.insert(duplicateConfigMaps, resource)
56 | end
57 | end
58 |
59 | -- Out of duplicate ConfigMaps, find the one that was created last
60 | local mostRecentResource = nil
61 | local latestTimestamp = 0
62 |
63 |
64 | for _, configMap in ipairs(duplicateConfigMaps) do
65 | creationTimestamp = convertTimestampString(configMap.metadata.creationTimestamp)
66 |
67 | timeDifference = os.difftime(tonumber(creationTimestamp), latestTimestamp)
68 |
69 | -- Check if the current resource has a later timestamp
70 | if timeDifference > 0 then
71 | mostRecentResource = configMap
72 | latestTimestamp = tonumber(creationTimestamp)
73 | end
74 | end
75 |
76 | local oldConfigMaps = {}
77 | for _, configMap in ipairs(duplicateConfigMaps) do
78 | if configMap.metadata.name ~= mostRecentResource.metadata.name then
79 | print("ConfigMap is duplicate: ", configMap.metadata.name)
80 | table.insert(oldConfigMaps, {resource = configMap})
81 | end
82 | end
83 |
84 | if #oldConfigMaps > 0 then
85 | hs.resources = oldConfigMaps
86 | end
87 | return hs
88 | end
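
Once every timestamp is reduced to epoch seconds, picking the survivor is a plain maximum. A standalone sketch of the selection step with hypothetical values:

    -- Hypothetical ConfigMap creation times, already converted to epoch seconds
    local created = {
      ["dynamic-config-properties-1"] = 1715431457,
      ["dynamic-config-properties-2"] = 1715517857,
      ["dynamic-config-properties-3"] = 1715604257,
    }

    local newestName, latestTimestamp = nil, 0
    for name, t in pairs(created) do
      if t > latestTimestamp then  -- a later creation timestamp wins
        newestName, latestTimestamp = name, t
      end
    end
    print(newestName)  -- dynamic-config-properties-3: the only ConfigMap kept
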
--------------------------------------------------------------------------------
/examples-unused-resources/persistent-volume-claims/unused_persistent-volume-claims.yaml:
--------------------------------------------------------------------------------
1 | # Find all PersistentVolumeClaims currently not
2 | # used by any Pods. It considers all namespaces.
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: stale-persistent-volume-claim
7 | spec:
8 | schedule: "* 0 * * *"
9 | action: Delete # Delete matching resources
10 | resourcePolicySet:
11 | resourceSelectors:
12 | - kind: Pod
13 | group: ""
14 | version: v1
15 | - kind: PersistentVolumeClaim
16 | group: ""
17 | version: v1
18 | aggregatedSelection: |
19 | function isUsed(pvc, pods)
20 | if pods == nil then
21 | return false
22 | end
23 | for _, pod in ipairs(pods) do
24 | if pod.spec.volumes ~= nil then
25 | for _,volume in ipairs(pod.spec.volumes) do
26 | if volume.persistentVolumeClaim ~= nil and volume.persistentVolumeClaim.claimName == pvc.metadata.name then
27 | return true
28 | end
29 | end
30 | end
31 | end
32 | return false
33 | end
34 |
35 | function evaluate()
36 | local hs = {}
37 | hs.message = ""
38 |
39 | local pods = {}
40 | local pvcs = {}
41 | local unusedPVCs = {}
42 |
43 | -- Separate pods and pvcs from the resources
44 | -- Group those by namespace
45 | for _, resource in ipairs(resources) do
46 | local kind = resource.kind
47 | if kind == "Pod" then
48 | if not pods[resource.metadata.namespace] then
49 | pods[resource.metadata.namespace] = {}
50 | end
51 | table.insert(pods[resource.metadata.namespace], resource)
52 | elseif kind == "PersistentVolumeClaim" then
53 | if not pvcs[resource.metadata.namespace] then
54 | pvcs[resource.metadata.namespace] = {}
55 | end
56 | table.insert(pvcs[resource.metadata.namespace], resource)
57 | end
58 | end
59 |
60 | -- Iterate through each namespace and identify unused PVCs
61 | for namespace, perNamespacePVCs in pairs(pvcs) do
62 | for _, pvc in ipairs(perNamespacePVCs) do
63 | if not isUsed(pvc, pods[namespace]) then
64 | table.insert(unusedPVCs, {resource = pvc})
65 | end
66 | end
67 | end
68 |
69 | if #unusedPVCs > 0 then
70 | hs.resources = unusedPVCs
71 | end
72 | return hs
73 | end
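
isUsed walks every volume of every Pod in the PVC's namespace and compares claimName against the PVC's name; any other volume type is skipped. A standalone sketch with hypothetical objects:

    -- Hypothetical Pod mounting one PVC, shaped like the tables Cleaner passes in
    local pod = {
      spec = {
        volumes = {
          { persistentVolumeClaim = { claimName = "data-pvc" } },
          { emptyDir = {} },  -- non-PVC volumes are ignored
        }
      }
    }

    local function isUsed(pvcName, pods)
      for _, p in ipairs(pods or {}) do
        for _, volume in ipairs(p.spec.volumes or {}) do
          if volume.persistentVolumeClaim ~= nil
             and volume.persistentVolumeClaim.claimName == pvcName then
            return true
          end
        end
      end
      return false
    end

    print(isUsed("data-pvc", { pod }))   -- true: referenced, so the PVC is kept
    print(isUsed("stale-pvc", { pod }))  -- false: would be selected for deletion
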
--------------------------------------------------------------------------------
/examples-unused-resources/persistent-volumes/unbound_persistent-volumes.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any PersistentVolume whose phase is
2 | # set to anything but "Bound" and deletes those instances
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: unbound-persistent-volumes
7 | spec:
8 | schedule: "* 0 * * *"
9 | resourcePolicySet:
10 | resourceSelectors:
11 | - kind: PersistentVolume
12 | group: ""
13 | version: v1
14 | evaluate: |
15 | function evaluate()
16 | hs = {}
17 | hs.matching = false
18 | if obj.status ~= nil and obj.status.phase ~= "Bound" then
19 | hs.matching = true
20 | end
21 | return hs
22 | end
23 | action: Delete
--------------------------------------------------------------------------------
/examples-unused-resources/pod-disruption-budgets/unused_pod-disruption-budgets.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all PodDisruptionBudget instances that are stale.
2 | # A PodDisruptionBudget is stale if:
3 | # - it matches no Deployment instance
4 | # - it matches no StatefulSet instance
5 | apiVersion: apps.projectsveltos.io/v1alpha1
6 | kind: Cleaner
7 | metadata:
8 | name: stale-pod-disruption-budgets
9 | spec:
10 | schedule: "* 0 * * *"
11 | action: Delete
12 | resourcePolicySet:
13 | resourceSelectors:
14 | - kind: PodDisruptionBudget
15 | group: "policy"
16 | version: v1
17 | - kind: Deployment
18 | group: "apps"
19 | version: v1
20 | - kind: StatefulSet
21 | group: "apps"
22 | version: v1
23 | aggregatedSelection: |
24 | function isMatch(pdbLabels, destLabels)
25 | for k,v in pairs(pdbLabels) do
26 | if destLabels[k] ~= v then
27 | return false
28 | end
29 | end
30 | return true
31 | end
32 |
33 | function isMatchingAny(pdb, resources)
34 | if resources == nil then
35 | return false
36 | end
37 | for _,resource in ipairs(resources) do
38 | if pdb.metadata.namespace == resource.metadata.namespace then
39 | if isMatch(pdb.spec.selector.matchLabels, resource.spec.template.metadata.labels) then
40 | return true
41 | end
42 | end
43 | end
44 | return false
45 | end
46 |
47 | function evaluate()
48 | local hs = {}
49 | hs.message = ""
50 |
51 | local pdbs = {}
52 | local deployments = {}
53 | local statefulsets = {}
54 | local stalePdbs = {}
55 |
56 | -- Separate pdbs and deployments and statefulsets from the resources
57 | for _, resource in ipairs(resources) do
58 | local kind = resource.kind
59 | if kind == "PodDisruptionBudget" then
60 | table.insert(pdbs, resource)
61 | elseif kind == "Deployment" then
62 | table.insert(deployments, resource)
63 | elseif kind == "StatefulSet" then
64 | table.insert(statefulsets, resource)
65 | end
66 | end
67 |
68 | for _,pdb in ipairs(pdbs) do
69 | if not isMatchingAny(pdb,deployments) and not isMatchingAny(pdb,statefulsets) then
70 | table.insert(stalePdbs, {resource = pdb})
71 | end
72 | end
73 |
74 | if #stalePdbs > 0 then
75 | hs.resources = stalePdbs
76 | end
77 | return hs
78 | end
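
isMatch implements one-way subset semantics: every label in the PDB selector must be present, with the same value, in the workload's pod template labels, while extra labels on the workload are ignored. A standalone sketch:

    -- Subset check: all selector labels must appear on the target
    local function isMatch(pdbLabels, destLabels)
      for k, v in pairs(pdbLabels) do
        if destLabels[k] ~= v then
          return false
        end
      end
      return true
    end

    local template = { app = "web", tier = "frontend" }
    print(isMatch({ app = "web" }, template))  -- true: extra labels do not matter
    print(isMatch({ app = "api" }, template))  -- false: this PDB would be stale
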
--------------------------------------------------------------------------------
/examples-unused-resources/pods/completed_pods.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any Pod with the condition PodCompleted
2 | # set to "True" and instructs Cleaner to delete it.
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: completed-pods
7 | spec:
8 | schedule: "* 0 * * *"
9 | resourcePolicySet:
10 | resourceSelectors:
11 | - kind: Pod
12 | group: ""
13 | version: v1
14 | evaluate: |
15 | function evaluate()
16 | hs = {}
17 | hs.matching = false
18 | if obj.status.conditions ~= nil then
19 | for _, condition in ipairs(obj.status.conditions) do
20 | if condition.reason == "PodCompleted" and condition.status == "True" then
21 | hs.matching = true
22 | end
23 | end
24 | end
25 | return hs
26 | end
27 | action: Delete
--------------------------------------------------------------------------------
/examples-unused-resources/roles/unused_roles.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all unused Role instances.
2 | # All namespaces are considered.
3 | # An unused Role is an instance that is not referenced
4 | # by any RoleBinding
5 | apiVersion: apps.projectsveltos.io/v1alpha1
6 | kind: Cleaner
7 | metadata:
8 | name: unused-roles
9 | spec:
10 | schedule: "* 0 * * *"
11 | action: Delete
12 | resourcePolicySet:
13 | resourceSelectors:
14 | - kind: Role
15 | group: "rbac.authorization.k8s.io"
16 | version: v1
17 | - kind: RoleBinding
18 | group: "rbac.authorization.k8s.io"
19 | version: v1
20 | aggregatedSelection: |
21 | -- Given Role namespace and name returns a unique Key
22 | function getRoleKey(namespace, name)
23 | return namespace .. ":" .. name
24 | end
25 |
26 | function evaluate()
27 | local hs = {}
28 | hs.message = ""
29 |
30 | -- Contains list of existing roles
31 | local existingRoles = {}
32 | -- Contains list of roles currently referenced by roleBindings
33 | local usedRoles = {}
34 |
35 | local unusedRoles = {}
36 |
37 | -- Create list of existingRoles and usedRoles
38 | for _, resource in ipairs(resources) do
39 | local kind = resource.kind
40 | if kind == "Role" then
41 | table.insert(existingRoles, resource)
42 | elseif kind == "RoleBinding" then
43 | if resource.roleRef.kind == "Role" then
44 | roleKey = getRoleKey(resource.metadata.namespace, resource.roleRef.name)
45 | usedRoles[roleKey] = true
46 | end
47 | end
48 | end
49 |
50 | -- Iterate over existing roles and find those no longer used
51 | for _,role in ipairs(existingRoles) do
52 | roleKey = getRoleKey(role.metadata.namespace, role.metadata.name)
53 | if not usedRoles[roleKey] then
54 | table.insert(unusedRoles, {resource = role})
55 | end
56 | end
57 |
58 | if #unusedRoles > 0 then
59 | hs.resources = unusedRoles
60 | end
61 | return hs
62 | end
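
Because Roles are namespaced, the lookup key has to combine namespace and name; two Roles called "viewer" in different namespaces must not shadow each other. A standalone sketch with hypothetical names:

    local function getRoleKey(namespace, name)
      return namespace .. ":" .. name
    end

    local usedRoles = {}
    usedRoles[getRoleKey("dev", "viewer")] = true  -- referenced by a RoleBinding

    print(usedRoles[getRoleKey("dev", "viewer")])   -- true: in use
    print(usedRoles[getRoleKey("prod", "viewer")])  -- nil: unused despite the name
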
--------------------------------------------------------------------------------
/examples-unused-resources/service-accounts/unused_service-accounts.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all unused ServiceAccount instances. All namespaces are considered.
2 | # A ServiceAccount is unused if:
3 | # - it is used by no Pod instance
4 | # - it is referenced in no RoleBinding
5 | # - it is referenced in no ClusterRoleBinding
6 | apiVersion: apps.projectsveltos.io/v1alpha1
7 | kind: Cleaner
8 | metadata:
9 | name: unused-service-accounts
10 | spec:
11 | schedule: "* 0 * * *"
12 | action: Delete
13 | resourcePolicySet:
14 | resourceSelectors:
15 | - kind: Pod
16 | group: ""
17 | version: v1
18 | - kind: ServiceAccount
19 | group: ""
20 | version: v1
21 | - kind: RoleBinding
22 | group: "rbac.authorization.k8s.io"
23 | version: v1
24 | - kind: ClusterRoleBinding
25 | group: "rbac.authorization.k8s.io"
26 | version: v1
27 | aggregatedSelection: |
28 | function getKey(namespace, name)
29 | return namespace .. ":" .. name
30 | end
31 |
32 | function addRoleBindingServiceAccounts(roleBinding, usedServiceAccounts)
33 | if roleBinding.subjects ~= nil then
34 | for _,subject in ipairs(roleBinding.subjects) do
35 | if subject.kind == "ServiceAccount" then
36 | key = getKey(roleBinding.metadata.namespace, subject.name)
37 | usedServiceAccounts[key] = true
38 | end
39 | end
40 | end
41 | end
42 |
43 | function addClusterRoleBindingServiceAccounts(clusterRoleBinding, usedServiceAccounts)
44 | if clusterRoleBinding.subjects ~= nil then
45 | for _,subject in ipairs(clusterRoleBinding.subjects) do
46 | if subject.kind == "ServiceAccount" then
47 | key = getKey(subject.namespace, subject.name)
48 | usedServiceAccounts[key] = true
49 | end
50 | end
51 | end
52 | end
53 |
54 | function addPodServiceAccount(pod, usedServiceAccounts)
55 | if pod.spec.serviceAccountName ~= nil then
56 | serviceAccount = pod.spec.serviceAccountName
57 | else
58 | serviceAccount = "default"
59 | end
60 | key = getKey(pod.metadata.namespace, serviceAccount)
61 | usedServiceAccounts[key] = true
62 | end
63 |
64 | function evaluate()
65 | local hs = {}
66 | hs.message = ""
67 |
68 | local serviceAccounts = {}
69 | local usedServiceAccounts = {}
70 | local unusedServiceAccounts = {}
71 |
72 | for _, resource in ipairs(resources) do
73 | local kind = resource.kind
74 | if kind == "ServiceAccount" then
75 | table.insert(serviceAccounts, resource)
76 | elseif kind == "Pod" then
77 | addPodServiceAccount(resource, usedServiceAccounts)
78 | elseif kind == "RoleBinding" then
79 | addRoleBindingServiceAccounts(resource, usedServiceAccounts)
80 | elseif kind == "ClusterRoleBinding" then
81 | addClusterRoleBindingServiceAccounts(resource, usedServiceAccounts)
82 | end
83 | end
84 |
85 | -- walk all existing serviceAccounts and find the unused ones
86 | for _,serviceAccount in ipairs(serviceAccounts) do
87 | key = getKey(serviceAccount.metadata.namespace, serviceAccount.metadata.name)
88 | if not usedServiceAccounts[key] then
89 | table.insert(unusedServiceAccounts, {resource = serviceAccount})
90 | end
91 | end
92 |
93 | if #unusedServiceAccounts > 0 then
94 | hs.resources = unusedServiceAccounts
95 | end
96 | return hs
97 | end
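
addPodServiceAccount covers the implicit case: a Pod that omits spec.serviceAccountName runs as the namespace's "default" ServiceAccount, which therefore counts as used. A standalone sketch (Lua's `or` idiom replaces the if/else above):

    local function podServiceAccountKey(pod)
      local serviceAccount = pod.spec.serviceAccountName or "default"
      return pod.metadata.namespace .. ":" .. serviceAccount
    end

    local explicit = { metadata = { namespace = "ns1" },
                       spec = { serviceAccountName = "worker" } }
    local implicit = { metadata = { namespace = "ns1" }, spec = {} }

    print(podServiceAccountKey(explicit))  -- ns1:worker
    print(podServiceAccountKey(implicit))  -- ns1:default
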
--------------------------------------------------------------------------------
/examples-unused-resources/stateful-sets/statefulset_with_no_autoscaler.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds StatefulSet instances in all namespaces
2 | # that have no associated HorizontalPodAutoscaler and instructs Cleaner to
3 | # delete those instances
4 | apiVersion: apps.projectsveltos.io/v1alpha1
5 | kind: Cleaner
6 | metadata:
7 | name: statefulset-with-no-autoscaler
8 | spec:
9 | schedule: "* 0 * * *"
10 | action: Delete # Delete matching resources
11 | resourcePolicySet:
12 | resourceSelectors:
13 | - kind: StatefulSet
14 | group: "apps"
15 | version: v1
16 | - kind: HorizontalPodAutoscaler
17 | group: "autoscaling"
18 | version: v2
19 | aggregatedSelection: |
20 | function evaluate()
21 | local hs = {}
22 | hs.message = ""
23 |
24 | local statefulSets = {}
25 | local autoscalers = {}
26 | local statefulSetWithNoAutoscaler = {}
27 |
28 | -- Separate statefulSets and autoscalers from the resources
29 | for _, resource in ipairs(resources) do
30 | local kind = resource.kind
31 | if kind == "StatefulSet" then
32 | table.insert(statefulSets, resource)
33 | elseif kind == "HorizontalPodAutoscaler" then
34 | if resource.spec.scaleTargetRef.kind == "StatefulSet" then
35 | table.insert(autoscalers, resource)
36 | end
37 | end
38 | end
39 |
40 | -- Check for each statefulSets if there is a matching HorizontalPodAutoscaler
41 | for _, statefulSet in ipairs(statefulSets) do
42 | local statefulSetName = statefulSet.metadata.name
43 | local matchingAutoscaler = false
44 |
45 | for _, autoscaler in ipairs(autoscalers) do
46 | if autoscaler.metadata.namespace == statefulSet.metadata.namespace then
47 | if autoscaler.spec.scaleTargetRef.name == statefulSet.metadata.name then
48 | matchingAutoscaler = true
49 | break
50 | end
51 | end
52 | end
53 |
54 | if not matchingAutoscaler then
55 | table.insert(statefulSetWithNoAutoscaler, {resource = statefulSet})
56 | -- keep scanning: collect every StatefulSet without an autoscaler
57 | end
58 | end
59 |
60 | if #statefulSetWithNoAutoscaler > 0 then
61 | hs.resources = statefulSetWithNoAutoscaler
62 | end
63 | return hs
64 | end
--------------------------------------------------------------------------------
/examples-unused-resources/stateful-sets/statefulset_with_no_replicas.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance will find any StatefulSet in any namespace
2 | # with spec.replicas set to 0 and deletes those instances
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: statefulset-with-zero-replicas
7 | spec:
8 | schedule: "* 0 * * *"
9 | resourcePolicySet:
10 | resourceSelectors:
11 | - kind: StatefulSet
12 | group: "apps"
13 | version: v1
14 | evaluate: |
15 | function evaluate()
16 | hs = {}
17 | hs.matching = false
18 | if obj.spec.replicas == 0 then
19 | hs.matching = true
20 | end
21 | return hs
22 | end
23 | action: Delete
--------------------------------------------------------------------------------
/examples-unused-resources/time_based_delete/delete_resources_older_than_24hours.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any Pod that:
2 | # - has been running for longer than 24 hours
3 | # and instructs Cleaner to delete this Pod.
4 | #
5 | # If you want to filter Pods based on namespace =>
6 | # - kind: Pod
7 | # group: ""
8 | # version: v1
9 | # namespace:
10 | #
11 | # If you want to filter Pods based on labels =>
12 | # - kind: Pod
13 | # group: ""
14 | # version: v1
15 | # labelFilters:
16 | # - key: app
17 | # operation: Equal
18 | # value: nginx
19 | # - key: environment
20 | # operation: Different
21 | # value: production
22 | #
23 | # If you need further filtering, modify `function evaluate`; you can access any
24 | # field of obj.
25 | #
26 | # If you want to remove any other resource, including your own custom resources,
27 | # replace kind/group/version.
28 | #
29 | apiVersion: apps.projectsveltos.io/v1alpha1
30 | kind: Cleaner
31 | metadata:
32 | name: pods-older-than-24-hours
33 | spec:
34 | schedule: "* 0 * * *"
35 | resourcePolicySet:
36 | resourceSelectors:
37 | - kind: Pod
38 | group: ""
39 | version: v1
40 | evaluate: |
41 | -- Convert creationTimestamp (e.g. "2023-12-12T09:35:56Z") to epoch seconds
42 | function convertTimestampString(timestampStr)
43 | local convertedTimestamp = string.gsub(
44 | timestampStr,
45 | '(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z',
46 | function(y, mon, d, h, mi, s)
47 | return os.time({
48 | year = tonumber(y),
49 | month = tonumber(mon),
50 | day = tonumber(d),
51 | hour = tonumber(h),
52 | min = tonumber(mi),
53 | sec = tonumber(s)
54 | })
55 | end
56 | )
57 | return convertedTimestamp
58 | end
59 |
60 | function evaluate()
61 | hs = {}
62 | hs.matching = false
63 |
64 | -- any resource older than this time will be removed
65 | local removeAfterHour = 24
66 |
67 | currentTime = os.time()
68 |
69 | creationTimestamp = convertTimestampString(obj.metadata.creationTimestamp)
70 |
71 | hs.message = creationTimestamp
72 | print('creationTimestamp: ' .. creationTimestamp)
73 | print('currentTime: ' .. currentTime)
74 |
75 | timeDifference = os.difftime(currentTime, tonumber(creationTimestamp))
76 |
77 | print('timeDifference: ' .. timeDifference)
78 |
79 | -- if resource has been running for over 24 hours
80 | if timeDifference > removeAfterHour*60*60 then
81 | hs.matching = true
82 | end
83 |
84 | return hs
85 | end
86 | action: Delete
87 |
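
The cutoff is plain arithmetic: removeAfterHour hours expressed in seconds. A worked check with hypothetical times:

    local removeAfterHour = 24
    local cutoff = removeAfterHour * 60 * 60       -- 24 * 3600 = 86400 seconds

    local currentTime = 1700000000                 -- hypothetical "now"
    local creationTimestamp = currentTime - 90000  -- created 25 hours ago

    local age = os.difftime(currentTime, creationTimestamp)
    print(age > cutoff)  -- true: 90000 > 86400, so the resource is deleted
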
--------------------------------------------------------------------------------
/hack/boilerplate.go.txt:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023. projectsveltos.io. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
--------------------------------------------------------------------------------
/hack/tools/get-golangci-lint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | # Download and run the golangci-lint install script; "$1" is the version to install into ./bin
6 | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "$(pwd)/bin" "$1"
--------------------------------------------------------------------------------
/hack/tools/get-govulncheck.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | GOBIN=$(pwd)/bin go install golang.org/x/vuln/cmd/govulncheck@"${1}"
6 |
--------------------------------------------------------------------------------
/hack/tools/go_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright 2021 The Kubernetes Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | set -o errexit
17 | set -o nounset
18 | set -o pipefail
19 |
20 | if [ -z "${1}" ]; then
21 | echo "must provide module as first parameter"
22 | exit 1
23 | fi
24 |
25 | if [ -z "${2}" ]; then
26 | echo "must provide binary name as second parameter"
27 | exit 1
28 | fi
29 |
30 | if [ -z "${3}" ]; then
31 | echo "must provide version as third parameter"
32 | exit 1
33 | fi
34 |
35 | if [ -z "${GOBIN}" ]; then
36 | echo "GOBIN is not set. Must set GOBIN to install the bin in a specified directory."
37 | exit 1
38 | fi
39 |
40 | rm -f "${GOBIN}/${2}"* || true
41 |
42 | # install the golang module specified as the first argument
43 | go install "${1}@${3}"
44 | mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}"
45 | ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}"
--------------------------------------------------------------------------------
/hack/tools/tools.go:
--------------------------------------------------------------------------------
1 | //go:build tools
2 | // +build tools
3 |
4 | /*
5 | Copyright 2022. projectsveltos.io. All rights reserved.
6 |
7 | Licensed under the Apache License, Version 2.0 (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at
10 |
11 | http://www.apache.org/licenses/LICENSE-2.0
12 |
13 | Unless required by applicable law or agreed to in writing, software
14 | distributed under the License is distributed on an "AS IS" BASIS,
15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | See the License for the specific language governing permissions and
17 | limitations under the License.
18 | */
19 |
20 | // This package imports things required by build scripts, to force `go mod` to see them as
21 | // dependencies
22 | package tools
23 |
24 | import (
25 | _ "github.com/a8m/envsubst"
26 | _ "github.com/helm/chart-testing/v3/ct"
27 | _ "github.com/onsi/ginkgo/v2/ginkgo"
28 | _ "golang.org/x/oauth2/google"
29 | _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
30 | _ "sigs.k8s.io/controller-tools/cmd/controller-gen"
31 | _ "sigs.k8s.io/kind"
32 | )
33 |
--------------------------------------------------------------------------------
/internal/controller/executor/export_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023. projectsveltos.io. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package executor
18 |
19 | var (
20 | FetchResources = fetchResources
21 | GetMatchingResources = getMatchingResources
22 | DeleteMatchingResources = deleteMatchingResources
23 | IsMatch = isMatch
24 | Transform = transform
25 | AggregatedSelection = aggregatedSelection
26 | GetNamespaces = getNamespaces
27 | )
28 |
29 | var (
30 | GetWebexInfo = getWebexInfo
31 | GetSlackInfo = getSlackInfo
32 | )
33 |
34 | func (m *Manager) ClearInternalStruct() {
35 | m.dirty = make([]string, 0)
36 | m.inProgress = make([]string, 0)
37 | m.jobQueue = make([]string, 0)
38 | m.results = make(map[string]error)
39 | }
40 |
41 | func (m *Manager) SetInProgress(inProgress []string) {
42 | m.inProgress = inProgress
43 | }
44 |
45 | func (m *Manager) GetInProgress() []string {
46 | return m.inProgress
47 | }
48 |
49 | func (m *Manager) SetDirty(dirty []string) {
50 | m.dirty = dirty
51 | }
52 |
53 | func (m *Manager) GetDirty() []string {
54 | return m.dirty
55 | }
56 |
57 | func (m *Manager) SetJobQueue(cleanerName string) {
58 | m.jobQueue = []string{cleanerName}
59 | }
60 |
61 | func (m *Manager) GetJobQueue() []string {
62 | return m.jobQueue
63 | }
64 |
65 | func (m *Manager) SetResults(results map[string]error) {
66 | m.results = results
67 | }
68 |
69 | func (m *Manager) GetResults() map[string]error {
70 | return m.results
71 | }
72 |
73 | func GetWebexRoom(info *webexInfo) string {
74 | return info.room
75 | }
76 | func GetWebexToken(info *webexInfo) string {
77 | return info.token
78 | }
79 |
80 | func GetSlackChannelID(info *slackInfo) string {
81 | return info.channelID
82 | }
83 | func GetSlackToken(info *slackInfo) string {
84 | return info.token
85 | }
86 |
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/README.md:
--------------------------------------------------------------------------------
1 | If you want to validate your aggregatedSelection functions:
2 |
3 | 1. create a sub-directory
4 | 2. create a file named __cleaner.yaml__ containing your Cleaner instance
5 | 3. create a file named __resources.yaml__ containing all the resources the Cleaner instance will find
6 | 4. create a file named __matching.yaml__ containing all the resources that match __AggregatedSelection__
7 | 5. run ``make test``
8 |
9 | That will run the exact code Cleaner will run in your cluster.
10 | If you see no errors, your Cleaner instance is correct.
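
For instance, a hypothetical test directory could look like this (names are illustrative):

    validate_aggregatedselection/
    └── my_cleaner/
        ├── cleaner.yaml     # the Cleaner instance under test
        ├── resources.yaml   # every resource the selectors would fetch
        └── matching.yaml    # the subset expected to match
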
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/deployment_with_autoscaler/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds Deployment instances in all namespaces
2 | # that have no associated HorizontalPodAutoscaler and instructs Cleaner to
3 | # delete those instances
4 | apiVersion: apps.projectsveltos.io/v1alpha1
5 | kind: Cleaner
6 | metadata:
7 | name: deployment-with-no-autoscaler
8 | spec:
9 | schedule: "* 0 * * *"
10 | action: Delete # Delete matching resources
11 | resourcePolicySet:
12 | resourceSelectors:
13 | - kind: Deployment
14 | group: "apps"
15 | version: v1
16 | - kind: HorizontalPodAutoscaler
17 | group: "autoscaling"
18 | version: v2
19 | aggregatedSelection: |
20 | function evaluate()
21 | local hs = {}
22 | hs.message = ""
23 |
24 | local deployments = {}
25 | local autoscalers = {}
26 | local deploymentWithNoAutoscaler = {}
27 |
28 | -- Separate deployments and autoscalers from the resources
29 | for _, resource in ipairs(resources) do
30 | local kind = resource.kind
31 | if kind == "Deployment" then
32 | table.insert(deployments, resource)
33 | elseif kind == "HorizontalPodAutoscaler" then
34 | if resource.spec.scaleTargetRef.kind == "Deployment" then
35 | table.insert(autoscalers, resource)
36 | end
37 | end
38 | end
39 |
40 | -- Check for each deployment if there is a matching HorizontalPodAutoscaler
41 | for _, deployment in ipairs(deployments) do
42 | local deploymentName = deployment.metadata.name
43 | local matchingAutoscaler = false
44 |
45 | for _, autoscaler in ipairs(autoscalers) do
46 | if autoscaler.metadata.namespace == deployment.metadata.namespace then
47 | if autoscaler.spec.scaleTargetRef.name == deployment.metadata.name then
48 | matchingAutoscaler = true
49 | break
50 | end
51 | end
52 | end
53 |
54 | if not matchingAutoscaler then
55 | table.insert(deploymentWithNoAutoscaler, {resource = deployment})
56 | -- keep scanning: collect every Deployment without an autoscaler
57 | end
58 | end
59 |
60 | if #deploymentWithNoAutoscaler > 0 then
61 | hs.resources = deploymentWithNoAutoscaler
62 | end
63 | return hs
64 | end
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/deployment_with_autoscaler/matching.yaml:
--------------------------------------------------------------------------------
1 | # This file contains all resources defined in resources.yaml
2 | # which are a match for aggregatedSelection
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 | name: not-backed-by-autoscaler
7 | namespace: default
8 | spec:
9 | replicas: 5
10 | template:
11 | metadata:
12 | labels:
13 | app: php-apache
14 | spec:
15 | containers:
16 | - name: php-apache
17 | image: php:8.0-apache
18 | resources:
19 | requests:
20 | cpu: 100m
21 | memory: 128Mi
22 | limits:
23 | cpu: 200m
24 | memory: 256Mi
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/deployment_with_autoscaler/resources.yaml:
--------------------------------------------------------------------------------
1 | # This file contains all resources present in the system when Cleaner is processed
2 | apiVersion: autoscaling/v2
3 | kind: HorizontalPodAutoscaler
4 | metadata:
5 | name: php-apache
6 | namespace: default
7 | spec:
8 | scaleTargetRef:
9 | apiVersion: apps/v1
10 | kind: Deployment
11 | name: php-apache
12 | minReplicas: 3
13 | maxReplicas: 10
14 | targetCPUUtilizationPercentage: 80
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: php-apache
20 | namespace: default
21 | spec:
22 | replicas: 5
23 | template:
24 | metadata:
25 | labels:
26 | app: php-apache
27 | spec:
28 | containers:
29 | - name: php-apache
30 | image: php:8.0-apache
31 | resources:
32 | requests:
33 | cpu: 100m
34 | memory: 128Mi
35 | limits:
36 | cpu: 200m
37 | memory: 256Mi
38 | ---
39 | apiVersion: apps/v1
40 | kind: Deployment
41 | metadata:
42 | name: not-backed-by-autoscaler
43 | namespace: default
44 | spec:
45 | replicas: 5
46 | template:
47 | metadata:
48 | labels:
49 | app: php-apache
50 | spec:
51 | containers:
52 | - name: php-apache
53 | image: php:8.0-apache
54 | resources:
55 | requests:
56 | cpu: 100m
57 | memory: 128Mi
58 | limits:
59 | cpu: 200m
60 | memory: 256Mi
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/left_over_configmap_by_ kustomize_configmapgenerator/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all ConfigMaps in a given namespace "test" that start with the same prefix "dynamic-config-properties".
2 | # Those ConfigMaps were all generated by the kustomize ConfigMapGenerator.
3 | # Cleaner will delete all such instances, leaving only the one that was created last (by looking at the creation timestamp).
4 | apiVersion: apps.projectsveltos.io/v1alpha1
5 | kind: Cleaner
6 | metadata:
7 | name: unused-configmaps
8 | spec:
9 | schedule: "* 0 * * *"
10 | action: Delete
11 | resourcePolicySet:
12 | resourceSelectors:
13 | - kind: ConfigMap
14 | group: ""
15 | version: v1
16 | namespace: test # namespace where to look for ConfigMaps
17 | aggregatedSelection: |
18 | namePrefix = "dynamic-config-properties-" -- all ConfigMap with this name prefix
19 |
20 | -- This function returns true if name starts with prefix
21 | function hasPrefix(name, prefix)
22 | local prefixLength = string.len(prefix) -- Get the length of the prefix
23 | return string.sub(name, 1, prefixLength) == prefix
24 | end
25 |
26 | -- Convert creationTimestamp (e.g. "2023-12-12T09:35:56Z") to epoch seconds
27 | function convertTimestampString(timestampStr)
28 | local convertedTimestamp = string.gsub(
29 | timestampStr,
30 | '(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z',
31 | function(y, mon, d, h, mi, s)
32 | return os.time({
33 | year = tonumber(y),
34 | month = tonumber(mon),
35 | day = tonumber(d),
36 | hour = tonumber(h),
37 | min = tonumber(mi),
38 | sec = tonumber(s)
39 | })
40 | end
41 | )
42 | return convertedTimestamp
43 | end
44 |
45 | function evaluate()
46 | local hs = {}
47 | hs.message = ""
48 |
49 | local configMaps = {}
50 | local duplicateConfigMaps = {}
51 |
52 | -- Find all ConfigMap with namePrefix
53 | for _, resource in ipairs(resources) do
54 | if hasPrefix(resource.metadata.name, namePrefix) then
55 | table.insert(duplicateConfigMaps, resource)
56 | end
57 | end
58 |
59 | -- Out of duplicate ConfigMaps, find the one that was created last
60 | local mostRecentResource = nil
61 | local latestTimestamp = 0
62 |
63 |
64 | for _, configMap in ipairs(duplicateConfigMaps) do
65 | creationTimestamp = convertTimestampString(configMap.metadata.creationTimestamp)
66 |
67 | timeDifference = os.difftime(tonumber(creationTimestamp), latestTimestamp)
68 |
69 | -- Check if the current resource has a later timestamp
70 | if timeDifference > 0 then
71 | mostRecentResource = configMap
72 | latestTimestamp = tonumber(creationTimestamp)
73 | end
74 | end
75 |
76 | local oldConfigMaps = {}
77 | for _, configMap in ipairs(duplicateConfigMaps) do
78 | if configMap.metadata.name ~= mostRecentResource.metadata.name then
79 | print("ConfigMap is duplicate: ", configMap.metadata.name)
80 | table.insert(oldConfigMaps, {resource = configMap})
81 | end
82 | end
83 |
84 | if #oldConfigMaps > 0 then
85 | hs.resources = oldConfigMaps
86 | end
87 | return hs
88 | end
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/left_over_configmap_by_ kustomize_configmapgenerator/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: dynamic-config-properties-1
5 | namespace: test
6 | creationTimestamp: "2024-05-11T12:44:17Z"
7 | data:
8 | SPECIAL_LEVEL: very
9 | SPECIAL_TYPE: charm
10 | ---
11 | apiVersion: v1
12 | kind: ConfigMap
13 | metadata:
14 | name: dynamic-config-properties-2
15 | namespace: test
16 | creationTimestamp: "2024-05-12T12:44:17Z"
17 | data:
18 | SPECIAL_LEVEL: very
19 | SPECIAL_TYPE: charm
20 | ---
21 | apiVersion: v1
22 | kind: ConfigMap
23 | metadata:
24 | name: dynamic-config-properties-3
25 | namespace: test
26 | creationTimestamp: "2024-05-13T12:44:17Z"
27 | data:
28 | SPECIAL_LEVEL: very
29 | SPECIAL_TYPE: charm
30 | ---
31 | apiVersion: v1
32 | kind: ConfigMap
33 | metadata:
34 | name: dynamic-config-properties-4
35 | namespace: test
36 | creationTimestamp: "2024-05-18T12:44:17Z"
37 | data:
38 | SPECIAL_LEVEL: very
39 | SPECIAL_TYPE: charm
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/left_over_configmap_by_ kustomize_configmapgenerator/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: dynamic-config-properties-1
5 | namespace: test
6 | creationTimestamp: "2024-05-11T12:44:17Z"
7 | data:
8 | SPECIAL_LEVEL: very
9 | SPECIAL_TYPE: charm
10 | ---
11 | apiVersion: v1
12 | kind: ConfigMap
13 | metadata:
14 | name: dynamic-config-properties-2
15 | namespace: test
16 | creationTimestamp: "2024-05-12T12:44:17Z"
17 | data:
18 | SPECIAL_LEVEL: very
19 | SPECIAL_TYPE: charm
20 | ---
21 | apiVersion: v1
22 | kind: ConfigMap
23 | metadata:
24 | name: dynamic-config-properties-3
25 | namespace: test
26 | creationTimestamp: "2024-05-13T12:44:17Z"
27 | data:
28 | SPECIAL_LEVEL: very
29 | SPECIAL_TYPE: charm
30 | ---
31 | apiVersion: v1
32 | kind: ConfigMap
33 | metadata:
34 | name: dynamic-config-properties-4
35 | namespace: test
36 | creationTimestamp: "2024-05-10T12:44:17Z"
37 | data:
38 | SPECIAL_LEVEL: very
39 | SPECIAL_TYPE: charm
40 | ---
41 | apiVersion: v1
42 | kind: ConfigMap
43 | metadata:
44 | name: dynamic-config-properties-5
45 | namespace: test
46 | creationTimestamp: "2024-05-18T12:44:17Z"
47 | data:
48 | SPECIAL_LEVEL: very
49 | SPECIAL_TYPE: charm
50 | ---
51 | apiVersion: v1
52 | kind: ConfigMap
53 | metadata:
54 | name: random-name
55 | namespace: test
56 | creationTimestamp: "2024-05-11T12:44:17Z"
57 | data:
58 | SPECIAL_LEVEL: very
59 | SPECIAL_TYPE: charm
60 |
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/orphaned_configmaps/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: matching
5 | namespace: bar
6 | data:
7 | SPECIAL_LEVEL: very
8 | SPECIAL_TYPE: charm
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/orphaned_configmaps/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: config-env
5 | namespace: bar
6 | data:
7 | special.how: very
8 | ---
9 | apiVersion: v1
10 | kind: Pod
11 | metadata:
12 | name: pod-env
13 | namespace: bar
14 | spec:
15 | containers:
16 | - name: test-container
17 | image: registry.k8s.io/busybox
18 | command: [ "/bin/sh", "-c", "env" ]
19 | env:
20 | # Define the environment variable
21 | - name: SPECIAL_LEVEL_KEY
22 | valueFrom:
23 | configMapKeyRef:
24 | # The ConfigMap containing the value you want to assign to SPECIAL_LEVEL_KEY
25 | name: config-env
26 | # Specify the key associated with the value
27 | key: special.how
28 | restartPolicy: Never
29 | ---
30 | apiVersion: v1
31 | kind: ConfigMap
32 | metadata:
33 | name: config-envfrom
34 | namespace: foo
35 | data:
36 | SPECIAL_LEVEL: very
37 | SPECIAL_TYPE: charm
38 | ---
39 | apiVersion: v1
40 | kind: Pod
41 | metadata:
42 | name: pod-envfrom
43 | namespace: foo
44 | spec:
45 | containers:
46 | - name: test-container
47 | image: registry.k8s.io/busybox
48 | command: [ "/bin/sh", "-c", "env" ]
49 | envFrom:
50 | - configMapRef:
51 | name: config-envfrom
52 | restartPolicy: Never
53 | ---
54 | apiVersion: v1
55 | kind: ConfigMap
56 | metadata:
57 | name: config-volume
58 | namespace: baz
59 | data:
60 | SPECIAL_LEVEL: very
61 | SPECIAL_TYPE: charm
62 | ---
63 | apiVersion: v1
64 | kind: Pod
65 | metadata:
66 | name: dapi-test-pod
67 | namespace: baz
68 | spec:
69 | containers:
70 | - name: test-container
71 | image: registry.k8s.io/busybox
72 | command: [ "/bin/sh", "-c", "ls /etc/config/" ]
73 | volumeMounts:
74 | - name: config-volume
75 | mountPath: /etc/config
76 | volumes:
77 | - name: config-volume
78 | configMap:
79 | # Provide the name of the ConfigMap containing the files you want
80 | # to add to the container
81 | name: config-volume
82 | restartPolicy: Never
83 | ---
84 | apiVersion: v1
85 | kind: ConfigMap
86 | metadata:
87 | name: matching
88 | namespace: bar
89 | data:
90 | SPECIAL_LEVEL: very
91 | SPECIAL_TYPE: charm
92 | ---
93 | apiVersion: v1
94 | kind: ConfigMap
95 | metadata:
96 | name: not-matching
97 | namespace: kube-system
98 | data:
99 | SPECIAL_LEVEL: very
100 | SPECIAL_TYPE: charm
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/orphaned_deployments/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # Selects Deployment, Pod, and Service resources, and then filters the results to
2 | # identify deployments that have no pods or services associated with them.
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: orphaned-deployments
7 | spec:
8 | schedule: "* 0 * * *"
9 | action: Delete # Delete matching resources
10 | resourcePolicySet:
11 | resourceSelectors:
12 | - kind: Deployment
13 | group: "apps"
14 | version: v1
15 | - kind: Pod
16 | group: ""
17 | version: v1
18 | - kind: Service
19 | group: ""
20 | version: v1
21 | aggregatedSelection: |
22 | function table_equal(t1, t2)
23 | local metatable = {}
24 | metatable.__eq = function(t1, t2)
25 | if type(t1) ~= "table" or type(t2) ~= "table" then
26 | return false
27 | end
28 |
29 | local keys = {}
30 | for k in pairs(t1) do
31 | keys[k] = true
32 | end
33 |
34 | for k in pairs(t2) do
35 | if not keys[k] then
36 | return false
37 | end
38 | end
39 |
40 | for k, v in pairs(t1) do
41 | if t2[k] ~= v then
42 | return false
43 | end
44 | end
45 |
46 | return true
47 | end
48 |
49 | setmetatable(t1, metatable)
50 | setmetatable(t2, metatable)
51 |
52 | return t1 == t2
53 | end
54 |
55 | function evaluate()
56 | local hs = {}
57 | hs.message = ""
58 |
59 | local deployments = {}
60 | local pods = {}
61 | local services = {}
62 | local orphanedDeployments = {}
63 |
64 | -- Separate deployments, pods, and services from the resources
65 | for _, resource in ipairs(resources) do
66 | local kind = resource.kind
67 | if kind == "Deployment" then
68 | table.insert(deployments, resource)
69 | elseif kind == "Pod" then
70 | table.insert(pods, resource)
71 | elseif kind == "Service" then
72 | table.insert(services, resource)
73 | end
74 | end
75 |
76 | -- Identify deployments that have no pods or services associated with them
77 | for _, deployment in ipairs(deployments) do
78 | local deploymentName = deployment.metadata.name
79 | local hasPod = false
80 | local hasService = false
81 |
82 | for _, pod in ipairs(pods) do
83 | if pod.metadata.namespace == deployment.metadata.namespace then
84 | for _, owner in ipairs(pod.metadata.ownerReferences or {}) do
85 | if owner.name == deploymentName then
86 | hasPod = true
87 | break
88 | end
89 | end
90 | end
91 | end
92 |
93 | for _, service in ipairs(services) do
94 | if service.metadata.namespace == deployment.metadata.namespace then
95 | if table_equal(service.spec.selector, deployment.metadata.labels) then
96 | hasService = true
97 | break
98 | end
99 | end
100 | end
101 |
102 | if not hasPod and not hasService then
103 | table.insert(orphanedDeployments, {resource = deployment})
104 | -- keep scanning: collect every orphaned Deployment
105 | end
106 | end
107 |
108 | if #orphanedDeployments > 0 then
109 | hs.resources = orphanedDeployments
110 | end
111 | return hs
112 | end
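
The metatable indirection in table_equal is not strictly needed for a shallow comparison; an equivalent plain function (a simplification sketch, not the code Cleaner ships) is:

    -- Shallow equality of two flat tables: same keys, same values
    local function shallowEqual(t1, t2)
      if type(t1) ~= "table" or type(t2) ~= "table" then
        return false
      end
      for k, v in pairs(t1) do
        if t2[k] ~= v then return false end
      end
      for k in pairs(t2) do
        if t1[k] == nil then return false end
      end
      return true
    end

    print(shallowEqual({ app = "nginx" }, { app = "nginx" }))            -- true
    print(shallowEqual({ app = "nginx" }, { app = "nginx", x = "1" }))   -- false
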
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/orphaned_deployments/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: orphaned-deployment
5 | namespace: baz
6 | labels:
7 | app: nginx
8 | spec:
9 | replicas: 3 # Number of replicas
10 | selector:
11 | matchLabels:
12 | app: nginx
13 | template:
14 | metadata:
15 | labels:
16 | app: nginx
17 | spec:
18 | containers:
19 | - name: nginx
20 | image: nginx:latest
21 | ports:
22 | - containerPort: 80
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/orphaned_deployments/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-pod
5 | namespace: bar
6 | labels:
7 | app: nginx
8 | ownerReferences:
9 | - apiVersion: apps/v1
10 | kind: Deployment
11 | name: nginx-deployment
12 | controller: true
13 | spec:
14 | containers:
15 | - image: nginx:latest
16 | imagePullPolicy: IfNotPresent
17 | ---
18 | apiVersion: apps/v1
19 | kind: Deployment
20 | metadata:
21 | name: nginx-deployment
22 | namespace: bar
23 | labels:
24 | app: nginx
25 | spec:
26 | replicas: 3 # Number of replicas
27 | selector:
28 | matchLabels:
29 | app: nginx
30 | template:
31 | metadata:
32 | labels:
33 | app: nginx
34 | spec:
35 | containers:
36 | - name: nginx
37 | image: nginx:latest
38 | ports:
39 | - containerPort: 80
40 | ---
41 | apiVersion: v1
42 | kind: Service
43 | metadata:
44 | name: zookeeper-service
45 | namespace: foo
46 | spec:
47 | selector:
48 | app: zookeeper
49 | type: NodePort
50 | ports:
51 | - protocol: TCP
52 | port: 80 # Port for HTTP traffic
53 | targetPort: 80 # Port on the pods
54 | ---
55 | apiVersion: apps/v1
56 | kind: Deployment
57 | metadata:
58 | name: zookeeper-deployment
59 | namespace: foo
60 | labels:
61 | app: zookeeper
62 | spec:
63 | replicas: 3 # Number of replicas
64 | selector:
65 | matchLabels:
66 | app: zookeeper
67 | template:
68 | metadata:
69 | labels:
70 | app: zookeeper
71 | spec:
72 | containers:
73 | - name: nginx
74 | image: zookeeper:latest
75 | ports:
76 | - containerPort: 80
77 | ---
78 | apiVersion: apps/v1
79 | kind: Deployment
80 | metadata:
81 | name: orphaned-deployment
82 | namespace: baz
83 | labels:
84 | app: nginx
85 | spec:
86 | replicas: 3 # Number of replicas
87 | selector:
88 | matchLabels:
89 | app: nginx
90 | template:
91 | metadata:
92 | labels:
93 | app: nginx
94 | spec:
95 | containers:
96 | - name: nginx
97 | image: nginx:latest
98 | ports:
99 | - containerPort: 80
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/orphaned_persistent-volume-claims/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # Find all PersistentVolumeClaims currently not
2 | # used by any Pods. It considers all namespaces.
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: stale-persistent-volume-claim
7 | spec:
8 | schedule: "* 0 * * *"
9 | action: Delete # Delete matching resources
10 | resourcePolicySet:
11 | resourceSelectors:
12 | - kind: Pod
13 | group: ""
14 | version: v1
15 | - kind: PersistentVolumeClaim
16 | group: ""
17 | version: v1
18 | aggregatedSelection: |
19 | function isUsed(pvc, pods)
20 | if pods == nil then
21 | return false
22 | end
23 | for _, pod in ipairs(pods) do
24 | if pod.spec.volumes ~= nil then
25 | for _,volume in ipairs(pod.spec.volumes) do
26 | if volume.persistentVolumeClaim ~= nil and volume.persistentVolumeClaim.claimName == pvc.metadata.name then
27 | return true
28 | end
29 | end
30 | end
31 | end
32 | return false
33 | end
34 |
35 | function evaluate()
36 | local hs = {}
37 | hs.message = ""
38 |
39 | local pods = {}
40 | local pvcs = {}
41 | local unusedPVCs = {}
42 |
43 | -- Separate pods and pvcs from the resources
44 | -- Group those by namespace
45 | for _, resource in ipairs(resources) do
46 | local kind = resource.kind
47 | if kind == "Pod" then
48 | if not pods[resource.metadata.namespace] then
49 | pods[resource.metadata.namespace] = {}
50 | end
51 | table.insert(pods[resource.metadata.namespace], resource)
52 | elseif kind == "PersistentVolumeClaim" then
53 | if not pvcs[resource.metadata.namespace] then
54 | pvcs[resource.metadata.namespace] = {}
55 | end
56 | table.insert(pvcs[resource.metadata.namespace], resource)
57 | end
58 | end
59 |
60 | -- Iterate through each namespace and identify unused PVCs
61 | for namespace, perNamespacePVCs in pairs(pvcs) do
62 | for _, pvc in ipairs(perNamespacePVCs) do
63 | if not isUsed(pvc, pods[namespace]) then
64 | table.insert(unusedPVCs, {resource = pvc})
65 | end
66 | end
67 | end
68 |
69 | if #unusedPVCs > 0 then
70 | hs.resources = unusedPVCs
71 | end
72 | return hs
73 | end
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/orphaned_persistent-volume-claims/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: unused
5 | namespace: bar
6 | spec:
7 | storageClassName: manual
8 | accessModes:
9 | - ReadWriteOnce
10 | resources:
11 | requests:
12 | storage: 3Gi
13 |
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/orphaned_secrets/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: matching
5 | namespace: baz
6 | type: Opaque
7 | data:
8 | username: foo
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_clusterrole.yaml/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all unused ClusterRole instances.
2 | # An unused ClusterRole is an instance that is not referenced
3 | # by any ClusterRoleBinding or RoleBinding
4 | apiVersion: apps.projectsveltos.io/v1alpha1
5 | kind: Cleaner
6 | metadata:
7 | name: unused-clusterroles
8 | spec:
9 | schedule: "* 0 * * *"
10 | action: Delete
11 | resourcePolicySet:
12 | resourceSelectors:
13 | - kind: ClusterRole
14 | group: "rbac.authorization.k8s.io"
15 | version: v1
16 | - kind: ClusterRoleBinding
17 | group: "rbac.authorization.k8s.io"
18 | version: v1
19 | - kind: RoleBinding
20 | group: "rbac.authorization.k8s.io"
21 | version: v1
22 | aggregatedSelection: |
23 | function evaluate()
24 | local hs = {}
25 | hs.message = ""
26 |
27 | -- Contains list of existing ClusterRoles
28 | local existingClusterRoles = {}
29 | -- Contains list of ClusterRoles currently referenced by
30 | -- roleBindings or ClusterRoleBindings
31 | local usedClusterRoles = {}
32 |
33 | local unusedClusterRoles = {}
34 |
35 | -- Create list of existingClusterRoles and usedClusterRoles
36 | for _, resource in ipairs(resources) do
37 | local kind = resource.kind
38 | if kind == "ClusterRole" then
39 | table.insert(existingClusterRoles, resource)
40 | elseif kind == "ClusterRoleBinding" then
41 | if resource.roleRef.kind == "ClusterRole" then
42 | usedClusterRoles[resource.roleRef.name] = true
43 | end
44 | elseif kind == "RoleBinding" then
45 | if resource.roleRef.kind == "ClusterRole" then
46 | usedClusterRoles[resource.roleRef.name] = true
47 | end
48 | end
49 | end
50 |
51 | -- Iterate over existing clusterRoles and find not used anymore
52 | for _,clusterRole in ipairs(existingClusterRoles) do
53 | if not usedClusterRoles[clusterRole.metadata.name] then
54 | table.insert(unusedClusterRoles, {resource = clusterRole})
55 | end
56 | end
57 |
58 | if #unusedClusterRoles > 0 then
59 | hs.resources = unusedClusterRoles
60 | end
61 | return hs
62 | end
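
Unlike the Role cleaner earlier, no namespace enters the key: ClusterRoles are cluster-scoped, and a roleRef to kind ClusterRole carries only a name, whether it sits in a ClusterRoleBinding or a RoleBinding. A standalone sketch with hypothetical bindings:

    local usedClusterRoles = {}
    local bindings = {
      { roleRef = { kind = "ClusterRole", name = "reader" } },
      { roleRef = { kind = "ClusterRole", name = "editor" } },
    }

    for _, binding in ipairs(bindings) do
      if binding.roleRef.kind == "ClusterRole" then
        usedClusterRoles[binding.roleRef.name] = true  -- bare name as key
      end
    end

    print(usedClusterRoles["reader"], usedClusterRoles["editor"])  -- true  true
    print(usedClusterRoles["unused"])                              -- nil: stale
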
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_clusterrole.yaml/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: unused
5 | rules:
6 | - apiGroups: ["apps"]
7 | resources: ["Deployments", "ReplicaSets"]
8 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_clusterrole.yaml/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: used-by-clusterrolebinding
5 | rules:
6 | - apiGroups: ["*"]
7 | resources: ["Pods"]
8 | verbs: ["get", "list", "watch"]
9 | ---
10 | apiVersion: rbac.authorization.k8s.io/v1
11 | kind: ClusterRoleBinding
12 | metadata:
13 | name: example-clusterbinding
14 | subjects:
15 | - kind: ServiceAccount
16 | name: example
17 | namespace: default
18 | roleRef:
19 | kind: ClusterRole
20 | name: used-by-clusterrolebinding
21 | apiGroup: rbac.authorization.k8s.io
22 | ---
23 | apiVersion: rbac.authorization.k8s.io/v1
24 | kind: ClusterRole
25 | metadata:
26 | name: used-by-rolebinding
27 | rules:
28 | - apiGroups: ["apps"]
29 | resources: ["Deployments", "ReplicaSets"]
30 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
31 | ---
32 | kind: RoleBinding
33 | apiVersion: rbac.authorization.k8s.io/v1
34 | metadata:
35 | name: example-rolebinding
36 | namespace: default
37 | subjects:
38 | - kind: ServiceAccount
39 | name: example
40 | namespace: default
41 | roleRef:
42 | kind: ClusterRole
43 | name: used-by-rolebinding
44 | apiGroup: rbac.authorization.k8s.io
45 | ---
46 | apiVersion: rbac.authorization.k8s.io/v1
47 | kind: ClusterRole
48 | metadata:
49 | name: unused
50 | rules:
51 | - apiGroups: ["apps"]
52 | resources: ["Deployments", "ReplicaSets"]
53 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
54 |
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_horizontal-pod-autoscalers/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any HorizontalPodAutoscaler instance
2 | # matching no Deployment or StatefulSet and deletes those instances
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: unused-horizontal-pod-autoscalers
7 | spec:
8 | schedule: "* 0 * * *"
9 | action: Delete # Delete matching resources
10 | resourcePolicySet:
11 | resourceSelectors:
12 | - kind: Deployment
13 | group: "apps"
14 | version: v1
15 | - kind: StatefulSet
16 | group: "apps"
17 | version: v1
18 | - kind: HorizontalPodAutoscaler
19 | group: "autoscaling"
20 | version: v2
21 | aggregatedSelection: |
22 | function getKey(namespace, name)
23 | return namespace .. ":" .. name
24 | end
25 |
26 | function evaluate()
27 | local hs = {}
28 | hs.message = ""
29 |
30 | local deployments = {}
31 | local statefulSets = {}
32 | local autoscalers = {}
33 | local unusedAutoscalers = {}
34 |
35 | for _, resource in ipairs(resources) do
36 | local kind = resource.kind
37 | if kind == "Deployment" then
38 | key = getKey(resource.metadata.namespace, resource.metadata.name)
39 | deployments[key] = true
40 | elseif kind == "StatefulSet" then
41 | key = getKey(resource.metadata.namespace, resource.metadata.name)
42 | statefulSets[key] = true
43 | elseif kind == "HorizontalPodAutoscaler" then
44 | table.insert(autoscalers, resource)
45 | end
46 | end
47 |
48 | -- Check for each horizontalPodAutoscaler if there is a matching Deployment or StatefulSet
49 | for _,hpa in ipairs(autoscalers) do
50 | key = getKey(hpa.metadata.namespace, hpa.spec.scaleTargetRef.name)
51 | if hpa.spec.scaleTargetRef.kind == "Deployment" then
52 | if not deployments[key] then
53 | table.insert(unusedAutoscalers, {resource = hpa})
54 | end
55 | elseif hpa.spec.scaleTargetRef.kind == "StatefulSet" then
56 | if not statefulSets[key] then
57 | table.insert(unusedAutoscalers, {resource = hpa})
58 | end
59 | end
60 | end
61 |
62 | if #unusedAutoscalers > 0 then
63 | hs.resources = unusedAutoscalers
64 | end
65 | return hs
66 | end
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_horizontal-pod-autoscalers/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v2
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: unused
5 | namespace: foobar
6 | spec:
7 | scaleTargetRef:
8 | apiVersion: apps/v1
9 | kind: StatefulSet
10 | name: my-statefulset
11 | minReplicas: 1
12 | maxReplicas: 5
13 | targetCPUUtilizationPercentage: 80
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_horizontal-pod-autoscalers/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v2
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: hpa-deployment
5 | namespace: bar
6 | spec:
7 | scaleTargetRef:
8 | apiVersion: apps/v1
9 | kind: Deployment
10 | name: php-apache
11 | minReplicas: 1
12 | maxReplicas: 10
13 | metrics:
14 | - type: Resource
15 | resource:
16 | name: cpu
17 | target:
18 | type: Utilization
19 | averageUtilization: 50
20 | status:
21 | observedGeneration: 1
22 | lastScaleTime:
23 | currentReplicas: 1
24 | desiredReplicas: 1
25 | currentMetrics:
26 | - type: Resource
27 | resource:
28 | name: cpu
29 | current:
30 | averageUtilization: 0
31 | averageValue: 0
32 | ---
33 | apiVersion: apps/v1
34 | kind: Deployment
35 | metadata:
36 | name: php-apache
37 | namespace: bar
38 | spec:
39 | selector:
40 | matchLabels:
41 | run: php-apache
42 | template:
43 | metadata:
44 | labels:
45 | run: php-apache
46 | spec:
47 | containers:
48 | - name: php-apache
49 | image: registry.k8s.io/hpa-example
50 | ports:
51 | - containerPort: 80
52 | resources:
53 | limits:
54 | cpu: 500m
55 | requests:
56 | cpu: 200m
57 | ---
58 | apiVersion: autoscaling/v2
59 | kind: HorizontalPodAutoscaler
60 | metadata:
61 | name: hpa-statefulset
62 | namespace: foo
63 | spec:
64 | scaleTargetRef:
65 | apiVersion: apps/v1
66 | kind: StatefulSet
67 | name: my-statefulset
68 | minReplicas: 1
69 | maxReplicas: 5
70 | targetCPUUtilizationPercentage: 80
71 | ---
72 | apiVersion: apps/v1
73 | kind: StatefulSet
74 | metadata:
75 | name: my-statefulset
76 | namespace: foo
77 | spec:
78 | selector:
79 | matchLabels:
80 | app: nginx # has to match .spec.template.metadata.labels
81 | serviceName: "nginx"
82 | replicas: 3 # by default is 1
83 | minReadySeconds: 10 # by default is 0
84 | template:
85 | metadata:
86 | labels:
87 | app: nginx # has to match .spec.selector.matchLabels
88 | spec:
89 | terminationGracePeriodSeconds: 10
90 | containers:
91 | - name: nginx
92 | image: registry.k8s.io/nginx-slim:0.8
93 | ports:
94 | - containerPort: 80
95 | name: web
96 | volumeMounts:
97 | - name: www
98 | mountPath: /usr/share/nginx/html
99 | volumeClaimTemplates:
100 | - metadata:
101 | name: www
102 | spec:
103 | accessModes: [ "ReadWriteOnce" ]
104 | storageClassName: "my-storage-class"
105 | resources:
106 | requests:
107 | storage: 1Gi
108 | ---
109 | apiVersion: autoscaling/v2
110 | kind: HorizontalPodAutoscaler
111 | metadata:
112 | name: unused
113 | namespace: foobar
114 | spec:
115 | scaleTargetRef:
116 | apiVersion: apps/v1
117 | kind: StatefulSet
118 | name: my-statefulset
119 | minReplicas: 1
120 | maxReplicas: 5
121 | targetCPUUtilizationPercentage: 80
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_ingresses/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-default-backend
5 | namespace: foobar
6 | spec:
7 | defaultBackend:
8 | service:
9 | name: default-backend
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_ingresses/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-service
5 | namespace: foo
6 | annotations:
7 | nginx.ingress.kubernetes.io/rewrite-target: /
8 | spec:
9 | ingressClassName: nginx-example
10 | rules:
11 | - http:
12 | paths:
13 | - path: /testpath
14 | pathType: Prefix
15 | backend:
16 | service:
17 | name: test
18 | port:
19 | number: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | name: test
25 | namespace: foo
26 | labels:
27 | app: my-app
28 | spec:
29 | type: ClusterIP
30 | selector:
31 | app: my-app
32 | ports:
33 | - protocol: TCP
34 | port: 80
35 | targetPort: 8080
36 | ---
37 | apiVersion: networking.k8s.io/v1
38 | kind: Ingress
39 | metadata:
40 | name: ingress-default-backend
41 | namespace: bar
42 | spec:
43 | defaultBackend:
44 | service:
45 | name: default-backend
46 | ---
47 | apiVersion: v1
48 | kind: Service
49 | metadata:
50 | name: default-backend
51 | namespace: bar
52 | spec:
53 | type: NodePort
54 | selector:
55 | app: default-backend
56 | ports:
57 | - protocol: TCP
58 | port: 80
59 | targetPort: 8080
60 | ---
61 | apiVersion: networking.k8s.io/v1
62 | kind: Ingress
63 | metadata:
64 | name: ingress-default-backend
65 | namespace: foobar
66 | spec:
67 | defaultBackend:
68 | service:
69 | name: default-backend
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_poddisruptionbudget/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all PodDisruptionBudget instances which are stale.
2 | # A PodDisruptionBudget is stale if:
3 | # - matches no Deployment instance
4 | # - matches no StatefulSet instance
5 | apiVersion: apps.projectsveltos.io/v1alpha1
6 | kind: Cleaner
7 | metadata:
8 | name: stale-pod-disruption-budgets
9 | spec:
10 | schedule: "* 0 * * *"
11 | action: Delete
12 | resourcePolicySet:
13 | resourceSelectors:
14 | - kind: PodDisruptionBudget
15 | group: "policy"
16 | version: v1
17 | - kind: Deployment
18 | group: "apps"
19 | version: v1
20 | - kind: StatefulSet
21 | group: "apps"
22 | version: v1
23 | aggregatedSelection: |
24 | function isMatch(pdbLabels, destLabels)
25 | for k,v in pairs(pdbLabels) do
26 | if destLabels[k] ~= v then
27 | return false
28 | end
29 | end
30 | return true
31 | end
32 |
33 | function isMatchingAny(pdb, resources)
34 | if resources == nil then
35 | return false
36 | end
37 | for _,resource in ipairs(resources) do
38 | if pdb.metadata.namespace == resource.metadata.namespace then
39 | if isMatch(pdb.spec.selector.matchLabels, resource.spec.template.metadata.labels) then
40 | return true
41 | end
42 | end
43 | end
44 | return false
45 | end
46 |
47 | function evaluate()
48 | local hs = {}
49 | hs.message = ""
50 |
51 | local pdbs = {}
52 | local deployments = {}
53 | local statefulsets = {}
54 | local stalePdbs = {}
55 |
56 | -- Separate PDBs, Deployments, and StatefulSets from the resources
57 | for _, resource in ipairs(resources) do
58 | local kind = resource.kind
59 | if kind == "PodDisruptionBudget" then
60 | table.insert(pdbs, resource)
61 | elseif kind == "Deployment" then
62 | table.insert(deployments, resource)
63 | elseif kind == "StatefulSet" then
64 | table.insert(statefulsets, resource)
65 | end
66 | end
67 |
68 | for _,pdb in ipairs(pdbs) do
69 | if not isMatchingAny(pdb,deployments) and not isMatchingAny(pdb,statefulsets) then
70 | table.insert(stalePdbs, {resource = pdb})
71 | end
72 | end
73 |
74 | if #stalePdbs > 0 then
75 | hs.resources = stalePdbs
76 | end
77 | return hs
78 | end
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_poddisruptionbudget/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | name: pdb-unused
5 | spec:
6 | minAvailable: 2
7 | selector:
8 | matchLabels:
9 | app: foo
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_poddisruptionbudget/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | name: pdb-deployment
5 | spec:
6 | minAvailable: 2
7 | selector:
8 | matchLabels:
9 | app: nginx
10 | ---
11 | apiVersion: apps/v1
12 | kind: Deployment
13 | metadata:
14 | name: nginx-deployment
15 | labels:
16 | app: nginx
17 | spec:
18 | replicas: 3 # Number of replicas
19 | selector:
20 | matchLabels:
21 | app: nginx
22 | template:
23 | metadata:
24 | labels:
25 | app: nginx
26 | spec:
27 | containers:
28 | - name: nginx
29 | image: nginx:latest
30 | ports:
31 | - containerPort: 80
32 | ---
33 | apiVersion: policy/v1
34 | kind: PodDisruptionBudget
35 | metadata:
36 | name: pdb-statefulset
37 | spec:
38 | minAvailable: 2
39 | selector:
40 | matchLabels:
41 | app: nginx
42 | ---
43 | apiVersion: apps/v1
44 | kind: StatefulSet
45 | metadata:
46 | name: web
47 | spec:
48 | selector:
49 | matchLabels:
50 | app: zookeeper
51 | serviceName: "zookeeper"
52 | replicas: 3
53 | minReadySeconds: 10
54 | template:
55 | metadata:
56 | labels:
57 | app: zookeeper
58 | spec:
59 | terminationGracePeriodSeconds: 10
60 | containers:
61 | - name: zookeeper
62 | image: registry.k8s.io/zookeeper:1.0
63 | volumeClaimTemplates:
64 | - metadata:
65 | name: www
66 | spec:
67 | accessModes: [ "ReadWriteOnce" ]
68 | storageClassName: "my-storage-class"
69 | resources:
70 | requests:
71 | storage: 1Gi
72 | ---
73 | apiVersion: policy/v1
74 | kind: PodDisruptionBudget
75 | metadata:
76 | name: pdb-unused
77 | spec:
78 | minAvailable: 2
79 | selector:
80 | matchLabels:
81 | app: foo
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_roles/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all unused Role instances.
2 | # All namespaces are considered.
3 | # An unused Role is an instance that is not referenced
4 | # by any RoleBinding
5 | apiVersion: apps.projectsveltos.io/v1alpha1
6 | kind: Cleaner
7 | metadata:
8 | name: unused-roles
9 | spec:
10 | schedule: "* 0 * * *"
11 | action: Delete
12 | resourcePolicySet:
13 | resourceSelectors:
14 | - kind: Role
15 | group: "rbac.authorization.k8s.io"
16 | version: v1
17 | - kind: RoleBinding
18 | group: "rbac.authorization.k8s.io"
19 | version: v1
20 | aggregatedSelection: |
21 | -- Given a Role's namespace and name, returns a unique key
22 | function getRoleKey(namespace, name)
23 | return namespace .. ":" .. name
24 | end
25 |
26 | function evaluate()
27 | local hs = {}
28 | hs.message = ""
29 |
30 | -- Contains list of existing roles
31 | local existingRoles = {}
32 | -- Contains list of roles currently referenced by roleBindings
33 | local usedRoles = {}
34 |
35 | local unusedRoles = {}
36 |
37 | -- Create list of existingRoles and usedRoles
38 | for _, resource in ipairs(resources) do
39 | local kind = resource.kind
40 | if kind == "Role" then
41 | table.insert(existingRoles, resource)
42 | elseif kind == "RoleBinding" then
43 | if resource.roleRef.kind == "Role" then
44 | roleKey = getRoleKey(resource.metadata.namespace, resource.roleRef.name)
45 | usedRoles[roleKey] = true
46 | end
47 | end
48 | end
49 |
50 | -- Iterate over existing Roles and find those no longer used
51 | for _,role in ipairs(existingRoles) do
52 | roleKey = getRoleKey(role.metadata.namespace, role.metadata.name)
53 | if not usedRoles[roleKey] then
54 | table.insert(unusedRoles, {resource = role})
55 | end
56 | end
57 |
58 | if #unusedRoles > 0 then
59 | hs.resources = unusedRoles
60 | end
61 | return hs
62 | end
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_roles/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | namespace: default
5 | name: unused-role
6 | rules:
7 | - apiGroups: [""] # "" indicates the core API group
8 | resources: ["pods"]
9 | verbs: ["get", "watch", "list"]
10 | ---
11 | apiVersion: rbac.authorization.k8s.io/v1
12 | kind: Role
13 | metadata:
14 | namespace: default
15 | name: unused-role2
16 | rules:
17 | - apiGroups: [""] # "" indicates the core API group
18 | resources: ["pods"]
19 | verbs: ["get", "watch", "list"]
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_roles/resources.yaml:
--------------------------------------------------------------------------------
1 | kind: Role
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | namespace: default
5 | name: used-role
6 | rules:
7 | - apiGroups: ["*"]
8 | resources: ["pods"]
9 | verbs: ["get", "list", "watch"]
10 | - apiGroups: ["apps"]
11 | resources: ["deployments", "replicasets"]
12 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
13 | ---
14 | kind: RoleBinding
15 | apiVersion: rbac.authorization.k8s.io/v1
16 | metadata:
17 | namespace: default
18 | name: example-rolebinding
19 | subjects:
20 | - kind: ServiceAccount
21 | name: example
22 | namespace: default
23 | roleRef:
24 | kind: Role
25 | name: used-role
26 | apiGroup: rbac.authorization.k8s.io
27 | ---
28 | kind: Role
29 | apiVersion: rbac.authorization.k8s.io/v1
30 | metadata:
31 | namespace: bar
32 | name: used-role
33 | rules:
34 | - apiGroups: ["*"]
35 | resources: ["pods"]
36 | verbs: ["get", "list", "watch"]
37 | - apiGroups: ["apps"]
38 | resources: ["deployments", "replicasets"]
39 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
40 | ---
41 | kind: RoleBinding
42 | apiVersion: rbac.authorization.k8s.io/v1
43 | metadata:
44 | namespace: bar
45 | name: example-rolebinding
46 | subjects:
47 | - kind: ServiceAccount
48 | name: example
49 | namespace: bar
50 | roleRef:
51 | kind: Role
52 | name: used-role
53 | apiGroup: rbac.authorization.k8s.io
54 | ---
55 | apiVersion: rbac.authorization.k8s.io/v1
56 | kind: Role
57 | metadata:
58 | namespace: default
59 | name: unused-role
60 | rules:
61 | - apiGroups: [""] # "" indicates the core API group
62 | resources: ["pods"]
63 | verbs: ["get", "watch", "list"]
64 | ---
65 | apiVersion: rbac.authorization.k8s.io/v1
66 | kind: Role
67 | metadata:
68 | namespace: default
69 | name: unused-role2
70 | rules:
71 | - apiGroups: [""] # "" indicates the core API group
72 | resources: ["pods"]
73 | verbs: ["get", "watch", "list"]
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_service-accounts/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds all unused ServiceAccount instances. All namespaces are considered.
2 | # A ServiceAccount is unused if:
3 | # - no Pod uses it
4 | # - no RoleBinding references it
5 | # - no ClusterRoleBinding references it
6 | apiVersion: apps.projectsveltos.io/v1alpha1
7 | kind: Cleaner
8 | metadata:
9 | name: unused-service-accounts
10 | spec:
11 | schedule: "* 0 * * *"
12 | action: Delete
13 | resourcePolicySet:
14 | resourceSelectors:
15 | - kind: Pod
16 | group: ""
17 | version: v1
18 | - kind: ServiceAccount
19 | group: ""
20 | version: v1
21 | - kind: RoleBinding
22 | group: "rbac.authorization.k8s.io"
23 | version: v1
24 | - kind: ClusterRoleBinding
25 | group: "rbac.authorization.k8s.io"
26 | version: v1
27 | aggregatedSelection: |
28 | function getKey(namespace, name)
29 | return namespace .. ":" .. name
30 | end
31 |
32 | function addRoleBindingServiceAccounts(roleBinding, usedServiceAccounts)
33 | if roleBinding.subjects ~= nil then
34 | for _,subject in ipairs(roleBinding.subjects) do
35 | if subject.kind == "ServiceAccount" then
36 | key = getKey(roleBinding.metadata.namespace, subject.name)
37 | usedServiceAccounts[key] = true
38 | end
39 | end
40 | end
41 | end
42 |
43 | function addClusterRoleBindingServiceAccounts(clusterRoleBinding, usedServiceAccounts)
44 | if clusterRoleBinding.subjects ~= nil then
45 | for _,subject in ipairs(clusterRoleBinding.subjects) do
46 | if subject.kind == "ServiceAccount" then
47 | key = getKey(subject.namespace, subject.name)
48 | usedServiceAccounts[key] = true
49 | end
50 | end
51 | end
52 | end
53 |
54 | function addPodServiceAccount(pod, usedServiceAccounts)
55 | if pod.spec.serviceAccountName ~= nil then
56 | serviceAccount = pod.spec.serviceAccountName
57 | else
58 | serviceAccount = "default"
59 | end
60 | key = getKey(pod.metadata.namespace, serviceAccount)
61 | usedServiceAccounts[key] = true
62 | end
63 |
64 | function evaluate()
65 | local hs = {}
66 | hs.message = ""
67 |
68 | local serviceAccounts = {}
69 | local usedServiceAccounts = {}
70 | local unusedServiceAccounts = {}
71 |
72 | for _, resource in ipairs(resources) do
73 | local kind = resource.kind
74 | if kind == "ServiceAccount" then
75 | table.insert(serviceAccounts, resource)
76 | elseif kind == "Pod" then
77 | addPodServiceAccount(resource, usedServiceAccounts)
78 | elseif kind == "RoleBinding" then
79 | addRoleBindingServiceAccounts(resource, usedServiceAccounts)
80 | elseif kind == "ClusterRoleBinding" then
81 | addClusterRoleBindingServiceAccounts(resource, usedServiceAccounts)
82 | end
83 | end
84 |
85 | -- walk all existing serviceAccounts and find the unused ones
86 | for _,serviceAccount in ipairs(serviceAccounts) do
87 | key = getKey(serviceAccount.metadata.namespace, serviceAccount.metadata.name)
88 | if not usedServiceAccounts[key] then
89 | table.insert(unusedServiceAccounts, {resource = serviceAccount})
90 | end
91 | end
92 |
93 | if #unusedServiceAccounts > 0 then
94 | hs.resources = unusedServiceAccounts
95 | end
96 | return hs
97 | end
98 |
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_service-accounts/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: sa-unused
5 | namespace: baz
--------------------------------------------------------------------------------
/internal/controller/executor/validate_aggregatedselection/unused_service-accounts/resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: sa-used-by-pod
5 | namespace: bar
6 | ---
7 | apiVersion: v1
8 | kind: Pod
9 | metadata:
10 | name: my-pod
11 | namespace: bar
12 | labels:
13 | app: my-application
14 | spec:
15 | serviceAccountName: sa-used-by-pod
16 | containers:
17 | - name: my-container
18 | image: busybox:latest
19 | command: ["/bin/sh", "-c", "while true; do echo hello from my-pod; sleep 1; done"]
20 | resources:
21 | requests:
22 | cpu: "100m"
23 | memory: "200Mi"
24 | limits:
25 | cpu: "500m"
26 | memory: "1Gi"
27 | ---
28 | apiVersion: v1
29 | kind: ServiceAccount
30 | metadata:
31 | name: sa-used-by-rolebinding
32 | namespace: test
33 | ---
34 | apiVersion: rbac.authorization.k8s.io/v1
35 | kind: RoleBinding
36 | metadata:
37 | name: my-role-binding
38 | namespace: test
39 | roleRef:
40 | apiGroup: rbac.authorization.k8s.io
41 | kind: Role
42 | name: event-gateway
43 | subjects:
44 | - kind: ServiceAccount
45 | name: sa-used-by-rolebinding
46 | ---
47 | apiVersion: v1
48 | kind: ServiceAccount
49 | metadata:
50 | name: sa-used-by-clusterrolebinding
51 | namespace: baz
52 | ---
53 | apiVersion: rbac.authorization.k8s.io/v1
54 | kind: ClusterRoleBinding
55 | metadata:
56 | name: system:kube-dns
57 | roleRef:
58 | apiGroup: rbac.authorization.k8s.io
59 | kind: ClusterRole
60 | name: system:kube-dns
61 | subjects:
62 | - kind: ServiceAccount
63 | name: sa-used-by-clusterrolebinding
64 | namespace: baz
65 | ---
66 | apiVersion: v1
67 | kind: ServiceAccount
68 | metadata:
69 | name: sa-unused
70 | namespace: baz
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/README.md:
--------------------------------------------------------------------------------
1 | If you want to validate your evaluate functions:
2 |
3 | 1. create a sub-directory
4 | 2. create a file named __cleaner.yaml__ containing your Cleaner instance
5 | 3. create a file named __matching.yaml__ containing a resource that matches your __Cleaner.ResourcePolicySet.ResourceSelector__
6 | 4. create a file named __non-matching.yaml__ containing a resource that does not match your __Cleaner.ResourcePolicySet.ResourceSelector__
7 | 5. run ``make test``
8 |
9 | That will run the exact code Cleaner will run in your cluster.
10 | If you see no error, your Cleaner instance is correct.
11 |
12 | **This only validates __Cleaner.ResourcePolicySet.ResourceSelector__**
13 |
14 | If you need to validate your transform function, follow the instructions in the __validate_transform__ directory.
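15 |
16 | For example, the layout for a hypothetical check named __my_check__ would look like this:
17 |
18 | ```
19 | validate_resourceselector/
20 | └── my_check/
21 |     ├── cleaner.yaml      # the Cleaner instance under test
22 |     ├── matching.yaml     # resource the ResourceSelector must match
23 |     └── non-matching.yaml # resource the ResourceSelector must not match
24 | ```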
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/completed_jobs/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any Job that:
2 | # - has status.completionTime set
3 | # - has status.succeeded set to a value greater than zero
4 | # (i.e., the Job completed successfully)
5 | # and instructs Cleaner to delete it.
6 | apiVersion: apps.projectsveltos.io/v1alpha1
7 | kind: Cleaner
8 | metadata:
9 | name: completed-jobs
10 | spec:
11 | schedule: "* 0 * * *"
12 | resourcePolicySet:
13 | resourceSelectors:
14 | - kind: Job
15 | group: "batch"
16 | version: v1
17 | evaluate: |
18 | function evaluate()
19 | hs = {}
20 | hs.matching = false
21 | if obj.status ~= nil then
22 | if obj.status.completionTime ~= nil and obj.status.succeeded ~= nil and obj.status.succeeded > 0 then
23 | hs.matching = true
24 | end
25 | end
26 | return hs
27 | end
28 | action: Delete
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/completed_jobs/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: pi
5 | spec:
6 | template:
7 | spec:
8 | containers:
9 | - name: pi
10 | image: perl:5.34.0
11 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
12 | restartPolicy: Never
13 | backoffLimit: 4
14 | status:
15 | conditions:
16 | - type: Complete
17 | status: "True"
18 | lastProbeTime: "2023-12-15T14:21:53Z"
19 | lastTransitionTime: "2023-12-15T14:21:53Z"
20 | reason: JobSucceeded
21 | message: Job has completed successfully
22 | active: 0
23 | succeeded: 5
24 | failed: 0
25 | completionTime: "2023-12-15T14:21:53Z"
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/completed_jobs/non-matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: pi
5 | spec:
6 | template:
7 | spec:
8 | containers:
9 | - name: pi
10 | image: perl:5.34.0
11 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
12 | restartPolicy: Never
13 | backoffLimit: 4
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/deleted_pods/cleaner.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps.projectsveltos.io/v1alpha1
2 | kind: Cleaner
3 | metadata:
4 | name: terminating-pods
5 | spec:
6 | schedule: "*/5 * * * *"
7 | resourcePolicySet:
8 | resourceSelectors:
9 | - kind: Pod
10 | group: ""
11 | version: v1
12 | excludeDeleted: false
13 | evaluate: |
14 | function evaluate()
15 | hs = {}
16 | hs.matching = false
17 |
18 | -- Check if the pod has a deletionTimestamp field (i.e., pod is terminating)
19 | if obj.metadata.deletionTimestamp ~= nil then
20 | -- If deletionTimestamp has a value, the pod is terminating
21 | hs.matching = true
22 | end
23 |
24 | return hs
25 | end
26 | action: Delete
27 | deleteOptions:
28 | gracePeriodSeconds: 0
29 | propagationPolicy: Background
30 |
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/deleted_pods/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: my-pod
5 | deletionTimestamp: "2024-09-30T10:50:51Z"
6 | spec:
7 | containers:
8 | - name: my-container
9 | image: busybox
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/deleted_pods/non-matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: my-pod
5 | spec:
6 | containers:
7 | - name: my-container
8 | image: busybox
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/deployment_with_zero_replicas/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any Deployment in any namespace
2 | # with spec.replicas set to 0 and deletes those instances
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: deployment-with-zero-replicas
7 | spec:
8 | schedule: "* 0 * * *"
9 | resourcePolicySet:
10 | resourceSelectors:
11 | - kind: Deployment
12 | group: "apps"
13 | version: v1
14 | evaluate: |
15 | function evaluate()
16 | hs = {}
17 | hs.matching = false
18 | if obj.spec.replicas == 0 then
19 | hs.matching = true
20 | end
21 | return hs
22 | end
23 | action: Delete
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/deployment_with_zero_replicas/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: zero-replicas
5 | spec:
6 | replicas: 0
7 | template:
8 | metadata:
9 | labels:
10 | app: php-apache
11 | spec:
12 | containers:
13 | - name: php-apache
14 | image: php:8.0-apache
15 | resources:
16 | requests:
17 | cpu: 100m
18 | memory: 128Mi
19 | limits:
20 | cpu: 200m
21 | memory: 256Mi
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/deployment_with_zero_replicas/non-matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: zero-replicas
5 | spec:
6 | replicas: 5
7 | template:
8 | metadata:
9 | labels:
10 | app: php-apache
11 | spec:
12 | containers:
13 | - name: php-apache
14 | image: php:8.0-apache
15 | resources:
16 | requests:
17 | cpu: 100m
18 | memory: 128Mi
19 | limits:
20 | cpu: 200m
21 | memory: 256Mi
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/https_service/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # All services offering https (port 443 or 8443) are a match
2 | apiVersion: apps.projectsveltos.io/v1alpha1
3 | kind: Cleaner
4 | metadata:
5 | name: https-service
6 | spec:
7 | schedule: "* 0 * * *"
8 | resourcePolicySet:
9 | resourceSelectors:
10 | - group: ""
11 | version: "v1"
12 | kind: "Service"
13 | evaluate: |
14 | function evaluate()
15 | hs = {}
16 | hs.matching = false
17 | if obj.spec.ports ~= nil then
18 | for _,p in pairs(obj.spec.ports) do
19 | if p.port == 443 or p.port == 8443 then
20 | hs.matching = true
21 | end
22 | end
23 | end
24 | return hs
25 | end
26 | action: Delete
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/https_service/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-service
5 | spec:
6 | selector:
7 | app.kubernetes.io/name: MyApp
8 | ports:
9 | - name: http
10 | protocol: TCP
11 | port: 80
12 | targetPort: 9376
13 | - name: https
14 | protocol: TCP
15 | port: 443
16 | targetPort: 9377
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/https_service/non-matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-service
5 | spec:
6 | selector:
7 | app.kubernetes.io/name: MyApp
8 | ports:
9 | - name: http
10 | protocol: TCP
11 | port: 80
12 | targetPort: 9376
13 |
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/peristent-volumes/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any PersistentVolume with phase
2 | # set to anything but "Bound" and deletes those instances
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: unbound-persistent-volumes
7 | spec:
8 | schedule: "* 0 * * *"
9 | resourcePolicySet:
10 | resourceSelectors:
11 | - kind: PersistentVolume
12 | group: ""
13 | version: v1
14 | evaluate: |
15 | function evaluate()
16 | hs = {}
17 | hs.matching = false
18 | if obj.status ~= nil and obj.status.phase ~= "Bound" then
19 | hs.matching = true
20 | end
21 | return hs
22 | end
23 | action: Delete
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/peristent-volumes/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv0003
5 | spec:
6 | capacity:
7 | storage: 5Gi
8 | volumeMode: Filesystem
9 | accessModes:
10 | - ReadWriteOnce
11 | persistentVolumeReclaimPolicy: Recycle
12 | storageClassName: slow
13 | mountOptions:
14 | - hard
15 | - nfsvers=4.1
16 | nfs:
17 | path: /tmp
18 | server: 172.17.0.2
19 | status:
20 | phase: Released
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/peristent-volumes/non-matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv0003
5 | spec:
6 | capacity:
7 | storage: 5Gi
8 | volumeMode: Filesystem
9 | accessModes:
10 | - ReadWriteOnce
11 | persistentVolumeReclaimPolicy: Recycle
12 | storageClassName: slow
13 | mountOptions:
14 | - hard
15 | - nfsvers=4.1
16 | nfs:
17 | path: /tmp
18 | server: 172.17.0.2
19 | status:
20 | phase: Bound
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/statefulset_with_zero_replicas/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any StatefulSet in any namespace
2 | # with spec.replicas set to 0 and deletes those instances
3 | apiVersion: apps.projectsveltos.io/v1alpha1
4 | kind: Cleaner
5 | metadata:
6 | name: statefulset-with-zero-replicas
7 | spec:
8 | schedule: "* 0 * * *"
9 | resourcePolicySet:
10 | resourceSelectors:
11 | - kind: StatefulSet
12 | group: "apps"
13 | version: v1
14 | evaluate: |
15 | function evaluate()
16 | hs = {}
17 | hs.matching = false
18 | if obj.spec.replicas == 0 then
19 | hs.matching = true
20 | end
21 | return hs
22 | end
23 | action: Delete
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/statefulset_with_zero_replicas/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: matching
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: nginx # has to match .spec.template.metadata.labels
9 | serviceName: "nginx"
10 | replicas: 0
11 | minReadySeconds: 10 # by default is 0
12 | template:
13 | metadata:
14 | labels:
15 | app: nginx # has to match .spec.selector.matchLabels
16 | spec:
17 | terminationGracePeriodSeconds: 10
18 | containers:
19 | - name: nginx
20 | image: registry.k8s.io/nginx-slim:0.8
21 | ports:
22 | - containerPort: 80
23 | name: web
24 | volumeMounts:
25 | - name: www
26 | mountPath: /usr/share/nginx/html
27 | volumeClaimTemplates:
28 | - metadata:
29 | name: www
30 | spec:
31 | accessModes: [ "ReadWriteOnce" ]
32 | storageClassName: "my-storage-class"
33 | resources:
34 | requests:
35 | storage: 1Gi
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/statefulset_with_zero_replicas/non-matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: matching
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: nginx # has to match .spec.template.metadata.labels
9 | serviceName: "nginx"
10 | replicas: 3
11 | minReadySeconds: 10 # by default is 0
12 | template:
13 | metadata:
14 | labels:
15 | app: nginx # has to match .spec.selector.matchLabels
16 | spec:
17 | terminationGracePeriodSeconds: 10
18 | containers:
19 | - name: nginx
20 | image: registry.k8s.io/nginx-slim:0.8
21 | ports:
22 | - containerPort: 80
23 | name: web
24 | volumeMounts:
25 | - name: www
26 | mountPath: /usr/share/nginx/html
27 | volumeClaimTemplates:
28 | - metadata:
29 | name: www
30 | spec:
31 | accessModes: [ "ReadWriteOnce" ]
32 | storageClassName: "my-storage-class"
33 | resources:
34 | requests:
35 | storage: 1Gi
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/time_based_delete/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # This Cleaner instance finds any Pod that:
2 | # - has been running for longer than 24 hours
3 | # and instructs Cleaner to delete it.
4 | apiVersion: apps.projectsveltos.io/v1alpha1
5 | kind: Cleaner
6 | metadata:
7 | name: pods-from-job
8 | spec:
9 | schedule: "* 0 * * *"
10 | resourcePolicySet:
11 | resourceSelectors:
12 | - kind: Pod
13 | group: ""
14 | version: v1
15 | evaluate: |
16 | -- Convert a creationTimestamp string (e.g. "2023-12-12T09:35:56Z") to epoch seconds
17 | function convertTimestampString(timestampStr)
18 | local convertedTimestamp = string.gsub(
19 | timestampStr,
20 | '(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z',
21 | function(y, mon, d, h, mi, s)
22 | return os.time({
23 | year = tonumber(y),
24 | month = tonumber(mon),
25 | day = tonumber(d),
26 | hour = tonumber(h),
27 | min = tonumber(mi),
28 | sec = tonumber(s)
29 | })
30 | end
31 | )
32 | return convertedTimestamp
33 | end
34 |
35 | function evaluate()
36 | hs = {}
37 | hs.matching = false
38 |
39 | -- any resource older than this time will be removed
40 | local removeAfterHour = 24
41 |
42 | currentTime = os.time()
43 |
44 | creationTimestamp = convertTimestampString(obj.metadata.creationTimestamp)
45 |
46 | hs.message = creationTimestamp
47 | print('creationTimestamp: ' .. creationTimestamp)
48 | print('currentTime: ' .. currentTime)
49 |
50 | timeDifference = os.difftime(currentTime, tonumber(creationTimestamp))
51 |
52 | print('timeDifference: ' .. timeDifference)
53 |
54 | -- if resource has been running for over 24 hours
55 | if timeDifference > removeAfterHour*60*60 then
56 | hs.matching = true
57 | end
58 |
59 |
60 | return hs
61 | end
62 | action: Delete
63 |
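64 | # Note: in convertTimestampString above, os.time() interprets the parsed
65 | # fields in the local timezone, while creationTimestamp is UTC, so the
66 | # computed age can be off by the local UTC offset.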
--------------------------------------------------------------------------------
/internal/controller/executor/validate_resourceselector/time_based_delete/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | creationTimestamp: "2023-12-19T12:44:17Z"
6 | spec:
7 | containers:
8 | - name: nginx
9 | image: nginx:1.14.2
10 | ports:
11 | - containerPort: 80
--------------------------------------------------------------------------------
/internal/controller/executor/validate_transform/README.md:
--------------------------------------------------------------------------------
1 | If you want to validate your transform functions:
2 |
3 | 1. create a sub-directory
4 | 2. create a file named __cleaner.yaml__ containing your Cleaner instance
5 | 3. create a file named __matching.yaml__ containing a resource that matches your __Cleaner.ResourcePolicySet.ResourceSelector__
6 | 4. create a file named __updated.yaml__ containing the expected resource after __Cleaner.ResourcePolicySet.Transform__ is executed
7 | 5. run ``make test``
8 |
9 | That will run the exact code Cleaner will run in your cluster.
10 | If you see no error, your Cleaner instance is correct.
11 |
12 | **This validates both __Cleaner.ResourcePolicySet.ResourceSelector__ and __Cleaner.ResourcePolicySet.Transform__**
13 |
14 | If you need to validate your aggregatedSelection function, follow the instructions in the __validate_aggregatedselection__ directory.
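15 |
16 | For example, the layout for a hypothetical check named __my_transform__ would look like this:
17 |
18 | ```
19 | validate_transform/
20 | └── my_transform/
21 |     ├── cleaner.yaml  # the Cleaner instance under test
22 |     ├── matching.yaml # resource the ResourceSelector must match
23 |     └── updated.yaml  # the same resource after the transform function runs
24 | ```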
--------------------------------------------------------------------------------
/internal/controller/executor/validate_transform/service_selector/cleaner.yaml:
--------------------------------------------------------------------------------
1 | # Find all Services with selector app: version1 and update it to app: version2
2 | apiVersion: apps.projectsveltos.io/v1alpha1
3 | kind: Cleaner
4 | metadata:
5 | name: cleaner-sample3
6 | spec:
7 | schedule: "* 0 * * *"
8 | resourcePolicySet:
9 | resourceSelectors:
10 | - namespace: foo
11 | kind: Service
12 | group: ""
13 | version: v1
14 | evaluate: |
15 | -- Define how resources will be selected
16 | function evaluate()
17 | hs = {}
18 | hs.matching = false
19 | if obj.spec.selector ~= nil then
20 | if obj.spec.selector["app"] == "version1" then
21 | hs.matching = true
22 | end
23 | end
24 | return hs
25 | end
26 | action: Transform # Update matching resources
27 | transform: |
28 | -- Define how resources will be updated
29 | function transform()
30 | hs = {}
31 | obj.spec.selector["app"] = "version2"
32 | hs.resource = obj
33 | return hs
34 | end
--------------------------------------------------------------------------------
/internal/controller/executor/validate_transform/service_selector/matching.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-service
5 | labels:
6 | app: version1
7 | spec:
8 | selector:
9 | app: version1
10 | ports:
11 | - protocol: TCP
12 | port: 80
13 | targetPort: 80
--------------------------------------------------------------------------------
/internal/controller/executor/validate_transform/service_selector/updated.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-service
5 | labels:
6 | app: version1
7 | spec:
8 | selector:
9 | app: version2
10 | ports:
11 | - protocol: TCP
12 | port: 80
13 | targetPort: 80
--------------------------------------------------------------------------------
/internal/controller/export_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023. projectsveltos.io. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controller
18 |
19 | var (
20 | ShouldSchedule = shouldSchedule
21 | GetNextScheduleTime = getNextScheduleTime
22 |
23 | AddFinalizer = (*CleanerReconciler).addFinalizer
24 | RemoveReport = (*CleanerReconciler).removeReport
25 | )
26 |
--------------------------------------------------------------------------------
/internal/controller/suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023. projectsveltos.io. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controller_test
18 |
19 | import (
20 | "path/filepath"
21 | "testing"
22 |
23 | . "github.com/onsi/ginkgo/v2"
24 | . "github.com/onsi/gomega"
25 |
26 | "k8s.io/client-go/kubernetes/scheme"
27 | "k8s.io/client-go/rest"
28 | "sigs.k8s.io/cluster-api/util"
29 | "sigs.k8s.io/controller-runtime/pkg/client"
30 | "sigs.k8s.io/controller-runtime/pkg/envtest"
31 | logf "sigs.k8s.io/controller-runtime/pkg/log"
32 | "sigs.k8s.io/controller-runtime/pkg/log/zap"
33 |
34 | appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1"
35 | //+kubebuilder:scaffold:imports
36 | )
37 |
38 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to
39 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
40 |
41 | var cfg *rest.Config
42 | var k8sClient client.Client
43 | var testEnv *envtest.Environment
44 |
45 | func TestControllers(t *testing.T) {
46 | RegisterFailHandler(Fail)
47 |
48 | RunSpecs(t, "Controller Suite")
49 | }
50 |
51 | var _ = BeforeSuite(func() {
52 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
53 |
54 | By("bootstrapping test environment")
55 | testEnv = &envtest.Environment{
56 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
57 | ErrorIfCRDPathMissing: true,
58 | }
59 |
60 | var err error
61 | // cfg is defined in this file globally.
62 | cfg, err = testEnv.Start()
63 | Expect(err).NotTo(HaveOccurred())
64 | Expect(cfg).NotTo(BeNil())
65 |
66 | err = appsv1alpha1.AddToScheme(scheme.Scheme)
67 | Expect(err).NotTo(HaveOccurred())
68 |
69 | //+kubebuilder:scaffold:scheme
70 |
71 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
72 | Expect(err).NotTo(HaveOccurred())
73 |
74 | })
75 |
76 | var _ = AfterSuite(func() {
77 | By("tearing down the test environment")
78 | err := testEnv.Stop()
79 | Expect(err).NotTo(HaveOccurred())
80 | })
81 |
82 | func randomString() string {
83 | const length = 10
84 | return util.RandomString(length)
85 | }
86 |
--------------------------------------------------------------------------------
/pkg/scope/pruner.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023. projectsveltos.io. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package scope
18 |
19 | import (
20 | "context"
21 |
22 | "github.com/go-logr/logr"
23 | "github.com/pkg/errors"
24 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
25 | "sigs.k8s.io/cluster-api/util/patch"
26 | "sigs.k8s.io/controller-runtime/pkg/client"
27 |
28 | appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1"
29 | )
30 |
31 | // CleanerScopeParams defines the input parameters used to create a new Cleaner Scope.
32 | type CleanerScopeParams struct {
33 | Client client.Client
34 | Logger logr.Logger
35 | Cleaner *appsv1alpha1.Cleaner
36 | ControllerName string
37 | }
38 |
39 | // NewCleanerScope creates a new Cleaner Scope from the supplied parameters.
40 | // This is meant to be called for each reconcile iteration.
41 | func NewCleanerScope(params CleanerScopeParams) (*CleanerScope, error) {
42 | if params.Client == nil {
43 | return nil, errors.New("client is required when creating a CleanerScope")
44 | }
45 | if params.Cleaner == nil {
46 | return nil, errors.New("failed to generate new scope from nil Cleaner")
47 | }
48 |
49 | helper, err := patch.NewHelper(params.Cleaner, params.Client)
50 | if err != nil {
51 | return nil, errors.Wrap(err, "failed to init patch helper")
52 | }
53 | return &CleanerScope{
54 | Logger: params.Logger,
55 | client: params.Client,
56 | Cleaner: params.Cleaner,
57 | patchHelper: helper,
58 | controllerName: params.ControllerName,
59 | }, nil
60 | }
61 |
62 | // CleanerScope defines the basic context for an actuator to operate upon.
63 | type CleanerScope struct {
64 | logr.Logger
65 | client client.Client
66 | patchHelper *patch.Helper
67 | Cleaner *appsv1alpha1.Cleaner
68 | controllerName string
69 | }
70 |
71 | // PatchObject persists the feature configuration and status.
72 | func (s *CleanerScope) PatchObject(ctx context.Context) error {
73 | return s.patchHelper.Patch(
74 | ctx,
75 | s.Cleaner,
76 | )
77 | }
78 |
79 | // Close closes the current scope persisting the Cleaner configuration and status.
80 | func (s *CleanerScope) Close(ctx context.Context) error {
81 | return s.PatchObject(ctx)
82 | }
83 |
84 | // SetLastRunTime sets the LastRunTime field
85 | func (s *CleanerScope) SetLastRunTime(lastRunTime *metav1.Time) {
86 | s.Cleaner.Status.LastRunTime = lastRunTime
87 | }
88 |
89 | // SetNextScheduleTime sets the NextScheduleTime field
90 | func (s *CleanerScope) SetNextScheduleTime(nextScheduleTime *metav1.Time) {
91 | s.Cleaner.Status.NextScheduleTime = nextScheduleTime
92 | }
93 |
94 | // SetFailureMessage sets the FailureMessage field
95 | func (s *CleanerScope) SetFailureMessage(failureMessage *string) {
96 | s.Cleaner.Status.FailureMessage = failureMessage
97 | }
98 |
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 | "extends": [
4 | "config:recommended"
5 | ]
6 | }
7 |
--------------------------------------------------------------------------------
/test/fv/fv_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023. projectsveltos.io. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package fv_test
18 |
19 | import (
20 | "context"
21 | "fmt"
22 | "testing"
23 | "time"
24 |
25 | . "github.com/onsi/ginkgo/v2"
26 | . "github.com/onsi/gomega"
27 |
28 | "github.com/TwiN/go-color"
29 | ginkgotypes "github.com/onsi/ginkgo/v2/types"
30 | corev1 "k8s.io/api/core/v1"
31 | "k8s.io/apimachinery/pkg/runtime"
32 | "k8s.io/apimachinery/pkg/types"
33 | clientgoscheme "k8s.io/client-go/kubernetes/scheme"
34 | "k8s.io/klog/v2"
35 | "sigs.k8s.io/cluster-api/util"
36 | ctrl "sigs.k8s.io/controller-runtime"
37 | "sigs.k8s.io/controller-runtime/pkg/client"
38 |
39 | appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1"
40 | )
41 |
42 | var (
43 | k8sClient client.Client
44 | scheme *runtime.Scheme
45 | )
46 |
47 | const (
48 | timeout = 2 * time.Minute
49 | pollingInterval = 5 * time.Second
50 | )
51 |
52 | func TestFv(t *testing.T) {
53 | RegisterFailHandler(Fail)
54 |
55 | suiteConfig, reporterConfig := GinkgoConfiguration()
56 | reporterConfig.FullTrace = true
57 | reporterConfig.JSONReport = "out.json"
58 | report := func(report ginkgotypes.Report) {
59 | for i := range report.SpecReports {
60 | specReport := report.SpecReports[i]
61 | if specReport.State.String() == "skipped" {
62 | GinkgoWriter.Printf(color.Colorize(color.Blue, fmt.Sprintf("[Skipped]: %s\n", specReport.FullText())))
63 | }
64 | }
65 | for i := range report.SpecReports {
66 | specReport := report.SpecReports[i]
67 | if specReport.Failed() {
68 | GinkgoWriter.Printf(color.Colorize(color.Red, fmt.Sprintf("[Failed]: %s\n", specReport.FullText())))
69 | }
70 | }
71 | }
72 | ReportAfterSuite("report", report)
73 |
74 | RunSpecs(t, "FV Suite", suiteConfig, reporterConfig)
75 | }
76 |
77 | var _ = BeforeSuite(func() {
78 | restConfig := ctrl.GetConfigOrDie()
79 | // To get rid of the annoying request.go log
80 | restConfig.QPS = 100
81 | restConfig.Burst = 100
82 |
83 | scheme = runtime.NewScheme()
84 |
85 | ctrl.SetLogger(klog.Background())
86 |
87 | Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed())
88 | Expect(appsv1alpha1.AddToScheme(scheme)).To(Succeed())
89 |
90 | var err error
91 | k8sClient, err = client.New(restConfig, client.Options{Scheme: scheme})
92 | Expect(err).NotTo(HaveOccurred())
93 | })
94 |
95 | func randomString() string {
96 | const length = 10
97 | return util.RandomString(length)
98 | }
99 |
100 | func deleteCleaner(cleanerName string) {
101 | currentCleaner := &appsv1alpha1.Cleaner{}
102 |
103 | Expect(k8sClient.Get(context.TODO(),
104 | types.NamespacedName{Name: cleanerName}, currentCleaner)).To(Succeed())
105 |
106 | Expect(k8sClient.Delete(context.TODO(), currentCleaner)).To(Succeed())
107 | }
108 |
109 | func deleteNamespace(name string) {
110 | currentNamespace := &corev1.Namespace{}
111 |
112 | Expect(k8sClient.Get(context.TODO(),
113 | types.NamespacedName{Name: name}, currentNamespace)).To(Succeed())
114 |
115 | Expect(k8sClient.Delete(context.TODO(), currentNamespace)).To(Succeed())
116 | }
117 |
--------------------------------------------------------------------------------
/test/kind-cluster.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | networking:
4 | podSubnet: "10.110.0.0/16"
5 | serviceSubnet: "10.115.0.0/16"
6 | nodes:
7 | - role: control-plane
8 | kubeadmConfigPatches:
9 | - |
10 | kind: ClusterConfiguration
11 | apiServer:
12 | extraArgs:
13 | v: "10"
14 | image: kindest/node:K8S_VERSION
15 | extraMounts:
16 | - hostPath: /var/run/docker.sock
17 | containerPath: /var/run/docker.sock
18 | - hostPath: /usr/share/zoneinfo
19 | containerPath: /usr/share/zoneinfo
20 | - hostPath: /tmp/k8s-pruner
21 | containerPath: /collection
22 | - role: worker
23 | image: kindest/node:K8S_VERSION
24 | extraMounts:
25 | - hostPath: /var/run/docker.sock
26 | containerPath: /var/run/docker.sock
27 | - hostPath: /usr/share/zoneinfo
28 | containerPath: /usr/share/zoneinfo
29 | - hostPath: /tmp/k8s-pruner
30 | containerPath: /collection
31 |
--------------------------------------------------------------------------------