├── .github ├── renovate.json └── workflows │ ├── codeql.yml │ ├── label-all-new-issues.yml │ ├── release.yml │ ├── renovate-vault.yml │ ├── scorecard.yml │ └── tests.yml ├── .gitignore ├── .golangci.yaml ├── CODEOWNERS ├── LICENSE ├── Makefile ├── README.md ├── chart ├── Chart.yaml ├── README.md ├── app-readme.md ├── templates │ ├── _helpers.tpl │ ├── alertingrule.yaml │ ├── benchmark-aks-1.0.yaml │ ├── benchmark-cis-1.8.yaml │ ├── benchmark-cis-1.9.yaml │ ├── benchmark-eks-1.2.0.yaml │ ├── benchmark-eks-1.5.0.yaml │ ├── benchmark-gke-1.2.0.yaml │ ├── benchmark-gke-1.6.0.yaml │ ├── benchmark-k3s-cis-1.8-hardened.yaml │ ├── benchmark-k3s-cis-1.8-permissive.yaml │ ├── benchmark-k3s-cis-1.9.yaml │ ├── benchmark-rke-cis-1.8-hardened.yaml │ ├── benchmark-rke-cis-1.8-permissive.yaml │ ├── benchmark-rke2-cis-1.8-hardened.yaml │ ├── benchmark-rke2-cis-1.8-permissive.yaml │ ├── benchmark-rke2-cis-1.9.yaml │ ├── cis-roles.yaml │ ├── configmap.yaml │ ├── deployment.yaml │ ├── network_policy_allow_all.yaml │ ├── patch_default_serviceaccount.yaml │ ├── rbac.yaml │ ├── scanprofile-cis-1.8.yaml │ ├── scanprofile-cis-1.9.yaml │ ├── scanprofile-k3s-cis-1.8-hardened.yml │ ├── scanprofile-k3s-cis-1.8-permissive.yml │ ├── scanprofile-k3s-cis-1.9.yaml │ ├── scanprofile-rke-1.8-hardened.yaml │ ├── scanprofile-rke-1.8-permissive.yaml │ ├── scanprofile-rke2-cis-1.8-hardened.yml │ ├── scanprofile-rke2-cis-1.8-permissive.yml │ ├── scanprofile-rke2-cis-1.9.yaml │ ├── scanprofileaks.yml │ ├── scanprofileeks-1.5.0.yml │ ├── scanprofileeks.yml │ ├── scanprofilegke-1.6.0.yml │ ├── scanprofilegke.yml │ ├── serviceaccount.yaml │ └── validate-install-crd.yaml └── values.yaml ├── crds ├── clusterscan.yaml ├── clusterscanbenchmark.yaml ├── clusterscanprofile.yaml └── clusterscanreport.yaml ├── go.mod ├── go.sum ├── hack ├── boilerplate.go.txt ├── e2e ├── make │ ├── build.mk │ ├── deps.mk │ └── tools.mk └── upload-gh ├── main.go ├── package └── Dockerfile ├── pkg ├── apis │ └── cis.cattle.io │ │ ├── constants.go │ │ ├── v1 │ │ ├── doc.go │ │ ├── types.go │ │ ├── zz_generated_deepcopy.go │ │ ├── zz_generated_list_types.go │ │ └── zz_generated_register.go │ │ └── zz_generated_register.go ├── codegen │ ├── cleanup │ │ └── main.go │ └── main.go ├── condition │ └── condition.go ├── crds │ └── crd.go ├── generated │ └── controllers │ │ └── cis.cattle.io │ │ ├── factory.go │ │ ├── interface.go │ │ └── v1 │ │ ├── clusterscan.go │ │ ├── clusterscanbenchmark.go │ │ ├── clusterscanprofile.go │ │ ├── clusterscanreport.go │ │ └── interface.go └── securityscan │ ├── alert │ ├── prometheusrule.go │ └── templates │ │ └── prometheusrule.template │ ├── controller.go │ ├── core │ ├── configmap.go │ ├── service.go │ └── templates │ │ ├── cisscanConfig.template │ │ ├── pluginConfig.template │ │ └── service.template │ ├── job │ └── job.go │ ├── jobHandler.go │ ├── podHandler.go │ ├── scan │ └── clusterscan.go │ ├── scanHandler.go │ ├── scanMetricsHandler.go │ └── scheduledScanHandler.go └── tests ├── Dockerfile.k3s ├── k3d-expected.json ├── k3s-bench-test.yaml └── kube-apiserver /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>rancher/renovate-config//rancher-main#release" 5 | ], 6 | "baseBranches": [ 7 | "main" 8 | ], 9 | "ignoreDeps":[ 10 | "github.com/rancher/lasso" 11 | ], 12 | "packageRules": [ 13 | { 14 | "matchBaseBranches": ["release/v1.4"], 15 | "extends": 
["github>rancher/renovate-config//rancher-2.11#release"] 16 | }, 17 | { 18 | "matchBaseBranches": ["release/v1.3"], 19 | "extends": ["github>rancher/renovate-config//rancher-2.10#release"] 20 | }, 21 | { 22 | "matchBaseBranches": ["release/v1.2"], 23 | "extends": ["github>rancher/renovate-config//rancher-2.9#release"] 24 | } 25 | ], 26 | "vulnerabilityAlerts": { 27 | "enabled": true 28 | }, 29 | "osvVulnerabilityAlerts": true 30 | } 31 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | name: CodeQL 2 | on: 3 | workflow_call: 4 | pull_request: 5 | 6 | push: 7 | branches: 8 | - main 9 | 10 | schedule: 11 | - cron: '00 9 * * 2' 12 | 13 | permissions: {} 14 | 15 | jobs: 16 | analyze: 17 | name: Analyze 18 | runs-on: ubuntu-latest 19 | 20 | permissions: 21 | actions: read 22 | contents: read 23 | security-events: write 24 | 25 | strategy: 26 | fail-fast: false 27 | matrix: 28 | language: [ 'go', 'actions' ] 29 | 30 | steps: 31 | - name: Checkout code 32 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 33 | 34 | - name: Initialize CodeQL 35 | uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 36 | with: 37 | languages: ${{ matrix.language }} 38 | # xref: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 39 | # xref: https://codeql.github.com/codeql-query-help/go/ 40 | queries: security-and-quality 41 | 42 | - name: Manual Build 43 | run: go build ./... 44 | 45 | - name: Perform CodeQL Analysis 46 | uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 47 | with: 48 | category: "/language:${{matrix.language}}" 49 | -------------------------------------------------------------------------------- /.github/workflows/label-all-new-issues.yml: -------------------------------------------------------------------------------- 1 | name: Label issue 2 | on: 3 | issues: 4 | types: 5 | - opened 6 | - reopened 7 | 8 | permissions: {} 9 | 10 | jobs: 11 | label_issues: 12 | runs-on: ubuntu-latest 13 | permissions: 14 | issues: write 15 | steps: 16 | - name: Label issue 17 | id: run 18 | run: gh issue edit -R ${GITHUB_REPOSITORY} --add-label ${LABEL} ${{ github.event.issue.number }} 19 | env: 20 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 21 | LABEL: "team/security" 22 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | permissions: {} 9 | 10 | jobs: 11 | 12 | publish: 13 | runs-on: ubuntu-latest 14 | permissions: 15 | contents: read 16 | # write is needed for: 17 | # - OIDC for cosign's use in ecm-distro-tools/publish-image. 18 | # - Read vault secrets in rancher-eio/read-vault-secrets. 
19 | id-token: write 20 | 21 | strategy: 22 | matrix: 23 | include: 24 | # Three images are created: 25 | # - Multi-arch manifest for both amd64 and arm64 26 | - tag-suffix: "" 27 | platforms: linux/amd64,linux/arm64 28 | # - arm64 manifest 29 | - tag-suffix: "-arm64" 30 | platforms: linux/arm64 31 | # - amd64 manifest 32 | - tag-suffix: "-amd64" 33 | platforms: linux/amd64 34 | 35 | steps: 36 | - name: Checkout code 37 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 38 | 39 | - name: Load Secrets from Vault 40 | uses: rancher-eio/read-vault-secrets@main 41 | with: 42 | secrets: | 43 | secret/data/github/repo/${{ github.repository }}/dockerhub/rancher/credentials username | DOCKER_USERNAME ; 44 | secret/data/github/repo/${{ github.repository }}/dockerhub/rancher/credentials password | DOCKER_PASSWORD ; 45 | secret/data/github/repo/${{ github.repository }}/rancher-prime-registry/credentials registry | PRIME_REGISTRY ; 46 | secret/data/github/repo/${{ github.repository }}/rancher-prime-registry/credentials username | PRIME_REGISTRY_USERNAME ; 47 | secret/data/github/repo/${{ github.repository }}/rancher-prime-registry/credentials password | PRIME_REGISTRY_PASSWORD 48 | 49 | - name: Publish manifest 50 | uses: rancher/ecm-distro-tools/actions/publish-image@master 51 | with: 52 | image: cis-operator 53 | tag: ${{ github.ref_name }}${{ matrix.tag-suffix }} 54 | platforms: ${{ matrix.platforms }} 55 | 56 | public-registry: docker.io 57 | public-repo: rancher 58 | public-username: ${{ env.DOCKER_USERNAME }} 59 | public-password: ${{ env.DOCKER_PASSWORD }} 60 | 61 | prime-registry: ${{ env.PRIME_REGISTRY }} 62 | prime-repo: rancher 63 | prime-username: ${{ env.PRIME_REGISTRY_USERNAME }} 64 | prime-password: ${{ env.PRIME_REGISTRY_PASSWORD }} 65 | 66 | publish-assets: 67 | runs-on: ubuntu-latest 68 | permissions: 69 | contents: write # Upload artefacts to release. 
70 | 71 | steps: 72 | - name: Checkout code 73 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 74 | 75 | - run: make upload 76 | env: 77 | GH_TOKEN: ${{ github.token }} 78 | -------------------------------------------------------------------------------- /.github/workflows/renovate-vault.yml: -------------------------------------------------------------------------------- 1 | name: Renovate 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | logLevel: 6 | description: "Override default log level" 7 | required: false 8 | default: info 9 | type: choice 10 | options: 11 | - info 12 | - debug 13 | overrideSchedule: 14 | description: "Override all schedules" 15 | required: false 16 | default: "false" 17 | type: choice 18 | options: 19 | - "false" 20 | - "true" 21 | configMigration: 22 | description: "Toggle PRs for config migration" 23 | required: false 24 | default: "true" 25 | type: choice 26 | options: 27 | - "false" 28 | - "true" 29 | renovateConfig: 30 | description: "Define a custom renovate config file" 31 | required: false 32 | default: ".github/renovate.json" 33 | type: string 34 | 35 | schedule: 36 | - cron: '30 4,6 * * 2-4' 37 | 38 | permissions: 39 | contents: read 40 | id-token: write 41 | 42 | jobs: 43 | call-workflow: 44 | uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@release 45 | with: 46 | configMigration: ${{ inputs.configMigration || 'true' }} 47 | logLevel: ${{ inputs.logLevel || 'info' }} 48 | overrideSchedule: ${{ github.event.inputs.overrideSchedule == 'true' && '{''schedule'':null}' || '' }} 49 | renovateConfig: ${{ inputs.renovateConfig || '.github/renovate.json' }} 50 | secrets: 51 | override-token: "${{ secrets.RENOVATE_FORK_GH_TOKEN || '' }}" 52 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yml: -------------------------------------------------------------------------------- 1 | name: Scorecard supply-chain security 2 | on: 3 | # For Branch-Protection check. Only the default branch is supported. See 4 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection 5 | branch_protection_rule: 6 | # To guarantee Maintained check is occasionally updated. See 7 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained 8 | schedule: 9 | - cron: '20 5 * * 0' 10 | push: 11 | branches: [ "main" ] 12 | 13 | # Declare default permissions as read only. 14 | permissions: read-all 15 | 16 | jobs: 17 | analysis: 18 | name: Scorecard analysis 19 | runs-on: ubuntu-latest 20 | permissions: 21 | # Needed to upload the results to code-scanning dashboard. 22 | security-events: write 23 | # Needed to publish results and get a badge (see publish_results below). 24 | id-token: write 25 | 26 | steps: 27 | - name: "Checkout code" 28 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 29 | with: 30 | persist-credentials: false 31 | 32 | - name: "Run analysis" 33 | uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 34 | with: 35 | results_file: results.sarif 36 | results_format: sarif 37 | # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: 38 | # - you want to enable the Branch-Protection check on a *public* repository 39 | # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. 
40 | # repo_token: ${{ secrets.SCORECARD_TOKEN }} 41 | 42 | # Public repositories: 43 | # - Publish results to OpenSSF REST API for easy access by consumers 44 | # - Allows the repository to include the Scorecard badge. 45 | # - See https://github.com/ossf/scorecard-action#publishing-results. 46 | publish_results: true 47 | 48 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF 49 | # format to the repository Actions tab. 50 | - name: "Upload artifact" 51 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 52 | with: 53 | name: SARIF file 54 | path: results.sarif 55 | retention-days: 5 56 | 57 | # Upload the results to GitHub's code scanning dashboard (optional). 58 | # Commenting out will disable upload of results to your repo's Code Scanning dashboard 59 | - name: "Upload to code-scanning" 60 | uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 61 | with: 62 | sarif_file: results.sarif 63 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | pull_request: 6 | 7 | permissions: 8 | contents: read 9 | 10 | jobs: 11 | validate: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout code 15 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 16 | 17 | - name: Install Go 18 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 19 | with: 20 | go-version: 'stable' 21 | - run: make validate 22 | 23 | test: 24 | strategy: 25 | fail-fast: false 26 | matrix: 27 | # Run tests across all platforms, to ensure developers 28 | # can use any environment to run basic tests. 29 | platform: [ubuntu-latest, windows-latest, macos-latest] 30 | 31 | runs-on: ${{ matrix.platform }} 32 | needs: [ validate ] 33 | 34 | steps: 35 | - name: Checkout code 36 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 37 | 38 | - name: Install Go 39 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 40 | with: 41 | go-version: 'stable' 42 | 43 | - run: make test 44 | 45 | e2e: 46 | strategy: 47 | fail-fast: false 48 | matrix: 49 | # Works on macos-12, but takes a very long time to run so this 50 | # is currently disabled. 
51 | platform: [ubuntu-latest] 52 | 53 | runs-on: ${{ matrix.platform }} 54 | needs: [ validate ] 55 | 56 | steps: 57 | - name: Setup docker (MacOS only) 58 | if: runner.os == 'macos' 59 | run: | 60 | brew install docker 61 | colima start 62 | - name: Setup QEMU 63 | uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 64 | - name: Setup Docker Buildx 65 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 66 | - name: Checkout code 67 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 68 | 69 | - name: Test building images 70 | run: make test-image 71 | 72 | - name: E2E Tests 73 | run: make e2e 74 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.dapper 2 | /.cache 3 | /bin 4 | /dist 5 | *.swp 6 | .idea 7 | /cis-operator 8 | /build 9 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | default: none 4 | enable: 5 | - asasalint 6 | - asciicheck 7 | - bidichk 8 | - gosec 9 | - govet 10 | - ineffassign 11 | - misspell 12 | exclusions: 13 | rules: 14 | - path: 'main.go' 15 | text: 'G114: Use of net/http serve function that has no support for setting timeouts' 16 | formatters: 17 | enable: 18 | - gofmt 19 | - goimports 20 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Default Code Ownership 2 | * @rancher/rancher-security 3 | 4 | # Code Ownership for merging renovate PRs 5 | /go.mod @rancher/infracloud-team @rancher/rancher-security 6 | /go.sum @rancher/infracloud-team @rancher/rancher-security 7 | /hack/ @rancher/infracloud-team @rancher/rancher-security 8 | /package/Dockerfile @rancher/infracloud-team @rancher/rancher-security 9 | /tests/ @rancher/infracloud-team @rancher/rancher-security 10 | 11 | # rancher-cis-benchmark chart Ownership 12 | /chart/ @rancher/infracloud-team @rancher/rancher-security 13 | /crds/ @rancher/infracloud-team @rancher/rancher-security 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | 179 | 180 | APPENDIX: How to apply the Apache License to your work. 181 | 182 | To apply the Apache License to your work, attach the following 183 | boilerplate notice, with the fields enclosed by brackets "[]" 184 | replaced with your own identifying information. (Don't include 185 | the brackets!) The text should be enclosed in the appropriate 186 | comment syntax for the file format. We also recommend that a 187 | file or class name and description of purpose be included on the 188 | same "printed page" as the copyright notice for easier 189 | identification within third-party archives. 190 | 191 | Copyright [yyyy] [name of copyright owner] 192 | 193 | Licensed under the Apache License, Version 2.0 (the "License"); 194 | you may not use this file except in compliance with the License. 195 | You may obtain a copy of the License at 196 | 197 | http://www.apache.org/licenses/LICENSE-2.0 198 | 199 | Unless required by applicable law or agreed to in writing, software 200 | distributed under the License is distributed on an "AS IS" BASIS, 201 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 202 | See the License for the specific language governing permissions and 203 | limitations under the License. 
204 | 205 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # To avoid polluting the Makefile, versions and checksums for tooling and 2 | # dependencies are defined in hack/make/deps.mk. 3 | include hack/make/deps.mk 4 | 5 | # Include logic that can be reused across projects. 6 | include hack/make/build.mk 7 | include hack/make/tools.mk 8 | 9 | # Define target platforms, image builder and the fully qualified image name. 10 | TARGET_PLATFORMS ?= linux/amd64,linux/arm64 11 | 12 | REPO ?= rancher 13 | IMAGE = $(REPO)/cis-operator:$(TAG) 14 | TARGET_BIN ?= build/bin/cis-operator 15 | ARCH ?= $(shell docker info --format '{{.ClientInfo.Arch}}') 16 | 17 | # TARGET_ARCHS defines all GOARCH used for releasing binaries. 18 | TARGET_ARCHS = arm64 amd64 19 | BUILD_ACTION = --load 20 | 21 | .DEFAULT_GOAL := ci 22 | ci: build test validate e2e ## run the targets needed to validate a PR in CI. 23 | 24 | clean: ## clean up project. 25 | rm -rf bin build 26 | 27 | test: ## run unit tests. 28 | @echo "Running tests" 29 | $(GO) test -race -cover ./... 30 | 31 | .PHONY: build 32 | build: ## build project and output binary to TARGET_BIN. 33 | CGO_ENABLED=0 $(GO) build -trimpath -tags "$(GO_TAGS)" -ldflags "$(LINKFLAGS)" -o $(TARGET_BIN) 34 | 35 | test-image: 36 | # Instead of loading the image, target all platforms, effectively testing 37 | # the build for the target architectures. 38 | $(MAKE) build-image BUILD_ACTION="--platform=$(TARGET_PLATFORMS)" 39 | 40 | build-image: buildx-machine ## build (and load) the container image targeting the current platform. 41 | $(IMAGE_BUILDER) build -f package/Dockerfile \ 42 | --builder $(MACHINE) $(IMAGE_ARGS) \ 43 | --build-arg VERSION=$(VERSION) -t "$(IMAGE)" $(BUILD_ACTION) . 44 | @echo "Built $(IMAGE)" 45 | 46 | push-image: buildx-machine ## build the container image targeting all platforms defined by TARGET_PLATFORMS and push to a registry. 47 | $(IMAGE_BUILDER) build -f package/Dockerfile \ 48 | --builder $(MACHINE) $(IMAGE_ARGS) $(IID_FILE_FLAG) $(BUILDX_ARGS) \ 49 | --build-arg VERSION=$(VERSION) --platform=$(TARGET_PLATFORMS) -t "$(IMAGE)" --push . 50 | @echo "Pushed $(IMAGE)" 51 | 52 | e2e: $(K3D) $(KUBECTL) $(HELM) build-image ## Run E2E tests. 53 | K3D=$(K3D) KUBECTL=$(KUBECTL) HELM=$(HELM) VERSION=$(VERSION) \ 54 | IMAGE=$(IMAGE) \ 55 | ./hack/e2e 56 | 57 | generate: ## Run code generation logic. 58 | $(GO) generate ./... 59 | 60 | validate: validate-lint generate validate-dirty ## Run validation checks. 61 | 62 | validate-lint: $(GOLANGCI) 63 | $(GOLANGCI) run --timeout=2m 64 | 65 | validate-dirty: 66 | ifdef DIRTY 67 | @echo Git is dirty 68 | @git --no-pager status 69 | @git --no-pager diff 70 | @exit 1 71 | endif 72 | 73 | upload: clean ## Build and upload artefacts to the GitHub release. 
74 | $(MAKE) $(addsuffix -upload, $(TARGET_ARCHS)) 75 | 76 | %-upload: 77 | TARGET_BIN=build/bin/cis-operator-$(subst :,/,$*) \ 78 | GOARCH=$(subst :,/,$*) GOOS=linux \ 79 | $(MAKE) build 80 | 81 | TAG=$(TAG) \ 82 | ./hack/upload-gh $(subst :,/,$*) 83 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cis-operator [![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/rancher/cis-operator/badge)](https://scorecard.dev/viewer/?uri=github.com/rancher/cis-operator) 2 | 3 | The cis-operator enables running CIS benchmark security scans on a Kubernetes cluster and generating compliance reports that can be downloaded. 4 | Benchmark tests and the execution logic live in [rancher/security-scan]. 5 | 6 | ## Building 7 | 8 | `make` 9 | 10 | 11 | ## Running 12 | 1. Install the custom resource definitions: 13 | - `kubectl apply -f crds/` 14 | 2. Install the operator: 15 | `./bin/cis-operator` 16 | 17 | 18 | ## Branches and Releases 19 | ### General information 20 | The current branch strategy for `rancher/cis-operator` is laid out below: 21 | 22 | | Branch | Tag | Security-Scan | Rancher | 23 | |-----------------------|----------|-----------------------|---------------------------| 24 | | `main` | `head` | `main` branch (`head`) | `main` branch (`head`) | 25 | | `release/v1.3` | `v1.3.x` | `v0.5.x` | `v2.10.x` | 26 | | `release/v1.2` | `v1.2.x` | `v0.4.x` | `v2.9.x` | 27 | | `release/v1.1` | `v1.1.x` | `v0.3.x` | `v2.8.x` | 28 | | `master` (deprecated) | `v1.0.x` | `v0.2.x` | `v2.7.x`,`v2.8.x`,`v2.9.x` | 29 | 30 | Note that this branch strategy aligns with Rancher Manager releases to maximize compatibility 31 | within the ecosystem. This includes the k8s dependencies that the Rancher release 32 | aims to support, meaning that cis-operator should use the same k8s minor release 33 | as the Rancher release line it aims to support. 34 | 35 | Active development takes place against `main`. Release branches are only used for 36 | bug fixes and security-related dependency bumps. 37 | 38 | Refer to the [Support Compatibility Matrix](https://www.suse.com/suse-rancher/support-matrix/) 39 | for official compatibility information. 40 | 41 | ### How future release branches should be generated 42 | Follow these guidelines when releasing new branches: 43 | 1. Naming convention to use: `release/v1.x.x`. 44 | 2. Update the [Branches and Releases](https://github.com/rancher/cis-operator#branches-and-releases) table with the new branch and remove branches that are no longer needed. 45 | 46 | ## License 47 | Copyright (c) 2019 [Rancher Labs, Inc.](http://rancher.com) 48 | 49 | Licensed under the Apache License, Version 2.0 (the "License"); 50 | you may not use this file except in compliance with the License. 51 | You may obtain a copy of the License at 52 | 53 | [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) 54 | 55 | Unless required by applicable law or agreed to in writing, software 56 | distributed under the License is distributed on an "AS IS" BASIS, 57 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 58 | See the License for the specific language governing permissions and 59 | limitations under the License. 
60 | 61 | [rancher/security-scan]: https://github.com/rancher/security-scan 62 | -------------------------------------------------------------------------------- /chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | catalog.cattle.io/auto-install: rancher-cis-benchmark-crd=match 3 | catalog.cattle.io/certified: rancher 4 | catalog.cattle.io/display-name: CIS Benchmark 5 | catalog.cattle.io/kube-version: '>= 1.30.0-0 < 1.33.0-0' 6 | catalog.cattle.io/namespace: cis-operator-system 7 | catalog.cattle.io/os: linux 8 | catalog.cattle.io/permits-os: linux,windows 9 | catalog.cattle.io/provides-gvr: cis.cattle.io.clusterscans/v1 10 | catalog.cattle.io/rancher-version: '>= 2.11.0-0 < 2.12.0-0' 11 | catalog.cattle.io/release-name: rancher-cis-benchmark 12 | catalog.cattle.io/type: cluster-tool 13 | catalog.cattle.io/ui-component: rancher-cis-benchmark 14 | apiVersion: v1 15 | appVersion: v8.0.0-rc.1 16 | description: The cis-operator enables running CIS benchmark security scans on a Kubernetes 17 | cluster 18 | icon: https://charts.rancher.io/assets/logos/cis-kube-bench.svg 19 | keywords: 20 | - security 21 | name: rancher-cis-benchmark 22 | version: 8.0.0-rc.1 23 | -------------------------------------------------------------------------------- /chart/README.md: -------------------------------------------------------------------------------- 1 | # Rancher CIS Benchmark Chart 2 | 3 | The cis-operator enables running CIS benchmark security scans on a Kubernetes cluster and generating compliance reports that can be downloaded. 4 | 5 | # Installation 6 | 7 | ``` 8 | helm install rancher-cis-benchmark ./ --create-namespace -n cis-operator-system 9 | ``` 10 | -------------------------------------------------------------------------------- /chart/app-readme.md: -------------------------------------------------------------------------------- 1 | 2 | # Rancher CIS Benchmarks 3 | 4 | This chart enables security scanning of the cluster using [CIS (Center for Internet Security) benchmarks](https://www.cisecurity.org/benchmark/kubernetes/). 5 | 6 | For more information on how to use the feature, refer to our [docs](https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/cis-scan-guides). 7 | 8 | This chart installs the following components: 9 | 10 | - [cis-operator](https://github.com/rancher/cis-operator) - The cis-operator handles launching the [kube-bench](https://github.com/aquasecurity/kube-bench) tool that runs a suite of CIS tests on the nodes of your Kubernetes cluster. After scans finish, the cis-operator generates a compliance report that can be downloaded. 11 | - Scans - A scan is a CRD (`ClusterScan`) that defines when to trigger CIS scans on the cluster based on the defined profile. A report is created after the scan is completed. 12 | - Profiles - A profile is a CRD (`ClusterScanProfile`) that defines the configuration for the CIS scan, that is, the benchmark version to use and any specific tests to skip in that benchmark. This chart installs a few default `ClusterScanProfile` custom resources with no skipped tests, which can immediately be used to launch CIS scans. 13 | - Benchmark Versions - A benchmark version is a CRD (`ClusterScanBenchmark`) that defines the CIS benchmark version to run using kube-bench as well as the valid configuration parameters for that benchmark. This chart installs a few default `ClusterScanBenchmark` custom resources. 
14 | - Alerting Resources - Rancher's CIS Benchmark application lets you run a cluster scan on a schedule, and send alerts when scans finish. 15 | - If you want to enable alerts to be delivered when a cluster scan completes, you need to ensure that [Rancher's Monitoring and Alerting](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/) application is pre-installed and the [Receivers and Routes](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/#alertmanager-config) are configured to send out alerts. 16 | - Additionally, you need to set `alerts: true` in the Values YAML while installing or upgrading this chart. 17 | 18 | ## CIS Kubernetes Benchmark support 19 | 20 | | Source | Kubernetes distribution | scan profile | Kubernetes versions | 21 | |--------|-------------------------|--------------------------------------------------------------------------------------------------------------------|---------------------| 22 | | CIS | any | [cis-1.9](https://github.com/aquasecurity/kube-bench/tree/main/cfg/cis-1.9) | v1.27+ | 23 | | CIS | any | [cis-1.8](https://github.com/aquasecurity/kube-bench/tree/main/cfg/cis-1.8) | v1.26 | 24 | | CIS | rke | [rke-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/release/v0.5/package/cfg/rke-cis-1.8-permissive) | rke1-v1.26+ | 25 | | CIS | rke | [rke-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/release/v0.5/package/cfg/rke-cis-1.8-hardened) | rke1-v1.26+ | 26 | | CIS | rke2 | [rke2-cis-1.9](https://github.com/rancher/security-scan/tree/release/v0.5/package/cfg/rke2-cis-1.9) | rke2-v1.27+ | 27 | | CIS | rke2 | [rke2-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/release/v0.5/package/cfg/rke2-cis-1.8-permissive) | rke2-v1.26 | 28 | | CIS | rke2 | [rke2-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/release/v0.5/package/cfg/rke2-cis-1.8-hardened) | rke2-v1.26 | 29 | | CIS | k3s | [k3s-cis-1.9](https://github.com/rancher/security-scan/tree/release/v0.5/package/cfg/k3s-cis-1.9) | k3s-v1.27+ | 30 | | CIS | k3s | [k3s-cis-1.8-permissive](https://github.com/rancher/security-scan/tree/release/v0.5/package/cfg/k3s-cis-1.8-permissive) | k3s-v1.26 | 31 | | CIS | k3s | [k3s-cis-1.8-hardened](https://github.com/rancher/security-scan/tree/release/v0.5/package/cfg/k3s-cis-1.8-hardened) | k3s-v1.26 | 32 | | CIS | eks | [eks-1.5.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/eks-1.5.0) | eks-1.27.0+ | 33 | | CIS | eks | [eks-1.2.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/eks-1.2.0) | eks | 34 | | CIS | aks | [aks-1.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/aks-1.0) | aks | 35 | | CIS | gke | [gke-1.2.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/gke-1.2.0) | gke-1.20 | 36 | | CIS | gke | [gke-1.6.0](https://github.com/aquasecurity/kube-bench/tree/main/cfg/gke-1.6.0) | gke-1.29+ | 37 | -------------------------------------------------------------------------------- /chart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* Ensure namespace is set the same everywhere */}} 2 | {{- define "cis.namespace" -}} 3 | {{- .Release.Namespace | default "cis-operator-system" -}} 4 | {{- end -}} 5 | 6 | {{- define "system_default_registry" -}} 7 | {{- if .Values.global.cattle.systemDefaultRegistry -}} 8 | {{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} 9 | {{- else -}} 10 | {{- "" -}} 11 | {{- end -}} 12 | {{- end -}} 13 | 14 | {{/* 15 | 
Windows clusters add a default taint to Linux nodes; add the Linux tolerations 16 | below so that workloads can be scheduled to those Linux nodes. 17 | */}} 18 | {{- define "linux-node-tolerations" -}} 19 | - key: "cattle.io/os" 20 | value: "linux" 21 | effect: "NoSchedule" 22 | operator: "Equal" 23 | {{- end -}} 24 | 25 | {{- define "linux-node-selector" -}} 26 | kubernetes.io/os: linux 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /chart/templates/alertingrule.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.alerts.enabled -}} 2 | --- 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: PodMonitor 5 | metadata: 6 | name: rancher-cis-pod-monitor 7 | namespace: {{ template "cis.namespace" . }} 8 | spec: 9 | selector: 10 | matchLabels: 11 | cis.cattle.io/operator: cis-operator 12 | podMetricsEndpoints: 13 | - port: cismetrics 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /chart/templates/benchmark-aks-1.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: aks-1.0 6 | spec: 7 | clusterProvider: aks 8 | minKubernetesVersion: "1.15.0" 9 | -------------------------------------------------------------------------------- /chart/templates/benchmark-cis-1.8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: cis-1.8 6 | spec: 7 | clusterProvider: "" 8 | minKubernetesVersion: "1.26.0" 9 | maxKubernetesVersion: "1.26.x" 10 | -------------------------------------------------------------------------------- /chart/templates/benchmark-cis-1.9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: cis-1.9 6 | spec: 7 | clusterProvider: "" 8 | minKubernetesVersion: "1.27.0" 9 | -------------------------------------------------------------------------------- /chart/templates/benchmark-eks-1.2.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: eks-1.2.0 6 | spec: 7 | clusterProvider: eks 8 | minKubernetesVersion: "1.15.0" 9 | maxKubernetesVersion: "1.26.x" 10 | -------------------------------------------------------------------------------- /chart/templates/benchmark-eks-1.5.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: eks-1.5.0 6 | spec: 7 | clusterProvider: eks 8 | minKubernetesVersion: "1.27.0" 9 | -------------------------------------------------------------------------------- /chart/templates/benchmark-gke-1.2.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: gke-1.2.0 6 | spec: 7 | clusterProvider: gke 8 | minKubernetesVersion: "1.15.0" 9 | maxKubernetesVersion: "1.28.x" 10 | -------------------------------------------------------------------------------- /chart/templates/benchmark-gke-1.6.0.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: gke-1.6.0 6 | spec: 7 | clusterProvider: gke 8 | minKubernetesVersion: "1.29.0" 9 | -------------------------------------------------------------------------------- /chart/templates/benchmark-k3s-cis-1.8-hardened.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: k3s-cis-1.8-hardened 6 | spec: 7 | clusterProvider: k3s 8 | minKubernetesVersion: "1.26.0" 9 | maxKubernetesVersion: "1.26.x" 10 | -------------------------------------------------------------------------------- /chart/templates/benchmark-k3s-cis-1.8-permissive.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: k3s-cis-1.8-permissive 6 | spec: 7 | clusterProvider: k3s 8 | minKubernetesVersion: "1.26.0" 9 | maxKubernetesVersion: "1.26.x" 10 | -------------------------------------------------------------------------------- /chart/templates/benchmark-k3s-cis-1.9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: k3s-cis-1.9 6 | spec: 7 | clusterProvider: k3s 8 | minKubernetesVersion: "1.27.0" 9 | -------------------------------------------------------------------------------- /chart/templates/benchmark-rke-cis-1.8-hardened.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: rke-cis-1.8-hardened 6 | spec: 7 | clusterProvider: rke 8 | minKubernetesVersion: "1.26.0" 9 | -------------------------------------------------------------------------------- /chart/templates/benchmark-rke-cis-1.8-permissive.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: rke-cis-1.8-permissive 6 | spec: 7 | clusterProvider: rke 8 | minKubernetesVersion: "1.26.0" 9 | -------------------------------------------------------------------------------- /chart/templates/benchmark-rke2-cis-1.8-hardened.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: rke2-cis-1.8-hardened 6 | spec: 7 | clusterProvider: rke2 8 | minKubernetesVersion: "1.26.0" 9 | maxKubernetesVersion: "1.26.x" 10 | -------------------------------------------------------------------------------- /chart/templates/benchmark-rke2-cis-1.8-permissive.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | name: rke2-cis-1.8-permissive 6 | spec: 7 | clusterProvider: rke2 8 | minKubernetesVersion: "1.26.0" 9 | maxKubernetesVersion: "1.26.x" 10 | -------------------------------------------------------------------------------- /chart/templates/benchmark-rke2-cis-1.9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanBenchmark 4 | metadata: 5 | 
name: rke2-cis-1.9 6 | spec: 7 | clusterProvider: rke2 8 | minKubernetesVersion: "1.27.0" 9 | -------------------------------------------------------------------------------- /chart/templates/cis-roles.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: cis-admin 6 | rules: 7 | - apiGroups: 8 | - cis.cattle.io 9 | resources: 10 | - clusterscanbenchmarks 11 | - clusterscanprofiles 12 | - clusterscans 13 | - clusterscanreports 14 | verbs: ["create", "update", "delete", "patch","get", "watch", "list"] 15 | - apiGroups: 16 | - catalog.cattle.io 17 | resources: ["apps"] 18 | resourceNames: ["rancher-cis-benchmark"] 19 | verbs: ["get", "watch", "list"] 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - configmaps 24 | verbs: 25 | - '*' 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: ClusterRole 29 | metadata: 30 | name: cis-view 31 | rules: 32 | - apiGroups: 33 | - cis.cattle.io 34 | resources: 35 | - clusterscanbenchmarks 36 | - clusterscanprofiles 37 | - clusterscans 38 | - clusterscanreports 39 | verbs: ["get", "watch", "list"] 40 | - apiGroups: 41 | - catalog.cattle.io 42 | resources: ["apps"] 43 | resourceNames: ["rancher-cis-benchmark"] 44 | verbs: ["get", "watch", "list"] 45 | - apiGroups: 46 | - "" 47 | resources: 48 | - configmaps 49 | verbs: ["get", "watch", "list"] 50 | -------------------------------------------------------------------------------- /chart/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: default-clusterscanprofiles 5 | namespace: {{ template "cis.namespace" . }} 6 | data: 7 | # Default ClusterScanProfiles per cluster provider type 8 | rke: |- 9 | <1.21.0: rke-profile-permissive-1.20 10 | >=1.21.0: rke-profile-permissive-1.8 11 | rke2: |- 12 | <1.21.0: rke2-cis-1.20-profile-permissive 13 | >=1.21.0: rke2-cis-1.9-profile 14 | eks: "eks-profile-1.5.0" 15 | gke: "gke-profile-1.6.0" 16 | aks: "aks-profile" 17 | k3s: "k3s-cis-1.9-profile" 18 | default: "cis-1.9-profile" 19 | -------------------------------------------------------------------------------- /chart/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: cis-operator 5 | namespace: {{ template "cis.namespace" . }} 6 | labels: 7 | cis.cattle.io/operator: cis-operator 8 | spec: 9 | selector: 10 | matchLabels: 11 | cis.cattle.io/operator: cis-operator 12 | template: 13 | metadata: 14 | labels: 15 | cis.cattle.io/operator: cis-operator 16 | spec: 17 | serviceAccountName: cis-operator-serviceaccount 18 | containers: 19 | - name: cis-operator 20 | image: '{{ template "system_default_registry" . }}{{ .Values.image.cisoperator.repository }}:{{ .Values.image.cisoperator.tag }}' 21 | imagePullPolicy: IfNotPresent 22 | ports: 23 | - name: cismetrics 24 | containerPort: {{ .Values.alerts.metricsPort }} 25 | env: 26 | - name: SECURITY_SCAN_IMAGE 27 | value: {{ template "system_default_registry" . }}{{ .Values.image.securityScan.repository }} 28 | - name: SECURITY_SCAN_IMAGE_TAG 29 | value: {{ .Values.image.securityScan.tag }} 30 | - name: SONOBUOY_IMAGE 31 | value: {{ template "system_default_registry" . 
}}{{ .Values.image.sonobuoy.repository }} 32 | - name: SONOBUOY_IMAGE_TAG 33 | value: {{ .Values.image.sonobuoy.tag }} 34 | - name: CIS_ALERTS_METRICS_PORT 35 | value: '{{ .Values.alerts.metricsPort }}' 36 | - name: CIS_ALERTS_SEVERITY 37 | value: {{ .Values.alerts.severity }} 38 | - name: CIS_ALERTS_ENABLED 39 | value: {{ .Values.alerts.enabled | default "false" | quote }} 40 | - name: CLUSTER_NAME 41 | value: '{{ .Values.global.cattle.clusterName }}' 42 | - name: CIS_OPERATOR_DEBUG 43 | value: '{{ .Values.image.cisoperator.debug }}' 44 | {{- if .Values.securityScanJob.overrideTolerations }} 45 | - name: SECURITY_SCAN_JOB_TOLERATIONS 46 | value: '{{ .Values.securityScanJob.tolerations | toJson }}' 47 | {{- end }} 48 | resources: 49 | {{- toYaml .Values.resources | nindent 12 }} 50 | nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} 51 | {{- if .Values.nodeSelector }} 52 | {{ toYaml .Values.nodeSelector | indent 8 }} 53 | {{- end }} 54 | tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} 55 | {{- if .Values.tolerations }} 56 | {{ toYaml .Values.tolerations | indent 8 }} 57 | {{- end }} 58 | {{- with .Values.affinity }} 59 | affinity: 60 | {{- toYaml . | nindent 8 }} 61 | {{- end }} 62 | -------------------------------------------------------------------------------- /chart/templates/network_policy_allow_all.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: default-allow-all 6 | namespace: {{ template "cis.namespace" . }} 7 | spec: 8 | podSelector: {} 9 | ingress: 10 | - {} 11 | egress: 12 | - {} 13 | policyTypes: 14 | - Ingress 15 | - Egress 16 | -------------------------------------------------------------------------------- /chart/templates/patch_default_serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: patch-sa 6 | annotations: 7 | "helm.sh/hook": post-install, post-upgrade 8 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation 9 | spec: 10 | template: 11 | spec: 12 | serviceAccountName: cis-operator-serviceaccount 13 | nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} 14 | {{- if .Values.nodeSelector }} 15 | {{ toYaml .Values.nodeSelector | indent 8 }} 16 | {{- end }} 17 | tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} 18 | {{- if .Values.tolerations }} 19 | {{ toYaml .Values.tolerations | indent 8 }} 20 | {{- end }} 21 | restartPolicy: Never 22 | containers: 23 | - name: sa 24 | image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}" 25 | imagePullPolicy: {{ .Values.global.imagePullPolicy }} 26 | command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"] 27 | args: ["-n", {{ template "cis.namespace" . 
}}] 28 | 29 | backoffLimit: 1 30 | -------------------------------------------------------------------------------- /chart/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: rancher-cis-benchmark 6 | app.kubernetes.io/instance: release-name 7 | name: cis-operator-clusterrole 8 | rules: 9 | - apiGroups: 10 | - "cis.cattle.io" 11 | resources: 12 | - "*" 13 | verbs: 14 | - "*" 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - "pods" 19 | - "services" 20 | - "configmaps" 21 | - "nodes" 22 | - "serviceaccounts" 23 | verbs: 24 | - "get" 25 | - "list" 26 | - "create" 27 | - "update" 28 | - "watch" 29 | - "patch" 30 | - apiGroups: 31 | - "rbac.authorization.k8s.io" 32 | resources: 33 | - "rolebindings" 34 | - "clusterrolebindings" 35 | - "clusterroles" 36 | - "roles" 37 | verbs: 38 | - "get" 39 | - "list" 40 | - apiGroups: 41 | - "batch" 42 | resources: 43 | - "jobs" 44 | verbs: 45 | - "list" 46 | - "create" 47 | - "patch" 48 | - "update" 49 | - "watch" 50 | --- 51 | apiVersion: rbac.authorization.k8s.io/v1 52 | kind: ClusterRole 53 | metadata: 54 | labels: 55 | app.kubernetes.io/name: rancher-cis-benchmark 56 | app.kubernetes.io/instance: release-name 57 | name: cis-scan-ns 58 | rules: 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - "namespaces" 63 | - "nodes" 64 | - "pods" 65 | - "serviceaccounts" 66 | - "services" 67 | - "replicationcontrollers" 68 | verbs: 69 | - "get" 70 | - "list" 71 | - "watch" 72 | - apiGroups: 73 | - "rbac.authorization.k8s.io" 74 | resources: 75 | - "rolebindings" 76 | - "clusterrolebindings" 77 | - "clusterroles" 78 | - "roles" 79 | verbs: 80 | - "get" 81 | - "list" 82 | - apiGroups: 83 | - "batch" 84 | resources: 85 | - "jobs" 86 | - "cronjobs" 87 | verbs: 88 | - "list" 89 | - apiGroups: 90 | - "apps" 91 | resources: 92 | - "daemonsets" 93 | - "deployments" 94 | - "replicasets" 95 | - "statefulsets" 96 | verbs: 97 | - "list" 98 | - apiGroups: 99 | - "autoscaling" 100 | resources: 101 | - "horizontalpodautoscalers" 102 | verbs: 103 | - "list" 104 | - apiGroups: 105 | - "networking.k8s.io" 106 | resources: 107 | - "networkpolicies" 108 | verbs: 109 | - "get" 110 | - "list" 111 | - "watch" 112 | --- 113 | apiVersion: rbac.authorization.k8s.io/v1 114 | kind: Role 115 | metadata: 116 | name: cis-operator-role 117 | labels: 118 | app.kubernetes.io/name: rancher-cis-benchmark 119 | app.kubernetes.io/instance: release-name 120 | namespace: {{ template "cis.namespace" . 
}} 121 | rules: 122 | - apiGroups: 123 | - "" 124 | resources: 125 | - "services" 126 | verbs: 127 | - "watch" 128 | - "list" 129 | - "get" 130 | - "patch" 131 | - apiGroups: 132 | - "batch" 133 | resources: 134 | - "jobs" 135 | verbs: 136 | - "watch" 137 | - "list" 138 | - "get" 139 | - "delete" 140 | - apiGroups: 141 | - "" 142 | resources: 143 | - "configmaps" 144 | - "pods" 145 | - "secrets" 146 | verbs: 147 | - "*" 148 | - apiGroups: 149 | - "apps" 150 | resources: 151 | - "daemonsets" 152 | verbs: 153 | - "*" 154 | - apiGroups: 155 | - monitoring.coreos.com 156 | resources: 157 | - prometheusrules 158 | verbs: 159 | - create 160 | --- 161 | apiVersion: rbac.authorization.k8s.io/v1 162 | kind: ClusterRoleBinding 163 | metadata: 164 | labels: 165 | app.kubernetes.io/name: rancher-cis-benchmark 166 | app.kubernetes.io/instance: release-name 167 | name: cis-operator-clusterrolebinding 168 | roleRef: 169 | apiGroup: rbac.authorization.k8s.io 170 | kind: ClusterRole 171 | name: cis-operator-clusterrole 172 | subjects: 173 | - kind: ServiceAccount 174 | name: cis-operator-serviceaccount 175 | namespace: {{ template "cis.namespace" . }} 176 | --- 177 | apiVersion: rbac.authorization.k8s.io/v1 178 | kind: ClusterRoleBinding 179 | metadata: 180 | name: cis-scan-ns 181 | labels: 182 | app.kubernetes.io/name: rancher-cis-benchmark 183 | app.kubernetes.io/instance: release-name 184 | roleRef: 185 | apiGroup: rbac.authorization.k8s.io 186 | kind: ClusterRole 187 | name: cis-scan-ns 188 | subjects: 189 | - kind: ServiceAccount 190 | name: cis-serviceaccount 191 | namespace: {{ template "cis.namespace" . }} 192 | --- 193 | apiVersion: rbac.authorization.k8s.io/v1 194 | kind: RoleBinding 195 | metadata: 196 | labels: 197 | app.kubernetes.io/name: rancher-cis-benchmark 198 | app.kubernetes.io/instance: release-name 199 | name: cis-operator-rolebinding 200 | namespace: {{ template "cis.namespace" . }} 201 | roleRef: 202 | apiGroup: rbac.authorization.k8s.io 203 | kind: Role 204 | name: cis-operator-role 205 | subjects: 206 | - kind: ServiceAccount 207 | name: cis-serviceaccount 208 | namespace: {{ template "cis.namespace" . }} 209 | - kind: ServiceAccount 210 | name: cis-operator-serviceaccount 211 | namespace: {{ template "cis.namespace" . 
}} 212 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-cis-1.8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: cis-1.8-profile 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: cis-1.8 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-cis-1.9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: cis-1.9-profile 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: cis-1.9 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-k3s-cis-1.8-hardened.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: k3s-cis-1.8-profile-hardened 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: k3s-cis-1.8-hardened 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-k3s-cis-1.8-permissive.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: k3s-cis-1.8-profile-permissive 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: k3s-cis-1.8-permissive 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-k3s-cis-1.9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: k3s-cis-1.9-profile 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: k3s-cis-1.9 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-rke-1.8-hardened.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: rke-profile-hardened-1.8 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: rke-cis-1.8-hardened 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-rke-1.8-permissive.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: rke-profile-permissive-1.8 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: rke-cis-1.8-permissive 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-rke2-cis-1.8-hardened.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: rke2-cis-1.8-profile-hardened 6 | annotations: 7 | 
clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: rke2-cis-1.8-hardened 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-rke2-cis-1.8-permissive.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: rke2-cis-1.8-profile-permissive 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: rke2-cis-1.8-permissive 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofile-rke2-cis-1.9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: rke2-cis-1.9-profile 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: rke2-cis-1.9 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofileaks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: aks-profile 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: aks-1.0 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofileeks-1.5.0.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: eks-profile-1.5.0 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: eks-1.5.0 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofileeks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: eks-profile 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: eks-1.2.0 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofilegke-1.6.0.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: gke-profile-1.6.0 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: gke-1.6.0 10 | -------------------------------------------------------------------------------- /chart/templates/scanprofilegke.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cis.cattle.io/v1 3 | kind: ClusterScanProfile 4 | metadata: 5 | name: gke-profile 6 | annotations: 7 | clusterscanprofile.cis.cattle.io/builtin: "true" 8 | spec: 9 | benchmarkVersion: gke-1.2.0 10 | -------------------------------------------------------------------------------- /chart/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | namespace: {{ template "cis.namespace" . 
}} 5 | name: cis-operator-serviceaccount 6 | --- 7 | apiVersion: v1 8 | kind: ServiceAccount 9 | metadata: 10 | namespace: {{ template "cis.namespace" . }} 11 | labels: 12 | app.kubernetes.io/name: rancher-cis-benchmark 13 | app.kubernetes.io/instance: release-name 14 | name: cis-serviceaccount 15 | -------------------------------------------------------------------------------- /chart/templates/validate-install-crd.yaml: -------------------------------------------------------------------------------- 1 | #{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} 2 | # {{- $found := dict -}} 3 | # {{- set $found "cis.cattle.io/v1/ClusterScan" false -}} 4 | # {{- set $found "cis.cattle.io/v1/ClusterScanBenchmark" false -}} 5 | # {{- set $found "cis.cattle.io/v1/ClusterScanProfile" false -}} 6 | # {{- set $found "cis.cattle.io/v1/ClusterScanReport" false -}} 7 | # {{- range .Capabilities.APIVersions -}} 8 | # {{- if hasKey $found (toString .) -}} 9 | # {{- set $found (toString .) true -}} 10 | # {{- end -}} 11 | # {{- end -}} 12 | # {{- range $_, $exists := $found -}} 13 | # {{- if (eq $exists false) -}} 14 | # {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} 15 | # {{- end -}} 16 | # {{- end -}} 17 | #{{- end -}} 18 | -------------------------------------------------------------------------------- /chart/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for rancher-cis-benchmark. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | image: 6 | cisoperator: 7 | repository: rancher/cis-operator 8 | tag: v1.4.0-rc.1 9 | securityScan: 10 | repository: rancher/security-scan 11 | tag: v0.6.0-rc.2 12 | sonobuoy: 13 | repository: rancher/mirrored-sonobuoy-sonobuoy 14 | tag: v0.57.3 15 | 16 | resources: {} 17 | # We usually recommend not to specify default resources and to leave this as a conscious 18 | # choice for the user. This also increases chances charts run on environments with little 19 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 20 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
21 | # limits: 22 | # cpu: 100m 23 | # memory: 128Mi 24 | # requests: 25 | # cpu: 100m 26 | # memory: 128Mi 27 | 28 | ## Node labels for pod assignment 29 | ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ 30 | ## 31 | nodeSelector: {} 32 | 33 | ## List of node taints to tolerate (requires Kubernetes >= 1.6) 34 | tolerations: [] 35 | 36 | securityScanJob: 37 | overrideTolerations: false 38 | tolerations: [] 39 | 40 | affinity: {} 41 | 42 | global: 43 | cattle: 44 | systemDefaultRegistry: "" 45 | clusterName: "" 46 | kubectl: 47 | repository: rancher/kubectl 48 | tag: v1.31.9 49 | 50 | alerts: 51 | enabled: false 52 | severity: warning 53 | metricsPort: 8080 54 | -------------------------------------------------------------------------------- /crds/clusterscan.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: clusterscans.cis.cattle.io 5 | spec: 6 | group: cis.cattle.io 7 | names: 8 | kind: ClusterScan 9 | plural: clusterscans 10 | singular: clusterscan 11 | scope: Cluster 12 | versions: 13 | - additionalPrinterColumns: 14 | - jsonPath: .status.lastRunScanProfileName 15 | name: ClusterScanProfile 16 | type: string 17 | - jsonPath: .status.summary.total 18 | name: Total 19 | type: string 20 | - jsonPath: .status.summary.pass 21 | name: Pass 22 | type: string 23 | - jsonPath: .status.summary.fail 24 | name: Fail 25 | type: string 26 | - jsonPath: .status.summary.skip 27 | name: Skip 28 | type: string 29 | - jsonPath: .status.summary.warn 30 | name: Warn 31 | type: string 32 | - jsonPath: .status.summary.notApplicable 33 | name: Not Applicable 34 | type: string 35 | - jsonPath: .status.lastRunTimestamp 36 | name: LastRunTimestamp 37 | type: string 38 | - jsonPath: .spec.scheduledScanConfig.cronSchedule 39 | name: CronSchedule 40 | type: string 41 | name: v1 42 | schema: 43 | openAPIV3Schema: 44 | properties: 45 | spec: 46 | properties: 47 | scanProfileName: 48 | nullable: true 49 | type: string 50 | scheduledScanConfig: 51 | nullable: true 52 | properties: 53 | cronSchedule: 54 | nullable: true 55 | type: string 56 | retentionCount: 57 | type: integer 58 | scanAlertRule: 59 | nullable: true 60 | properties: 61 | alertOnComplete: 62 | type: boolean 63 | alertOnFailure: 64 | type: boolean 65 | type: object 66 | type: object 67 | scoreWarning: 68 | enum: 69 | - pass 70 | - fail 71 | nullable: true 72 | type: string 73 | type: object 74 | status: 75 | properties: 76 | NextScanAt: 77 | nullable: true 78 | type: string 79 | ScanAlertingRuleName: 80 | nullable: true 81 | type: string 82 | conditions: 83 | items: 84 | properties: 85 | lastTransitionTime: 86 | nullable: true 87 | type: string 88 | lastUpdateTime: 89 | nullable: true 90 | type: string 91 | message: 92 | nullable: true 93 | type: string 94 | reason: 95 | nullable: true 96 | type: string 97 | status: 98 | nullable: true 99 | type: string 100 | type: 101 | nullable: true 102 | type: string 103 | type: object 104 | nullable: true 105 | type: array 106 | display: 107 | nullable: true 108 | properties: 109 | error: 110 | type: boolean 111 | message: 112 | nullable: true 113 | type: string 114 | state: 115 | nullable: true 116 | type: string 117 | transitioning: 118 | type: boolean 119 | type: object 120 | lastRunScanProfileName: 121 | nullable: true 122 | type: string 123 | lastRunTimestamp: 124 | nullable: true 125 | type: string 126 | observedGeneration: 127 | type: integer 128 | summary: 129 | 
nullable: true 130 | properties: 131 | fail: 132 | type: integer 133 | notApplicable: 134 | type: integer 135 | pass: 136 | type: integer 137 | skip: 138 | type: integer 139 | total: 140 | type: integer 141 | warn: 142 | type: integer 143 | type: object 144 | type: object 145 | type: object 146 | served: true 147 | storage: true 148 | subresources: 149 | status: {} 150 | -------------------------------------------------------------------------------- /crds/clusterscanbenchmark.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: clusterscanbenchmarks.cis.cattle.io 5 | spec: 6 | group: cis.cattle.io 7 | names: 8 | kind: ClusterScanBenchmark 9 | plural: clusterscanbenchmarks 10 | singular: clusterscanbenchmark 11 | scope: Cluster 12 | versions: 13 | - additionalPrinterColumns: 14 | - jsonPath: .spec.clusterProvider 15 | name: ClusterProvider 16 | type: string 17 | - jsonPath: .spec.minKubernetesVersion 18 | name: MinKubernetesVersion 19 | type: string 20 | - jsonPath: .spec.maxKubernetesVersion 21 | name: MaxKubernetesVersion 22 | type: string 23 | - jsonPath: .spec.customBenchmarkConfigMapName 24 | name: customBenchmarkConfigMapName 25 | type: string 26 | - jsonPath: .spec.customBenchmarkConfigMapNamespace 27 | name: customBenchmarkConfigMapNamespace 28 | type: string 29 | name: v1 30 | schema: 31 | openAPIV3Schema: 32 | properties: 33 | spec: 34 | properties: 35 | clusterProvider: 36 | nullable: true 37 | type: string 38 | customBenchmarkConfigMapName: 39 | nullable: true 40 | type: string 41 | customBenchmarkConfigMapNamespace: 42 | nullable: true 43 | type: string 44 | maxKubernetesVersion: 45 | nullable: true 46 | type: string 47 | minKubernetesVersion: 48 | nullable: true 49 | type: string 50 | type: object 51 | type: object 52 | served: true 53 | storage: true 54 | subresources: 55 | status: {} 56 | -------------------------------------------------------------------------------- /crds/clusterscanprofile.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: clusterscanprofiles.cis.cattle.io 5 | spec: 6 | group: cis.cattle.io 7 | names: 8 | kind: ClusterScanProfile 9 | plural: clusterscanprofiles 10 | singular: clusterscanprofile 11 | scope: Cluster 12 | versions: 13 | - additionalPrinterColumns: 14 | - jsonPath: .spec.benchmarkVersion 15 | name: BenchmarkVersion 16 | type: string 17 | name: v1 18 | schema: 19 | openAPIV3Schema: 20 | properties: 21 | spec: 22 | properties: 23 | benchmarkVersion: 24 | nullable: true 25 | type: string 26 | skipTests: 27 | items: 28 | nullable: true 29 | type: string 30 | nullable: true 31 | type: array 32 | type: object 33 | type: object 34 | served: true 35 | storage: true 36 | subresources: 37 | status: {} 38 | -------------------------------------------------------------------------------- /crds/clusterscanreport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: clusterscanreports.cis.cattle.io 5 | spec: 6 | group: cis.cattle.io 7 | names: 8 | kind: ClusterScanReport 9 | plural: clusterscanreports 10 | singular: clusterscanreport 11 | scope: Cluster 12 | versions: 13 | - additionalPrinterColumns: 14 | - jsonPath: .spec.lastRunTimestamp 15 | name: 
LastRunTimestamp 16 | type: string 17 | - jsonPath: .spec.benchmarkVersion 18 | name: BenchmarkVersion 19 | type: string 20 | name: v1 21 | schema: 22 | openAPIV3Schema: 23 | properties: 24 | spec: 25 | properties: 26 | benchmarkVersion: 27 | nullable: true 28 | type: string 29 | lastRunTimestamp: 30 | nullable: true 31 | type: string 32 | reportJSON: 33 | nullable: true 34 | type: string 35 | type: object 36 | type: object 37 | served: true 38 | storage: true 39 | subresources: 40 | status: {} 41 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/rancher/cis-operator 2 | 3 | go 1.23.6 4 | 5 | require ( 6 | github.com/blang/semver v3.5.1+incompatible 7 | github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.81.0 8 | github.com/prometheus-operator/prometheus-operator/pkg/client v0.81.0 9 | github.com/prometheus/client_golang v1.22.0 10 | github.com/rancher/kubernetes-provider-detector v0.1.5 11 | github.com/rancher/lasso v0.2.1 12 | github.com/rancher/security-scan v0.6.1 13 | github.com/rancher/wrangler/v3 v3.2.0 14 | github.com/robfig/cron v1.2.0 15 | github.com/sirupsen/logrus v1.9.3 16 | github.com/urfave/cli/v2 v2.27.6 17 | golang.org/x/crypto/x509roots/fallback v0.0.0-20250531095911-4f9f0ca9fcfb 18 | k8s.io/api v0.32.5 19 | k8s.io/apiextensions-apiserver v0.32.5 20 | k8s.io/apimachinery v0.32.5 21 | k8s.io/client-go v0.32.5 22 | ) 23 | 24 | require ( 25 | github.com/aquasecurity/kube-bench v0.10.4 // indirect 26 | github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect 27 | github.com/aws/aws-sdk-go-v2/service/securityhub v1.57.0 // indirect 28 | github.com/aws/smithy-go v1.22.2 // indirect 29 | github.com/beorn7/perks v1.0.1 // indirect 30 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 31 | github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect 32 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 33 | github.com/emicklei/go-restful/v3 v3.12.1 // indirect 34 | github.com/evanphx/json-patch v5.9.11+incompatible // indirect 35 | github.com/fsnotify/fsnotify v1.8.0 // indirect 36 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 37 | github.com/ghodss/yaml v1.0.0 // indirect 38 | github.com/go-logr/logr v1.4.2 // indirect 39 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 40 | github.com/go-openapi/jsonreference v0.21.0 // indirect 41 | github.com/go-openapi/swag v0.23.0 // indirect 42 | github.com/go-viper/mapstructure/v2 v2.2.1 // indirect 43 | github.com/gogo/protobuf v1.3.2 // indirect 44 | github.com/golang/glog v1.2.4 // indirect 45 | github.com/golang/protobuf v1.5.4 // indirect 46 | github.com/google/gnostic-models v0.6.9 // indirect 47 | github.com/google/go-cmp v0.7.0 // indirect 48 | github.com/google/gofuzz v1.2.0 // indirect 49 | github.com/google/uuid v1.6.0 // indirect 50 | github.com/josharian/intern v1.0.0 // indirect 51 | github.com/json-iterator/go v1.1.12 // indirect 52 | github.com/mailru/easyjson v0.9.0 // indirect 53 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 54 | github.com/modern-go/reflect2 v1.0.2 // indirect 55 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 56 | github.com/onsi/ginkgo v1.16.5 // indirect 57 | github.com/pelletier/go-toml/v2 v2.2.3 // indirect 58 | github.com/pkg/errors v0.9.1 // indirect 59 | github.com/prometheus/client_model v0.6.1 // indirect 60 | github.com/prometheus/common v0.62.0 
// indirect 61 | github.com/prometheus/procfs v0.15.1 // indirect 62 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 63 | github.com/sagikazarmark/locafero v0.7.0 // indirect 64 | github.com/sourcegraph/conc v0.3.0 // indirect 65 | github.com/spf13/afero v1.12.0 // indirect 66 | github.com/spf13/cast v1.7.1 // indirect 67 | github.com/spf13/pflag v1.0.6 // indirect 68 | github.com/spf13/viper v1.20.1 // indirect 69 | github.com/subosito/gotenv v1.6.0 // indirect 70 | github.com/x448/float16 v0.8.4 // indirect 71 | github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect 72 | go.uber.org/multierr v1.11.0 // indirect 73 | golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect 74 | golang.org/x/mod v0.23.0 // indirect 75 | golang.org/x/net v0.38.0 // indirect 76 | golang.org/x/oauth2 v0.27.0 // indirect 77 | golang.org/x/sync v0.12.0 // indirect 78 | golang.org/x/sys v0.31.0 // indirect 79 | golang.org/x/term v0.30.0 // indirect 80 | golang.org/x/text v0.23.0 // indirect 81 | golang.org/x/time v0.9.0 // indirect 82 | golang.org/x/tools v0.30.0 // indirect 83 | google.golang.org/protobuf v1.36.5 // indirect 84 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 85 | gopkg.in/inf.v0 v0.9.1 // indirect 86 | gopkg.in/yaml.v2 v2.4.0 // indirect 87 | gopkg.in/yaml.v3 v3.0.1 // indirect 88 | k8s.io/code-generator v0.32.5 // indirect 89 | k8s.io/gengo v0.0.0-20250130153323-76c5745d3511 // indirect 90 | k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect 91 | k8s.io/klog/v2 v2.130.1 // indirect 92 | k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect 93 | k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect 94 | sigs.k8s.io/controller-runtime v0.20.3 // indirect 95 | sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect 96 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect 97 | sigs.k8s.io/yaml v1.4.0 // indirect 98 | ) 99 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright YEAR Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | -------------------------------------------------------------------------------- /hack/e2e: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eox pipefail 3 | 4 | # If specific binaries not passed on, falls back to default which would need 5 | # to exist within PATH. 
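# Example invocation (an assumed sketch; how CI or the Makefile actually calls
# this script is outside this excerpt). IMAGE is the only variable below with
# no default and must point at a locally built cis-operator image, e.g.
#   IMAGE=rancher/cis-operator:dev RUNNER=podman ./hack/e2e
# Everything else falls back to whatever docker/kubectl/helm/k3d are on PATH;
# yq and jq are also required further down.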
6 | export RUNNER="${RUNNER:-docker}" 7 | export KUBECTL="${KUBECTL:-kubectl}" 8 | export HELM="${HELM:-helm}" 9 | export K3D="${K3D:-k3d}" 10 | 11 | export ARCH="${ARCH:-amd64}" 12 | export IMAGE="${IMAGE}" 13 | 14 | export SECURITY_SCAN_IMAGE="$(yq .image.securityScan.repository < ./chart/values.yaml):$(yq .image.securityScan.tag < ./chart/values.yaml)" 15 | export SONOBUOY_IMAGE="$(yq .image.sonobuoy.repository < ./chart/values.yaml):$(yq .image.sonobuoy.tag < ./chart/values.yaml)" 16 | 17 | CLUSTER_NAME="cis-op-e2e-${RANDOM}" 18 | E2E_TIMEOUT_SECONDS=200 19 | 20 | CANCELLING="" 21 | 22 | function cleanup() { 23 | CANCELLING="true" 24 | echo "Cleaning up cluster..." 25 | ${K3D} cluster rm "${CLUSTER_NAME}" 26 | } 27 | trap cleanup EXIT 28 | 29 | function pull_image() { 30 | EXTERNAL_IMAGE=$1 31 | echo "> Pull and import ${EXTERNAL_IMAGE} into cluster" 32 | ${RUNNER} pull "${EXTERNAL_IMAGE}" 33 | ${K3D} image import "${EXTERNAL_IMAGE}" -c "${CLUSTER_NAME}" 34 | } 35 | 36 | function dump_logs() { 37 | ${KUBECTL} get pods -n cis-operator-system --show-labels 38 | echo "RUNNER LOGS:" 39 | ${KUBECTL} logs -n cis-operator-system -l app.kubernetes.io/instance=security-scan-runner-k3s-e2e-scan || true 40 | echo "SONOBUOY LOGS (rancher-kube-bench):" 41 | ${KUBECTL} logs -n cis-operator-system -l component=sonobuoy -c rancher-kube-bench || true 42 | echo "SONOBUOY LOGS (sonobuoy-worker):" 43 | ${KUBECTL} logs -n cis-operator-system -l component=sonobuoy -c sonobuoy-worker || true 44 | } 45 | 46 | echo "Running E2E tests" 47 | sleep "${E2E_TIMEOUT_SECONDS}" && cleanup | false & 48 | 49 | ${RUNNER} build -t local-k3s -f tests/Dockerfile.k3s tests 50 | 51 | echo "> Spinning up k3d cluster" 52 | # After a few executions k3d can have problems with evictions: 53 | # https://k3d.io/v5.0.1/faq/faq/#pods-evicted-due-to-lack-of-disk-space 54 | ${K3D} cluster create "${CLUSTER_NAME}" --no-lb --kubeconfig-update-default --image local-k3s \ 55 | --k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@server:0' \ 56 | --k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@server:0' 57 | 58 | # Load built image into k3d. 59 | echo "> Import ${IMAGE} into cluster" 60 | ${K3D} image import "${IMAGE}" -c "${CLUSTER_NAME}" 61 | 62 | pull_image "${SECURITY_SCAN_IMAGE}" 63 | pull_image "${SONOBUOY_IMAGE}" 64 | 65 | # cis-operator may have intermittent issues if key components 66 | # from the cluster aren't ready. 67 | echo "> Wait for k3d base components to be ready" 68 | ${KUBECTL} wait node "k3d-${CLUSTER_NAME}-server-0" --for=condition=ready --timeout=45s 69 | ${KUBECTL} wait --timeout=60s --for=condition=ready -n kube-system pod -l app=local-path-provisioner 70 | ${KUBECTL} wait --timeout=60s --for=condition=ready -n kube-system pod -l k8s-app=kube-dns 71 | 72 | echo "> Deploying cis-operator" 73 | ${KUBECTL} apply -f ./crds 74 | 75 | ${HELM} install --create-namespace --namespace cis-operator-system \ 76 | --set "image.cisoperator.repository=${IMAGE%%:*}" \ 77 | --set "image.cisoperator.tag=${IMAGE#*:}" \ 78 | cis-benchmark ./chart 79 | 80 | echo "> Wait for cis-operator to be ready" 81 | # Can't kubectl wait before the deployment schedules the pod, so 82 | # wait 10 seconds for that to happen first. 
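# (Sketch of a polling alternative, kept as a comment for reference only; the
# fixed sleep below is what the test actually relies on:
#   until ${KUBECTL} -n cis-operator-system get pod \
#       -l cis.cattle.io/operator=cis-operator 2>/dev/null | grep -q . ; do sleep 1; done
# The label selector is the same one passed to the wait command that follows.)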
83 | sleep 10 84 | ${KUBECTL} wait --for=condition=ready -n cis-operator-system pod -l cis.cattle.io/operator=cis-operator --timeout=30s 85 | 86 | echo "> Create ClusterScan" 87 | ${KUBECTL} apply -f tests/k3s-bench-test.yaml 88 | 89 | ${RUNNER} exec "k3d-${CLUSTER_NAME}-server-0" /usr/local/bin/kube-apiserver & 90 | 91 | # Keep trying to check if the ClusterScan had any tests that passed. 92 | # This is a good indication that all the mechanics of cis-operator 93 | # are working as expected. 94 | # 95 | # As soon as passing tests are detected, exit the e2e. If none is found, 96 | # the tests will eventually timeout based on E2E_TIMEOUT_SECONDS. 97 | while (true) 98 | do 99 | if [ -n "${CANCELLING}" ]; then 100 | break 101 | fi 102 | 103 | json=$(${KUBECTL} get ClusterScan k3s-e2e-scan -o jsonpath='{.status.summary}') 104 | if [ -n "${json}" ]; then 105 | passed=$(echo "${json}" | jq '.pass') 106 | total=$(echo "${json}" | jq '.total') 107 | fail=$(echo "${json}" | jq '.fail') 108 | 109 | if [ "${passed}" -gt "0" ]; then 110 | echo "> cis-operator worked successfully" 111 | 112 | ${KUBECTL} get ClusterScan -o yaml 113 | ${KUBECTL} get ClusterScanReport -o yaml -A || true 114 | 115 | # Compare expected vs actual results 116 | diff <(jq -S . tests/k3d-expected.json) <(echo "${json}" | jq -S .) || true 117 | 118 | exit 0 119 | fi 120 | 121 | if [ "${total}" == "${fail}" ]; then 122 | echo "ERR: ALL TESTS FAILED!" 123 | exit 1 124 | fi 125 | fi 126 | 127 | dump_logs 128 | sleep 2 129 | done 130 | -------------------------------------------------------------------------------- /hack/make/build.mk: -------------------------------------------------------------------------------- 1 | ifeq ($(VERSION),) 2 | # Define VERSION, which is used for image tags or to bake it into the 3 | # compiled binary to enable the printing of the application version, 4 | # via the --version flag. 5 | CHANGES = $(shell git status --porcelain --untracked-files=no) 6 | ifneq ($(CHANGES),) 7 | DIRTY = -dirty 8 | endif 9 | 10 | # Prioritise DRONE_TAG for backwards compatibility. However, the git tag 11 | # command should be able to gather the current tag, except when the git 12 | # clone operation was done with "--no-tags". 13 | ifneq ($(DRONE_TAG),) 14 | GIT_TAG = $(DRONE_TAG) 15 | else 16 | GIT_TAG = $(shell git tag -l --contains HEAD | head -n 1) 17 | endif 18 | 19 | COMMIT = $(shell git rev-parse --short HEAD) 20 | VERSION = $(COMMIT)$(DIRTY) 21 | 22 | # Override VERSION with the Git tag if the current HEAD has a tag pointing to 23 | # it AND the worktree isn't dirty. 24 | ifneq ($(GIT_TAG),) 25 | ifeq ($(DIRTY),) 26 | VERSION = $(GIT_TAG) 27 | endif 28 | endif 29 | endif 30 | 31 | RUNNER := docker 32 | IMAGE_BUILDER := $(RUNNER) buildx 33 | MACHINE := rancher 34 | BUILDX_ARGS ?= --sbom=true --attest type=provenance,mode=max 35 | 36 | ifeq ($(TAG),) 37 | TAG = $(VERSION) 38 | ifneq ($(DIRTY),) 39 | TAG = dev 40 | endif 41 | endif 42 | 43 | GO := go 44 | 45 | # Leans on Pure Go for the network stack and os/user. For more information: 46 | # - https://github.com/golang/go/blob/4cd201b14b6216e72ffa175747c20d1191e5eb57/src/net/net.go#L39-L81 47 | # - https://github.com/golang/go/blob/4cd201b14b6216e72ffa175747c20d1191e5eb57/src/os/user/user.go#L6-L17 48 | GO_TAGS := netgo osusergo 49 | LINKFLAGS := -X github.com/rancher/cis-operator.Version=$(VERSION) \ 50 | -X github.com/rancher/cis-operator.GitCommit=$(COMMIT) 51 | 52 | # Statically link the binary, unless when building in Darwin. 
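# (For orientation: the build recipe that consumes GO, GO_TAGS, LINKFLAGS and
# TARGET_BIN is defined outside this excerpt; an assumed sketch of it would be
# roughly:
#   CGO_ENABLED=0 $(GO) build -tags "$(GO_TAGS)" -ldflags "$(LINKFLAGS)" -o $(TARGET_BIN)
# Treat that as illustrative only. The static-link flags themselves are
# appended to LINKFLAGS just below, and only when not building on Darwin.)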
53 | ifneq ($(shell uname -s), Darwin) 54 | LINKFLAGS := $(LINKFLAGS) -extldflags -static -w -s 55 | endif 56 | 57 | # Define the target platforms that can be used across the ecosystem. 58 | # Note that what would actually be used for a given project will be 59 | # defined in TARGET_PLATFORMS, and must be a subset of the below: 60 | DEFAULT_PLATFORMS := linux/amd64,linux/arm64,linux/x390s,linux/riscv64 61 | 62 | .PHONY: help 63 | help: ## display Makefile's help. 64 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 65 | 66 | buildx-machine: ## create rancher dockerbuildx machine targeting platform defined by DEFAULT_PLATFORMS. 67 | @docker buildx ls | grep $(MACHINE) || \ 68 | docker buildx create --name=$(MACHINE) --platform=$(DEFAULT_PLATFORMS) 69 | -------------------------------------------------------------------------------- /hack/make/deps.mk: -------------------------------------------------------------------------------- 1 | # renovate: datasource=github-release-attachments depName=golangci/golangci-lint 2 | GOLANGCI_VERSION = v2.1.6 3 | # renovate: datasource=github-release-attachments depName=k3d-io/k3d 4 | K3D_VERSION = v5.8.3 5 | 6 | KUBECTL_VERSION = 1.31.9 7 | # renovate: datasource=github-release-attachments depName=helm/helm 8 | HELM_VERSION = v3.18.2 9 | -------------------------------------------------------------------------------- /hack/make/tools.mk: -------------------------------------------------------------------------------- 1 | TOOLS_BIN := $(shell mkdir -p build/tools && realpath build/tools) 2 | OS_NAME = $(shell uname -s | tr A-Z a-z) 3 | OS_ARCH = $(shell uname -m) 4 | 5 | ifeq ($(OS_ARCH),x86_64) 6 | OS_ARCH = amd64 7 | endif 8 | ifeq ($(OS_ARCH),aarch64) 9 | OS_ARCH = arm64 10 | endif 11 | 12 | K3D = $(TOOLS_BIN)/k3d-$(K3D_VERSION) 13 | $(K3D): 14 | rm -f $(TOOLS_BIN)/k3d* 15 | curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | \ 16 | PATH=$(PATH):$(TOOLS_BIN) K3D_INSTALL_DIR="$(TOOLS_BIN)" TAG="$(K3D_VERSION)" USE_SUDO=false bash 17 | mv $(TOOLS_BIN)/k3d $(TOOLS_BIN)/k3d-$(K3D_VERSION) 18 | 19 | GOLANGCI = $(TOOLS_BIN)/golangci-lint-$(GOLANGCI_VERSION) 20 | $(GOLANGCI): 21 | rm -f $(TOOLS_BIN)/golangci-lint* 22 | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(TOOLS_BIN) $(GOLANGCI_VERSION) 23 | mv $(TOOLS_BIN)/golangci-lint $(TOOLS_BIN)/golangci-lint-$(GOLANGCI_VERSION) 24 | 25 | KUBECTL = $(TOOLS_BIN)/kubectl-$(KUBECTL_VERSION) 26 | $(KUBECTL): 27 | rm -f $(TOOLS_BIN)/kubectl* 28 | curl --output $(KUBECTL) -sSfL "https://dl.k8s.io/release/v$(KUBECTL_VERSION)/bin/$(OS_NAME)/$(OS_ARCH)/kubectl" 29 | echo "$(shell curl -L "https://dl.k8s.io/release/v$(KUBECTL_VERSION)/bin/$(OS_NAME)/$(OS_ARCH)/kubectl.sha256") $(KUBECTL)" | shasum -a 256 -c - 30 | chmod u+x $(KUBECTL) 31 | 32 | HELM = $(TOOLS_BIN)/helm-$(HELM_VERSION) 33 | $(HELM): 34 | rm -rf $(TOOLS_BIN)/helm* 35 | mkdir -p $(TOOLS_BIN)/tmp-helm 36 | curl --output $(TOOLS_BIN)/helm-$(HELM_VERSION)-$(OS_NAME)-$(OS_ARCH).tar.gz -sSfL "https://get.helm.sh/helm-$(HELM_VERSION)-$(OS_NAME)-$(OS_ARCH).tar.gz" 37 | $(call indirect-value,HELM_SUM) 38 | cd $(TOOLS_BIN) && echo "$(shell curl -L "https://get.helm.sh/helm-$(HELM_VERSION)-$(OS_NAME)-$(OS_ARCH).tar.gz.sha256sum")" | shasum -a 256 -c - 39 | tar -xf $(TOOLS_BIN)/helm-$(HELM_VERSION)-$(OS_NAME)-$(OS_ARCH).tar.gz 
--strip-components 1 -C $(TOOLS_BIN)/tmp-helm 40 | mv $(TOOLS_BIN)/tmp-helm/helm $(HELM) 41 | chmod u+x $(HELM) 42 | rm -rf $(TOOLS_BIN)/helm-$(HELM_VERSION)-$(OS_NAME)-$(OS_ARCH).tar.gz $(TOOLS_BIN)/tmp-helm 43 | 44 | # go-install-tool will 'go install' any package $2 and install it as $1. 45 | define go-install-tool 46 | @[ -f $(1) ] || { \ 47 | set -e ;\ 48 | echo "Downloading $(2)" ;\ 49 | GOBIN=$(TOOLS_BIN) go install $(2) ;\ 50 | } 51 | endef 52 | -------------------------------------------------------------------------------- /hack/upload-gh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eo pipefail 3 | 4 | BASE_DIR="build/bin" 5 | 6 | function usage(){ 7 | echo "$0 " 8 | exit 1 9 | } 10 | 11 | function check_input(){ 12 | [ -z "${TAG}" ] && echo "TAG is not set." && exit 1 13 | return 0 14 | } 15 | 16 | function checksum_file(){ 17 | arch=$1 18 | 19 | if [[ ! -f "${BASE_DIR}/cis-operator-${arch}" ]]; then 20 | echo "file ${BASE_DIR}/cis-operator-${arch} not found" 21 | exit 1 22 | fi 23 | 24 | sha256sum "${BASE_DIR}/cis-operator-${arch}" | sed "s;${BASE_DIR}/;;g" \ 25 | > "${BASE_DIR}/sha256sum-${arch}.txt" 26 | } 27 | 28 | function upload_files(){ 29 | arch=$1 30 | gh release upload "${TAG}" "${BASE_DIR}/cis-operator-${arch}" 31 | gh release upload "${TAG}" "${BASE_DIR}/sha256sum-${arch}.txt" 32 | } 33 | 34 | function main() 35 | { 36 | check_input 37 | checksum_file "$1" 38 | upload_files "$1" 39 | } 40 | 41 | [[ -z "$1" ]] && usage 42 | 43 | main "$1" 44 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | //go:generate go run pkg/codegen/cleanup/main.go 2 | //go:generate /bin/rm -rf pkg/generated 3 | //go:generate go run pkg/codegen/main.go 4 | 5 | package main 6 | 7 | import ( 8 | "context" 9 | "encoding/json" 10 | "errors" 11 | "fmt" 12 | "os" 13 | "time" 14 | 15 | "github.com/rancher/wrangler/v3/pkg/kubeconfig" 16 | "github.com/rancher/wrangler/v3/pkg/signals" 17 | "github.com/sirupsen/logrus" 18 | "github.com/urfave/cli/v2" 19 | 20 | "log" 21 | "net/http" 22 | 23 | "github.com/prometheus/client_golang/prometheus/promhttp" 24 | 25 | cisoperatorapiv1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 26 | cisoperator "github.com/rancher/cis-operator/pkg/securityscan" 27 | 28 | // Automatically sets fallback trusted x509 roots, in case they are 29 | // not available at runtime. This is required to establish trust 30 | // when deployed into a scratch container. 31 | _ "golang.org/x/crypto/x509roots/fallback" 32 | 33 | // Embed a copy of the timezone database, so that it does not depend 34 | // on it being available at runtime. 35 | _ "time/tzdata" 36 | 37 | corev1 "k8s.io/api/core/v1" 38 | ) 39 | 40 | var ( 41 | Version = "v0.0.0-dev" 42 | GitCommit = "HEAD" 43 | kubeConfig string 44 | threads int 45 | name string 46 | metricsPort string 47 | alertSeverity string 48 | debug bool 49 | securityScanImage string 50 | securityScanImageTag string 51 | sonobuoyImage string 52 | sonobuoyImageTag string 53 | clusterName string 54 | securityScanJobTolerationsVal string 55 | ) 56 | 57 | func main() { 58 | app := cli.NewApp() 59 | app.Name = "cis-operator" 60 | app.Version = fmt.Sprintf("%s (%s)", Version, GitCommit) 61 | app.Usage = "cis-operator needs help!" 
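	// (Illustrative note: every flag below can also be supplied through the
	// environment variable listed in its EnvVars field, which is how the Helm
	// chart's deployment template configures the operator, e.g.
	//   SECURITY_SCAN_IMAGE_TAG=v0.6.0 CIS_OPERATOR_DEBUG=true ./cis-operator
	// This is an assumed local invocation, shown only to connect the two pieces.)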
62 | app.Flags = []cli.Flag{ 63 | &cli.StringFlag{ 64 | Name: "kubeconfig", 65 | EnvVars: []string{"KUBECONFIG"}, 66 | Destination: &kubeConfig, 67 | }, 68 | &cli.IntFlag{ 69 | Name: "threads", 70 | EnvVars: []string{"CIS_OPERATOR_THREADS"}, 71 | Value: 2, 72 | Destination: &threads, 73 | }, 74 | &cli.StringFlag{ 75 | Name: "name", 76 | EnvVars: []string{"CIS_OPERATOR_NAME"}, 77 | Value: "cis-operator", 78 | Destination: &name, 79 | }, 80 | &cli.StringFlag{ 81 | Name: "security-scan-image", 82 | EnvVars: []string{"SECURITY_SCAN_IMAGE"}, 83 | Value: "rancher/security-scan", 84 | Destination: &securityScanImage, 85 | }, 86 | &cli.StringFlag{ 87 | Name: "security-scan-image-tag", 88 | EnvVars: []string{"SECURITY_SCAN_IMAGE_TAG"}, 89 | Value: "latest", 90 | Destination: &securityScanImageTag, 91 | }, 92 | &cli.StringFlag{ 93 | Name: "sonobuoy-image", 94 | EnvVars: []string{"SONOBUOY_IMAGE"}, 95 | Value: "rancher/sonobuoy-sonobuoy", 96 | Destination: &sonobuoyImage, 97 | }, 98 | &cli.StringFlag{ 99 | Name: "sonobuoy-image-tag", 100 | EnvVars: []string{"SONOBUOY_IMAGE_TAG"}, 101 | Value: "latest", 102 | Destination: &sonobuoyImageTag, 103 | }, 104 | &cli.StringFlag{ 105 | Name: "cis_metrics_port", 106 | EnvVars: []string{"CIS_METRICS_PORT"}, 107 | Value: "8080", 108 | Destination: &metricsPort, 109 | }, 110 | &cli.BoolFlag{ 111 | Name: "debug", 112 | EnvVars: []string{"CIS_OPERATOR_DEBUG"}, 113 | Destination: &debug, 114 | }, 115 | &cli.StringFlag{ 116 | Name: "alertSeverity", 117 | EnvVars: []string{"CIS_ALERTS_SEVERITY"}, 118 | Value: "warning", 119 | Destination: &alertSeverity, 120 | }, 121 | &cli.StringFlag{ 122 | Name: "clusterName", 123 | EnvVars: []string{"CLUSTER_NAME"}, 124 | Value: "", 125 | Destination: &clusterName, 126 | }, 127 | &cli.StringFlag{ 128 | Name: "security-scan-job-tolerations", 129 | EnvVars: []string{"SECURITY_SCAN_JOB_TOLERATIONS"}, 130 | Value: "", 131 | Destination: &securityScanJobTolerationsVal, 132 | }, 133 | &cli.BoolFlag{ 134 | Name: "alertEnabled", 135 | EnvVars: []string{"CIS_ALERTS_ENABLED"}, 136 | }, 137 | } 138 | app.Action = run 139 | 140 | if err := app.Run(os.Args); err != nil { 141 | logrus.Fatal(err) 142 | } 143 | } 144 | 145 | func run(c *cli.Context) error { 146 | logrus.Info("Starting CIS-Operator") 147 | 148 | ctx := context.Background() 149 | handler := signals.SetupSignalHandler() 150 | go func() { 151 | <-handler 152 | ctx.Done() 153 | }() 154 | 155 | if debug { 156 | logrus.SetLevel(logrus.DebugLevel) 157 | } 158 | kubeConfig = c.String("kubeconfig") 159 | threads = c.Int("threads") 160 | securityScanImage = c.String("security-scan-image") 161 | securityScanImageTag = c.String("security-scan-image-tag") 162 | sonobuoyImage = c.String("sonobuoy-image") 163 | sonobuoyImageTag = c.String("sonobuoy-image-tag") 164 | name = c.String("name") 165 | 166 | securityScanJobTolerations := []corev1.Toleration{{ 167 | Operator: corev1.TolerationOpExists, 168 | }} 169 | 170 | securityScanJobTolerationsVal = c.String("security-scan-job-tolerations") 171 | 172 | if securityScanJobTolerationsVal != "" { 173 | err := json.Unmarshal([]byte(securityScanJobTolerationsVal), &securityScanJobTolerations) 174 | if err != nil { 175 | logrus.Fatalf("invalid value received for security-scan-job-tolerations flag:%s", err.Error()) 176 | } 177 | } 178 | 179 | kubeConfig, err := kubeconfig.GetNonInteractiveClientConfig(kubeConfig).ClientConfig() 180 | if err != nil { 181 | logrus.Fatalf("failed to find kubeconfig: %v", err) 182 | } 183 | 184 | imgConfig := 
&cisoperatorapiv1.ScanImageConfig{ 185 | SecurityScanImage: securityScanImage, 186 | SecurityScanImageTag: securityScanImageTag, 187 | SonobuoyImage: sonobuoyImage, 188 | SonobuoyImageTag: sonobuoyImageTag, 189 | AlertSeverity: alertSeverity, 190 | ClusterName: clusterName, 191 | AlertEnabled: c.Bool("alertEnabled"), 192 | } 193 | 194 | if err := validateConfig(imgConfig); err != nil { 195 | logrus.Fatalf("Error starting CIS-Operator: %v", err) 196 | } 197 | 198 | ctl, err := cisoperator.NewController(ctx, kubeConfig, cisoperatorapiv1.ClusterScanNS, name, imgConfig, securityScanJobTolerations) 199 | if err != nil { 200 | logrus.Fatalf("Error building controller: %s", err.Error()) 201 | } 202 | 203 | if err := ctl.Start(ctx, threads, 2*time.Hour); err != nil { 204 | logrus.Fatalf("Error starting: %v", err) 205 | } 206 | 207 | http.Handle("/metrics", promhttp.Handler()) 208 | if err := http.ListenAndServe(":"+metricsPort, nil); err != nil { 209 | log.Fatal(err) 210 | } 211 | 212 | <-handler 213 | ctx.Done() 214 | logrus.Info("Registered CIS controller") 215 | return nil 216 | } 217 | 218 | func validateConfig(imgConfig *cisoperatorapiv1.ScanImageConfig) error { 219 | if imgConfig.SecurityScanImage == "" { 220 | return errors.New("No Security-Scan Image specified") 221 | } 222 | if imgConfig.SonobuoyImage == "" { 223 | return errors.New("No Sonobuoy tool Image specified") 224 | } 225 | return nil 226 | } 227 | -------------------------------------------------------------------------------- /package/Dockerfile: -------------------------------------------------------------------------------- 1 | # Image that provides cross compilation tooling. 2 | FROM --platform=$BUILDPLATFORM rancher/mirrored-tonistiigi-xx:1.5.0 AS xx 3 | 4 | FROM --platform=$BUILDPLATFORM registry.suse.com/bci/golang:1.24 AS builder 5 | 6 | # There is no real need for containers to fully comply with the 7 | # Filesystem Hierarchy Standard (FHS). However, some applications 8 | # could malfunction if some specific basic dirs are not available. 9 | # Therefore, create top level structure. 10 | # 11 | # https://refspecs.linuxfoundation.org/FHS_3.0/fhs/index.html 12 | RUN mkdir -p /final/boot && \ 13 | mkdir -p /final/etc && \ 14 | mkdir -p /final/home && \ 15 | mkdir -p /final/lib && \ 16 | mkdir -p /final/lib64 && \ 17 | mkdir -p /final/media && \ 18 | mkdir -p /final/mnt && \ 19 | mkdir -p /final/opt && \ 20 | mkdir -p /final/run && \ 21 | mkdir -p /final/usr/sbin && \ 22 | mkdir -p /final/var/lib/nobody 23 | 24 | # Some dirs require very specific permissions. 25 | RUN install -dv -m 0750 /final/root && \ 26 | install -dv -m 1777 /final/tmp /final/var/tmp 27 | 28 | # Keep name search configured in line with BCI. 29 | RUN cp /etc/nsswitch.conf /final/etc 30 | 31 | # Differs from BCI, by removing /bin/sh from root: 32 | RUN echo "root:x:0:0:root:/root:/usr/bin/false\nnobody:x:65534:65534:nobody:/var/lib/nobody:/usr/bin/false" > /final/etc/passwd 33 | 34 | RUN cp /etc/shadow /final/etc 35 | RUN cp /etc/group /final/etc 36 | 37 | WORKDIR /src 38 | COPY go.sum \ 39 | go.mod \ 40 | Makefile \ 41 | /src 42 | RUN go mod download 43 | 44 | COPY pkg /src/pkg 45 | COPY hack /src/hack 46 | COPY main.go /src 47 | 48 | # Ensures that the binary that was built was cross-compiled correctly 49 | # and is valid on the target platform. 
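# (Assumed invocation of this Dockerfile for a local multi-arch build, shown
# for illustration only; the repo's own tooling sets up a buildx machine in
# hack/make/build.mk:
#   docker buildx build --platform linux/amd64,linux/arm64 \
#     --build-arg VERSION=v0.0.0-dev -f package/Dockerfile -t rancher/cis-operator:dev .
# The xx tooling copied in next provides the xx-go wrapper and the xx-verify
# check used further down.)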
50 | COPY --from=xx / / 51 | 52 | ARG TARGETPLATFORM 53 | RUN xx-go --wrap && mkdir -p /run/lock 54 | 55 | # By setting the version as an argument, we can avoid running the version logic 56 | # a second time (inside the Docker build process). Therefore, removing the need 57 | # to access the .git dir. 58 | ARG VERSION 59 | RUN VERSION=${VERSION} TARGET_BIN=/final/usr/bin/cis-operator make build 60 | RUN xx-verify --static /final/usr/bin/cis-operator 61 | 62 | FROM scratch AS final 63 | 64 | COPY --from=builder /final/ / 65 | 66 | # Aligns nobody user ID with BCI. 67 | USER 65534:65534 68 | ENV PATH=/usr/bin 69 | 70 | CMD ["/usr/bin/cis-operator"] 71 | -------------------------------------------------------------------------------- /pkg/apis/cis.cattle.io/constants.go: -------------------------------------------------------------------------------- 1 | package cis 2 | 3 | const ( 4 | // LabelController is the name of the cis controller. 5 | LabelController = GroupName + `/controller` 6 | 7 | // LabelNode is the node being upgraded. 8 | LabelProfile = GroupName + `/clusterscanprofile` 9 | 10 | // LabelPlan is the plan being applied. 11 | LabelClusterScan = GroupName + `/scan` 12 | 13 | SonobuoyCompletionAnnotation = "field.cattle.io/sonobuoyDone" 14 | ) 15 | -------------------------------------------------------------------------------- /pkg/apis/cis.cattle.io/v1/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 
18 | 19 | // +k8s:deepcopy-gen=package 20 | // +groupName=cis.cattle.io 21 | package v1 22 | -------------------------------------------------------------------------------- /pkg/apis/cis.cattle.io/v1/types.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | condition "github.com/rancher/cis-operator/pkg/condition" 5 | "github.com/rancher/wrangler/v3/pkg/genericcondition" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | const ( 10 | ClusterProviderRKE = "rke" 11 | ClusterProviderEKS = "eks" 12 | ClusterProviderGKE = "gke" 13 | ClusterProviderAKS = "aks" 14 | ClusterProviderK3s = "k3s" 15 | 16 | CISV1NS = "security-scan" 17 | ClusterScanNS = "cis-operator-system" 18 | ClusterScanSA = "cis-serviceaccount" 19 | ClusterScanConfigMap = "cis-s-config-cm" 20 | ClusterScanPluginsConfigMap = "cis-s-plugins-cm" 21 | ClusterScanUserSkipConfigMap = "cis-s-user-skip-cm" 22 | DefaultClusterScanProfileConfigMap = "default-clusterscanprofiles" 23 | ClusterScanService = "service-rancher-cis-benchmark" 24 | DefaultScanOutputFileName = "output.json" 25 | DefaultRetention = 3 26 | DefaultCronSchedule = "0 0 * * *" 27 | CustomBenchmarkBaseDir = "/etc/kbs/custombenchmark/cfg" 28 | CustomBenchmarkConfigMap = "cis-bmark-cm" 29 | 30 | ClusterScanConditionCreated = condition.Cond("Created") 31 | ClusterScanConditionPending = condition.Cond("Pending") 32 | ClusterScanConditionRunCompleted = condition.Cond("RunCompleted") 33 | ClusterScanConditionComplete = condition.Cond("Complete") 34 | ClusterScanConditionFailed = condition.Cond("Failed") 35 | ClusterScanConditionAlerted = condition.Cond("Alerted") 36 | ClusterScanConditionReconciling = condition.Cond("Reconciling") 37 | ClusterScanConditionStalled = condition.Cond("Stalled") 38 | 39 | ClusterScanFailOnWarning = "fail" 40 | ClusterScanPassOnWarning = "pass" 41 | ) 42 | 43 | // +genclient 44 | // +genclient:nonNamespaced 45 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 46 | 47 | type ClusterScan struct { 48 | metav1.TypeMeta `json:",inline"` 49 | metav1.ObjectMeta `json:"metadata,omitempty"` 50 | 51 | Spec ClusterScanSpec `json:"spec"` 52 | Status ClusterScanStatus `yaml:"status" json:"status,omitempty"` 53 | } 54 | 55 | type ClusterScanSpec struct { 56 | // scan profile to use 57 | ScanProfileName string `json:"scanProfileName,omitempty"` 58 | //config for scheduled scan 59 | ScheduledScanConfig *ScheduledScanConfig `yaml:"scheduled_scan_config" json:"scheduledScanConfig,omitempty"` 60 | // Specify if tests with "warn" output should be counted towards scan failure 61 | ScoreWarning string `yaml:"score_warning" json:"scoreWarning,omitempty"` 62 | } 63 | 64 | type ClusterScanStatus struct { 65 | Display *ClusterScanStatusDisplay `json:"display,omitempty"` 66 | LastRunTimestamp string `yaml:"last_run_timestamp" json:"lastRunTimestamp"` 67 | LastRunScanProfileName string `json:"lastRunScanProfileName,omitempty"` 68 | Summary *ClusterScanSummary `json:"summary,omitempty"` 69 | ObservedGeneration int64 `json:"observedGeneration"` 70 | Conditions []genericcondition.GenericCondition `json:"conditions,omitempty"` 71 | NextScanAt string `json:"NextScanAt"` 72 | ScanAlertingRuleName string `json:"ScanAlertingRuleName"` 73 | } 74 | 75 | type ClusterScanStatusDisplay struct { 76 | State string `json:"state"` 77 | Message string `json:"message"` 78 | Error bool `json:"error"` 79 | Transitioning bool `json:"transitioning"` 80 | } 81 | 82 | type ClusterScanSummary struct 
{ 83 | Total int `json:"total"` 84 | Pass int `json:"pass"` 85 | Fail int `json:"fail"` 86 | Skip int `json:"skip"` 87 | Warn int `json:"warn"` 88 | NotApplicable int `json:"notApplicable"` 89 | } 90 | 91 | type ScheduledScanConfig struct { 92 | // Cron Expression for Schedule 93 | CronSchedule string `yaml:"cron_schedule" json:"cronSchedule,omitempty"` 94 | // Number of past scans to keep 95 | RetentionCount int `yaml:"retentionCount" json:"retentionCount,omitempty"` 96 | //configure the alerts to be sent out 97 | ScanAlertRule *ClusterScanAlertRule `json:"scanAlertRule,omitempty"` 98 | } 99 | 100 | type ClusterScanAlertRule struct { 101 | AlertOnComplete bool `json:"alertOnComplete,omitempty"` 102 | AlertOnFailure bool `json:"alertOnFailure,omitempty"` 103 | } 104 | 105 | // +genclient 106 | // +genclient:nonNamespaced 107 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 108 | 109 | type ClusterScanBenchmark struct { 110 | metav1.TypeMeta `json:",inline"` 111 | metav1.ObjectMeta `json:"metadata,omitempty"` 112 | 113 | Spec ClusterScanBenchmarkSpec `json:"spec"` 114 | } 115 | 116 | type ClusterScanBenchmarkSpec struct { 117 | ClusterProvider string `json:"clusterProvider,omitempty"` 118 | MinKubernetesVersion string `json:"minKubernetesVersion,omitempty"` 119 | MaxKubernetesVersion string `json:"maxKubernetesVersion,omitempty"` 120 | 121 | CustomBenchmarkConfigMapName string `json:"customBenchmarkConfigMapName,omitempty"` 122 | CustomBenchmarkConfigMapNamespace string `json:"customBenchmarkConfigMapNamespace,omitempty"` 123 | } 124 | 125 | // +genclient 126 | // +genclient:nonNamespaced 127 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 128 | 129 | type ClusterScanProfile struct { 130 | metav1.TypeMeta `json:",inline"` 131 | metav1.ObjectMeta `json:"metadata,omitempty"` 132 | 133 | Spec ClusterScanProfileSpec `json:"spec"` 134 | } 135 | 136 | type ClusterScanProfileSpec struct { 137 | BenchmarkVersion string `json:"benchmarkVersion,omitempty"` 138 | SkipTests []string `json:"skipTests,omitempty"` 139 | } 140 | 141 | // +genclient 142 | // +genclient:nonNamespaced 143 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 144 | 145 | type ClusterScanReport struct { 146 | metav1.TypeMeta `json:",inline"` 147 | metav1.ObjectMeta `json:"metadata,omitempty"` 148 | 149 | Spec ClusterScanReportSpec `json:"spec"` 150 | } 151 | 152 | type ClusterScanReportSpec struct { 153 | BenchmarkVersion string `json:"benchmarkVersion,omitempty"` 154 | LastRunTimestamp string `yaml:"last_run_timestamp" json:"lastRunTimestamp"` 155 | ReportJSON string `json:"reportJSON"` 156 | } 157 | 158 | type ScanImageConfig struct { 159 | SecurityScanImage string 160 | SecurityScanImageTag string 161 | SonobuoyImage string 162 | SonobuoyImageTag string 163 | AlertSeverity string 164 | ClusterName string 165 | AlertEnabled bool 166 | } 167 | -------------------------------------------------------------------------------- /pkg/apis/cis.cattle.io/v1/zz_generated_list_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | // +k8s:deepcopy-gen=package 20 | // +groupName=cis.cattle.io 21 | package v1 22 | 23 | import ( 24 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | ) 26 | 27 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 28 | 29 | // ClusterScanList is a list of ClusterScan resources 30 | type ClusterScanList struct { 31 | metav1.TypeMeta `json:",inline"` 32 | metav1.ListMeta `json:"metadata"` 33 | 34 | Items []ClusterScan `json:"items"` 35 | } 36 | 37 | func NewClusterScan(namespace, name string, obj ClusterScan) *ClusterScan { 38 | obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("ClusterScan").ToAPIVersionAndKind() 39 | obj.Name = name 40 | obj.Namespace = namespace 41 | return &obj 42 | } 43 | 44 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 45 | 46 | // ClusterScanProfileList is a list of ClusterScanProfile resources 47 | type ClusterScanProfileList struct { 48 | metav1.TypeMeta `json:",inline"` 49 | metav1.ListMeta `json:"metadata"` 50 | 51 | Items []ClusterScanProfile `json:"items"` 52 | } 53 | 54 | func NewClusterScanProfile(namespace, name string, obj ClusterScanProfile) *ClusterScanProfile { 55 | obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("ClusterScanProfile").ToAPIVersionAndKind() 56 | obj.Name = name 57 | obj.Namespace = namespace 58 | return &obj 59 | } 60 | 61 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 62 | 63 | // ClusterScanReportList is a list of ClusterScanReport resources 64 | type ClusterScanReportList struct { 65 | metav1.TypeMeta `json:",inline"` 66 | metav1.ListMeta `json:"metadata"` 67 | 68 | Items []ClusterScanReport `json:"items"` 69 | } 70 | 71 | func NewClusterScanReport(namespace, name string, obj ClusterScanReport) *ClusterScanReport { 72 | obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("ClusterScanReport").ToAPIVersionAndKind() 73 | obj.Name = name 74 | obj.Namespace = namespace 75 | return &obj 76 | } 77 | 78 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 79 | 80 | // ClusterScanBenchmarkList is a list of ClusterScanBenchmark resources 81 | type ClusterScanBenchmarkList struct { 82 | metav1.TypeMeta `json:",inline"` 83 | metav1.ListMeta `json:"metadata"` 84 | 85 | Items []ClusterScanBenchmark `json:"items"` 86 | } 87 | 88 | func NewClusterScanBenchmark(namespace, name string, obj ClusterScanBenchmark) *ClusterScanBenchmark { 89 | obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("ClusterScanBenchmark").ToAPIVersionAndKind() 90 | obj.Name = name 91 | obj.Namespace = namespace 92 | return &obj 93 | } 94 | -------------------------------------------------------------------------------- /pkg/apis/cis.cattle.io/v1/zz_generated_register.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | // +k8s:deepcopy-gen=package 20 | // +groupName=cis.cattle.io 21 | package v1 22 | 23 | import ( 24 | cis "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io" 25 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 26 | "k8s.io/apimachinery/pkg/runtime" 27 | "k8s.io/apimachinery/pkg/runtime/schema" 28 | ) 29 | 30 | var ( 31 | ClusterScanResourceName = "clusterscans" 32 | ClusterScanBenchmarkResourceName = "clusterscanbenchmarks" 33 | ClusterScanProfileResourceName = "clusterscanprofiles" 34 | ClusterScanReportResourceName = "clusterscanreports" 35 | ) 36 | 37 | // SchemeGroupVersion is group version used to register these objects 38 | var SchemeGroupVersion = schema.GroupVersion{Group: cis.GroupName, Version: "v1"} 39 | 40 | // Kind takes an unqualified kind and returns back a Group qualified GroupKind 41 | func Kind(kind string) schema.GroupKind { 42 | return SchemeGroupVersion.WithKind(kind).GroupKind() 43 | } 44 | 45 | // Resource takes an unqualified resource and returns a Group qualified GroupResource 46 | func Resource(resource string) schema.GroupResource { 47 | return SchemeGroupVersion.WithResource(resource).GroupResource() 48 | } 49 | 50 | var ( 51 | SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) 52 | AddToScheme = SchemeBuilder.AddToScheme 53 | ) 54 | 55 | // Adds the list of known types to Scheme. 56 | func addKnownTypes(scheme *runtime.Scheme) error { 57 | scheme.AddKnownTypes(SchemeGroupVersion, 58 | &ClusterScan{}, 59 | &ClusterScanList{}, 60 | &ClusterScanBenchmark{}, 61 | &ClusterScanBenchmarkList{}, 62 | &ClusterScanProfile{}, 63 | &ClusterScanProfileList{}, 64 | &ClusterScanReport{}, 65 | &ClusterScanReportList{}, 66 | ) 67 | metav1.AddToGroupVersion(scheme, SchemeGroupVersion) 68 | return nil 69 | } 70 | -------------------------------------------------------------------------------- /pkg/apis/cis.cattle.io/zz_generated_register.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package cis 20 | 21 | const ( 22 | // Package-wide consts from generator "zz_generated_register". 
23 | GroupName = "cis.cattle.io" 24 | ) 25 | -------------------------------------------------------------------------------- /pkg/codegen/cleanup/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/rancher/wrangler/v3/pkg/cleanup" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func main() { 11 | if err := cleanup.Cleanup("./pkg/apis"); err != nil { 12 | logrus.Fatal(err) 13 | } 14 | if err := os.RemoveAll("./pkg/generated"); err != nil { 15 | logrus.Fatal(err) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /pkg/codegen/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | controllergen "github.com/rancher/wrangler/v3/pkg/controller-gen" 7 | "github.com/rancher/wrangler/v3/pkg/controller-gen/args" 8 | 9 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 10 | "github.com/rancher/cis-operator/pkg/crds" 11 | ) 12 | 13 | func main() { 14 | os.Unsetenv("GOPATH") 15 | controllergen.Run(args.Options{ 16 | OutputPackage: "github.com/rancher/cis-operator/pkg/generated", 17 | Boilerplate: "hack/boilerplate.go.txt", 18 | Groups: map[string]args.Group{ 19 | "cis.cattle.io": { 20 | Types: []interface{}{ 21 | v1.ClusterScan{}, 22 | v1.ClusterScanProfile{}, 23 | v1.ClusterScanReport{}, 24 | v1.ClusterScanBenchmark{}, 25 | }, 26 | GenerateTypes: true, 27 | }, 28 | }, 29 | }) 30 | 31 | err := crds.WriteCRD() 32 | if err != nil { 33 | panic(err) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /pkg/condition/condition.go: -------------------------------------------------------------------------------- 1 | package condition 2 | 3 | // adapted from rancher/wrangler 4 | 5 | import ( 6 | "reflect" 7 | "time" 8 | 9 | "github.com/sirupsen/logrus" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | ) 12 | 13 | type Cond string 14 | 15 | func (c Cond) GetStatus(obj interface{}) string { 16 | return getStatus(obj, string(c)) 17 | } 18 | 19 | func (c Cond) SetError(obj interface{}, reason string, err error) { 20 | if err == nil { 21 | c.True(obj) 22 | c.Message(obj, "") 23 | c.Reason(obj, reason) 24 | return 25 | } 26 | if reason == "" { 27 | reason = "Error" 28 | } 29 | c.False(obj) 30 | c.Message(obj, err.Error()) 31 | c.Reason(obj, reason) 32 | } 33 | 34 | func (c Cond) MatchesError(obj interface{}, reason string, err error) bool { 35 | if err == nil { 36 | return c.IsTrue(obj) && 37 | c.GetMessage(obj) == "" && 38 | c.GetReason(obj) == reason 39 | } 40 | if reason == "" { 41 | reason = "Error" 42 | } 43 | return c.IsFalse(obj) && 44 | c.GetMessage(obj) == err.Error() && 45 | c.GetReason(obj) == reason 46 | } 47 | 48 | func (c Cond) SetStatus(obj interface{}, status string) { 49 | setStatus(obj, string(c), status) 50 | } 51 | 52 | func (c Cond) SetStatusBool(obj interface{}, val bool) { 53 | if val { 54 | setStatus(obj, string(c), "True") 55 | } else { 56 | setStatus(obj, string(c), "False") 57 | } 58 | } 59 | 60 | func (c Cond) True(obj interface{}) { 61 | setStatus(obj, string(c), "True") 62 | } 63 | 64 | func (c Cond) IsTrue(obj interface{}) bool { 65 | return getStatus(obj, string(c)) == "True" 66 | } 67 | 68 | func (c Cond) False(obj interface{}) { 69 | setStatus(obj, string(c), "False") 70 | } 71 | 72 | func (c Cond) IsFalse(obj interface{}) bool { 73 | return getStatus(obj, string(c)) == "False" 74 | } 
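// A minimal usage sketch for Cond from a caller's point of view (the condition
// name, the `scan` pointer, and `err` below are illustrative and not taken from
// this repository); any pointer to a struct exposing a Status.Conditions slice
// works, because these helpers resolve the fields via reflection:
//
//	var ScanComplete = condition.Cond("ScanComplete")
//	ScanComplete.True(scan)              // sets the condition's Status to "True"
//	done := ScanComplete.IsTrue(scan)    // reads it back
//	ScanComplete.SetError(scan, "", err) // non-nil err: Status "False", Reason "Error", Message err.Error()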
75 | 76 | func (c Cond) Unknown(obj interface{}) { 77 | setStatus(obj, string(c), "Unknown") 78 | } 79 | 80 | func (c Cond) IsUnknown(obj interface{}) bool { 81 | return getStatus(obj, string(c)) == "Unknown" 82 | } 83 | 84 | func (c Cond) LastUpdated(obj interface{}, ts string) { 85 | setTS(obj, string(c), ts) 86 | } 87 | 88 | func (c Cond) GetLastUpdated(obj interface{}) string { 89 | return getTS(obj, string(c)) 90 | } 91 | 92 | func (c Cond) GetLastTransitionTime(obj interface{}) time.Time { 93 | return getLastTransitionTime(obj, string(c)) 94 | } 95 | 96 | func (c Cond) CreateUnknownIfNotExists(obj interface{}) { 97 | condSlice := getValue(obj, "Status", "Conditions") 98 | cond := findCond(obj, condSlice, string(c)) 99 | if cond == nil { 100 | c.Unknown(obj) 101 | } 102 | } 103 | 104 | func (c Cond) Reason(obj interface{}, reason string) { 105 | cond := findOrCreateCond(obj, string(c)) 106 | getFieldValue(cond, "Reason").SetString(reason) 107 | } 108 | 109 | func (c Cond) GetReason(obj interface{}) string { 110 | cond := findOrNotCreateCond(obj, string(c)) 111 | if cond == nil { 112 | return "" 113 | } 114 | return getFieldValue(*cond, "Reason").String() 115 | } 116 | 117 | func (c Cond) SetMessageIfBlank(obj interface{}, message string) { 118 | if c.GetMessage(obj) == "" { 119 | c.Message(obj, message) 120 | } 121 | } 122 | 123 | func (c Cond) Message(obj interface{}, message string) { 124 | cond := findOrCreateCond(obj, string(c)) 125 | setValue(cond, "Message", message) 126 | } 127 | 128 | func (c Cond) GetMessage(obj interface{}) string { 129 | cond := findOrNotCreateCond(obj, string(c)) 130 | if cond == nil { 131 | return "" 132 | } 133 | return getFieldValue(*cond, "Message").String() 134 | } 135 | 136 | func touchTS(value reflect.Value) { 137 | now := time.Now().Format(time.RFC3339) 138 | getFieldValue(value, "LastUpdateTime").SetString(now) 139 | } 140 | 141 | func getStatus(obj interface{}, condName string) string { 142 | cond := findOrNotCreateCond(obj, condName) 143 | if cond == nil { 144 | return "" 145 | } 146 | return getFieldValue(*cond, "Status").String() 147 | } 148 | 149 | func setTS(obj interface{}, condName, ts string) { 150 | cond := findOrCreateCond(obj, condName) 151 | getFieldValue(cond, "LastUpdateTime").SetString(ts) 152 | } 153 | 154 | func getTS(obj interface{}, condName string) string { 155 | cond := findOrNotCreateCond(obj, condName) 156 | if cond == nil { 157 | return "" 158 | } 159 | return getFieldValue(*cond, "LastUpdateTime").String() 160 | } 161 | 162 | func getLastTransitionTime(obj interface{}, condName string) time.Time { 163 | cond := findOrNotCreateCond(obj, condName) 164 | if cond == nil { 165 | return time.Time{} 166 | } 167 | value := getFieldValue(*cond, "LastTransitionTime").Interface() 168 | if value == nil { 169 | return time.Time{} 170 | } 171 | return value.(metav1.Time).Time 172 | } 173 | 174 | func setStatus(obj interface{}, condName, status string) { 175 | cond := findOrCreateCond(obj, condName) 176 | setValue(cond, "Status", status) 177 | } 178 | 179 | func setValue(cond reflect.Value, fieldName, newValue string) { 180 | value := getFieldValue(cond, fieldName) 181 | if value.String() != newValue { 182 | value.SetString(newValue) 183 | touchTS(cond) 184 | } 185 | } 186 | 187 | func findOrNotCreateCond(obj interface{}, condName string) *reflect.Value { 188 | condSlice := getValue(obj, "Status", "Conditions") 189 | return findCond(obj, condSlice, condName) 190 | } 191 | 192 | func findOrCreateCond(obj interface{}, condName string) 
reflect.Value { 193 | condSlice := getValue(obj, "Status", "Conditions") 194 | if !condSlice.IsValid() { 195 | condSlice = getValue(obj, "Conditions") 196 | } 197 | cond := findCond(obj, condSlice, condName) 198 | if cond != nil { 199 | return *cond 200 | } 201 | 202 | newCond := reflect.New(condSlice.Type().Elem()).Elem() 203 | newCond.FieldByName("Type").SetString(condName) 204 | newCond.FieldByName("Status").SetString("Unknown") 205 | condSlice.Set(reflect.Append(condSlice, newCond)) 206 | return *findCond(obj, condSlice, condName) 207 | } 208 | 209 | func findCond(obj interface{}, val reflect.Value, name string) *reflect.Value { 210 | defer func() { 211 | if recover() != nil { 212 | logrus.Fatalf("failed to find .Status.Conditions field on %v", reflect.TypeOf(obj)) 213 | } 214 | }() 215 | 216 | for i := 0; i < val.Len(); i++ { 217 | cond := val.Index(i) 218 | typeVal := getFieldValue(cond, "Type") 219 | if typeVal.String() == name { 220 | return &cond 221 | } 222 | } 223 | 224 | return nil 225 | } 226 | 227 | func getValue(obj interface{}, name ...string) reflect.Value { 228 | if obj == nil { 229 | return reflect.Value{} 230 | } 231 | v := reflect.ValueOf(obj) 232 | t := v.Type() 233 | if t.Kind() == reflect.Ptr { 234 | v = v.Elem() 235 | t = v.Type() 236 | } 237 | 238 | field := v.FieldByName(name[0]) 239 | if len(name) == 1 { 240 | return field 241 | } 242 | return getFieldValue(field, name[1:]...) 243 | } 244 | 245 | func getFieldValue(v reflect.Value, name ...string) reflect.Value { 246 | if len(name) == 0 { 247 | return reflect.Value{} 248 | } 249 | field := v.FieldByName(name[0]) 250 | if len(name) == 1 { 251 | return field 252 | } 253 | return getFieldValue(field, name[1:]...) 254 | } 255 | 256 | func Error(reason string, err error) error { 257 | return &conditionError{ 258 | reason: reason, 259 | message: err.Error(), 260 | } 261 | } 262 | 263 | type conditionError struct { 264 | reason string 265 | message string 266 | } 267 | 268 | func (e *conditionError) Error() string { 269 | return e.message 270 | } 271 | -------------------------------------------------------------------------------- /pkg/crds/crd.go: -------------------------------------------------------------------------------- 1 | package crds 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "strings" 8 | 9 | cisoperator "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 10 | "github.com/rancher/wrangler/v3/pkg/crd" 11 | _ "github.com/rancher/wrangler/v3/pkg/generated/controllers/apiextensions.k8s.io" //using init 12 | "github.com/rancher/wrangler/v3/pkg/yaml" 13 | apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/runtime" 16 | "k8s.io/apimachinery/pkg/runtime/schema" 17 | ) 18 | 19 | func WriteCRD() error { 20 | for _, crdDef := range List() { 21 | bCrd, err := crdDef.ToCustomResourceDefinition() 22 | if err != nil { 23 | return err 24 | } 25 | newObj, _ := bCrd.(*unstructured.Unstructured) 26 | var crd apiextv1.CustomResourceDefinition 27 | if err := runtime.DefaultUnstructuredConverter.FromUnstructured(newObj.Object, &crd); err != nil { 28 | return err 29 | } 30 | 31 | if crd.Name == "clusterscans.cis.cattle.io" { 32 | customizeClusterScan(&crd) 33 | } 34 | yamlBytes, err := yaml.Export(&crd) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | filename := fmt.Sprintf("./crds/%s.yaml", strings.ToLower(crd.Spec.Names.Kind)) 40 | err = os.WriteFile(filename, yamlBytes, 0o600) 41 | if err != 
nil { 42 | return err 43 | } 44 | } 45 | return nil 46 | } 47 | 48 | func List() []crd.CRD { 49 | return []crd.CRD{ 50 | newCRD(&cisoperator.ClusterScan{}, func(c crd.CRD) crd.CRD { 51 | return c. 52 | WithColumn("ClusterScanProfile", ".status.lastRunScanProfileName"). 53 | WithColumn("Total", ".status.summary.total"). 54 | WithColumn("Pass", ".status.summary.pass"). 55 | WithColumn("Fail", ".status.summary.fail"). 56 | WithColumn("Skip", ".status.summary.skip"). 57 | WithColumn("Warn", ".status.summary.warn"). 58 | WithColumn("Not Applicable", ".status.summary.notApplicable"). 59 | WithColumn("LastRunTimestamp", ".status.lastRunTimestamp"). 60 | WithColumn("CronSchedule", ".spec.scheduledScanConfig.cronSchedule") 61 | }), 62 | newCRD(&cisoperator.ClusterScanProfile{}, func(c crd.CRD) crd.CRD { 63 | return c. 64 | WithColumn("BenchmarkVersion", ".spec.benchmarkVersion") 65 | }), 66 | newCRD(&cisoperator.ClusterScanReport{}, func(c crd.CRD) crd.CRD { 67 | return c. 68 | WithColumn("LastRunTimestamp", ".spec.lastRunTimestamp"). 69 | WithColumn("BenchmarkVersion", ".spec.benchmarkVersion") 70 | }), 71 | newCRD(&cisoperator.ClusterScanBenchmark{}, func(c crd.CRD) crd.CRD { 72 | return c. 73 | WithColumn("ClusterProvider", ".spec.clusterProvider"). 74 | WithColumn("MinKubernetesVersion", ".spec.minKubernetesVersion"). 75 | WithColumn("MaxKubernetesVersion", ".spec.maxKubernetesVersion"). 76 | WithColumn("customBenchmarkConfigMapName", ".spec.customBenchmarkConfigMapName"). 77 | WithColumn("customBenchmarkConfigMapNamespace", ".spec.customBenchmarkConfigMapNamespace") 78 | }), 79 | } 80 | } 81 | 82 | func newCRD(obj interface{}, customize func(crd.CRD) crd.CRD) crd.CRD { 83 | crd := crd.CRD{ 84 | GVK: schema.GroupVersionKind{ 85 | Group: "cis.cattle.io", 86 | Version: "v1", 87 | }, 88 | NonNamespace: true, 89 | Status: true, 90 | SchemaObject: obj, 91 | } 92 | if customize != nil { 93 | crd = customize(crd) 94 | } 95 | return crd 96 | } 97 | 98 | func customizeClusterScan(clusterScan *apiextv1.CustomResourceDefinition) { 99 | properties := clusterScan.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties 100 | 101 | if len(properties) == 0 { 102 | return 103 | } 104 | 105 | spec := properties["spec"] 106 | scoreWarning := spec.Properties["scoreWarning"] 107 | passRaw, _ := json.Marshal(cisoperator.ClusterScanPassOnWarning) 108 | failRaw, _ := json.Marshal(cisoperator.ClusterScanFailOnWarning) 109 | scoreWarning.Enum = []apiextv1.JSON{{Raw: passRaw}, {Raw: failRaw}} 110 | spec.Properties["scoreWarning"] = scoreWarning 111 | properties["spec"] = spec 112 | } 113 | -------------------------------------------------------------------------------- /pkg/generated/controllers/cis.cattle.io/factory.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 
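// A sketch of how this generated factory is typically consumed, mirroring the
// wiring in pkg/securityscan/controller.go later in this document (the variable
// names and the elided error handling are illustrative):
//
//	cisFactory, err := cisoperatorctl.NewFactoryFromConfig(cfg) // cisoperatorctl aliases this package
//	if err != nil {
//		// handle the error
//	}
//	scans := cisFactory.Cis().V1().ClusterScan()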
18 | 19 | package cis 20 | 21 | import ( 22 | "github.com/rancher/lasso/pkg/controller" 23 | "github.com/rancher/wrangler/v3/pkg/generic" 24 | "k8s.io/client-go/rest" 25 | ) 26 | 27 | type Factory struct { 28 | *generic.Factory 29 | } 30 | 31 | func NewFactoryFromConfigOrDie(config *rest.Config) *Factory { 32 | f, err := NewFactoryFromConfig(config) 33 | if err != nil { 34 | panic(err) 35 | } 36 | return f 37 | } 38 | 39 | func NewFactoryFromConfig(config *rest.Config) (*Factory, error) { 40 | return NewFactoryFromConfigWithOptions(config, nil) 41 | } 42 | 43 | func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) { 44 | return NewFactoryFromConfigWithOptions(config, &FactoryOptions{ 45 | Namespace: namespace, 46 | }) 47 | } 48 | 49 | type FactoryOptions = generic.FactoryOptions 50 | 51 | func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) { 52 | f, err := generic.NewFactoryFromConfigWithOptions(config, opts) 53 | return &Factory{ 54 | Factory: f, 55 | }, err 56 | } 57 | 58 | func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory { 59 | f, err := NewFactoryFromConfigWithOptions(config, opts) 60 | if err != nil { 61 | panic(err) 62 | } 63 | return f 64 | } 65 | 66 | func (c *Factory) Cis() Interface { 67 | return New(c.ControllerFactory()) 68 | } 69 | 70 | func (c *Factory) WithAgent(userAgent string) Interface { 71 | return New(controller.NewSharedControllerFactoryWithAgent(userAgent, c.ControllerFactory())) 72 | } 73 | -------------------------------------------------------------------------------- /pkg/generated/controllers/cis.cattle.io/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package cis 20 | 21 | import ( 22 | v1 "github.com/rancher/cis-operator/pkg/generated/controllers/cis.cattle.io/v1" 23 | "github.com/rancher/lasso/pkg/controller" 24 | ) 25 | 26 | type Interface interface { 27 | V1() v1.Interface 28 | } 29 | 30 | type group struct { 31 | controllerFactory controller.SharedControllerFactory 32 | } 33 | 34 | // New returns a new Interface. 35 | func New(controllerFactory controller.SharedControllerFactory) Interface { 36 | return &group{ 37 | controllerFactory: controllerFactory, 38 | } 39 | } 40 | 41 | func (g *group) V1() v1.Interface { 42 | return v1.New(g.controllerFactory) 43 | } 44 | -------------------------------------------------------------------------------- /pkg/generated/controllers/cis.cattle.io/v1/clusterscan.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | "context" 23 | "sync" 24 | "time" 25 | 26 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 27 | "github.com/rancher/wrangler/v3/pkg/apply" 28 | "github.com/rancher/wrangler/v3/pkg/condition" 29 | "github.com/rancher/wrangler/v3/pkg/generic" 30 | "github.com/rancher/wrangler/v3/pkg/kv" 31 | "k8s.io/apimachinery/pkg/api/equality" 32 | "k8s.io/apimachinery/pkg/api/errors" 33 | "k8s.io/apimachinery/pkg/runtime" 34 | "k8s.io/apimachinery/pkg/runtime/schema" 35 | ) 36 | 37 | // ClusterScanController interface for managing ClusterScan resources. 38 | type ClusterScanController interface { 39 | generic.NonNamespacedControllerInterface[*v1.ClusterScan, *v1.ClusterScanList] 40 | } 41 | 42 | // ClusterScanClient interface for managing ClusterScan resources in Kubernetes. 43 | type ClusterScanClient interface { 44 | generic.NonNamespacedClientInterface[*v1.ClusterScan, *v1.ClusterScanList] 45 | } 46 | 47 | // ClusterScanCache interface for retrieving ClusterScan resources in memory. 48 | type ClusterScanCache interface { 49 | generic.NonNamespacedCacheInterface[*v1.ClusterScan] 50 | } 51 | 52 | // ClusterScanStatusHandler is executed for every added or modified ClusterScan. Should return the new status to be updated 53 | type ClusterScanStatusHandler func(obj *v1.ClusterScan, status v1.ClusterScanStatus) (v1.ClusterScanStatus, error) 54 | 55 | // ClusterScanGeneratingHandler is the top-level handler that is executed for every ClusterScan event. It extends ClusterScanStatusHandler by returning a slice of child objects to be passed to apply.Apply 56 | type ClusterScanGeneratingHandler func(obj *v1.ClusterScan, status v1.ClusterScanStatus) ([]runtime.Object, v1.ClusterScanStatus, error) 57 | 58 | // RegisterClusterScanStatusHandler configures a ClusterScanController to execute a ClusterScanStatusHandler for every event observed. 59 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 60 | func RegisterClusterScanStatusHandler(ctx context.Context, controller ClusterScanController, condition condition.Cond, name string, handler ClusterScanStatusHandler) { 61 | statusHandler := &clusterScanStatusHandler{ 62 | client: controller, 63 | condition: condition, 64 | handler: handler, 65 | } 66 | controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) 67 | } 68 | 69 | // RegisterClusterScanGeneratingHandler configures a ClusterScanController to execute a ClusterScanGeneratingHandler for every event observed, passing the returned objects to the provided apply.Apply.
70 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 71 | func RegisterClusterScanGeneratingHandler(ctx context.Context, controller ClusterScanController, apply apply.Apply, 72 | condition condition.Cond, name string, handler ClusterScanGeneratingHandler, opts *generic.GeneratingHandlerOptions) { 73 | statusHandler := &clusterScanGeneratingHandler{ 74 | ClusterScanGeneratingHandler: handler, 75 | apply: apply, 76 | name: name, 77 | gvk: controller.GroupVersionKind(), 78 | } 79 | if opts != nil { 80 | statusHandler.opts = *opts 81 | } 82 | controller.OnChange(ctx, name, statusHandler.Remove) 83 | RegisterClusterScanStatusHandler(ctx, controller, condition, name, statusHandler.Handle) 84 | } 85 | 86 | type clusterScanStatusHandler struct { 87 | client ClusterScanClient 88 | condition condition.Cond 89 | handler ClusterScanStatusHandler 90 | } 91 | 92 | // sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API 93 | func (a *clusterScanStatusHandler) sync(key string, obj *v1.ClusterScan) (*v1.ClusterScan, error) { 94 | if obj == nil { 95 | return obj, nil 96 | } 97 | 98 | origStatus := obj.Status.DeepCopy() 99 | obj = obj.DeepCopy() 100 | newStatus, err := a.handler(obj, obj.Status) 101 | if err != nil { 102 | // Revert to old status on error 103 | newStatus = *origStatus.DeepCopy() 104 | } 105 | 106 | if a.condition != "" { 107 | if errors.IsConflict(err) { 108 | a.condition.SetError(&newStatus, "", nil) 109 | } else { 110 | a.condition.SetError(&newStatus, "", err) 111 | } 112 | } 113 | if !equality.Semantic.DeepEqual(origStatus, &newStatus) { 114 | if a.condition != "" { 115 | // Since status has changed, update the lastUpdatedTime 116 | a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) 117 | } 118 | 119 | var newErr error 120 | obj.Status = newStatus 121 | newObj, newErr := a.client.UpdateStatus(obj) 122 | if err == nil { 123 | err = newErr 124 | } 125 | if newErr == nil { 126 | obj = newObj 127 | } 128 | } 129 | return obj, err 130 | } 131 | 132 | type clusterScanGeneratingHandler struct { 133 | ClusterScanGeneratingHandler 134 | apply apply.Apply 135 | opts generic.GeneratingHandlerOptions 136 | gvk schema.GroupVersionKind 137 | name string 138 | seen sync.Map 139 | } 140 | 141 | // Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied 142 | func (a *clusterScanGeneratingHandler) Remove(key string, obj *v1.ClusterScan) (*v1.ClusterScan, error) { 143 | if obj != nil { 144 | return obj, nil 145 | } 146 | 147 | obj = &v1.ClusterScan{} 148 | obj.Namespace, obj.Name = kv.RSplit(key, "/") 149 | obj.SetGroupVersionKind(a.gvk) 150 | 151 | if a.opts.UniqueApplyForResourceVersion { 152 | a.seen.Delete(key) 153 | } 154 | 155 | return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 156 | WithOwner(obj). 157 | WithSetID(a.name). 
158 | ApplyObjects() 159 | } 160 | 161 | // Handle executes the configured ClusterScanGeneratingHandler and passes the resulting objects to apply.Apply, finally returning the new status of the resource 162 | func (a *clusterScanGeneratingHandler) Handle(obj *v1.ClusterScan, status v1.ClusterScanStatus) (v1.ClusterScanStatus, error) { 163 | if !obj.DeletionTimestamp.IsZero() { 164 | return status, nil 165 | } 166 | 167 | objs, newStatus, err := a.ClusterScanGeneratingHandler(obj, status) 168 | if err != nil { 169 | return newStatus, err 170 | } 171 | if !a.isNewResourceVersion(obj) { 172 | return newStatus, nil 173 | } 174 | 175 | err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 176 | WithOwner(obj). 177 | WithSetID(a.name). 178 | ApplyObjects(objs...) 179 | if err != nil { 180 | return newStatus, err 181 | } 182 | a.storeResourceVersion(obj) 183 | return newStatus, nil 184 | } 185 | 186 | // isNewResourceVersion detects if a specific resource version was already successfully processed. 187 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 188 | func (a *clusterScanGeneratingHandler) isNewResourceVersion(obj *v1.ClusterScan) bool { 189 | if !a.opts.UniqueApplyForResourceVersion { 190 | return true 191 | } 192 | 193 | // Apply once per resource version 194 | key := obj.Namespace + "/" + obj.Name 195 | previous, ok := a.seen.Load(key) 196 | return !ok || previous != obj.ResourceVersion 197 | } 198 | 199 | // storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed 200 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 201 | func (a *clusterScanGeneratingHandler) storeResourceVersion(obj *v1.ClusterScan) { 202 | if !a.opts.UniqueApplyForResourceVersion { 203 | return 204 | } 205 | 206 | key := obj.Namespace + "/" + obj.Name 207 | a.seen.Store(key, obj.ResourceVersion) 208 | } 209 | -------------------------------------------------------------------------------- /pkg/generated/controllers/cis.cattle.io/v1/clusterscanbenchmark.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 23 | "github.com/rancher/wrangler/v3/pkg/generic" 24 | ) 25 | 26 | // ClusterScanBenchmarkController interface for managing ClusterScanBenchmark resources. 27 | type ClusterScanBenchmarkController interface { 28 | generic.NonNamespacedControllerInterface[*v1.ClusterScanBenchmark, *v1.ClusterScanBenchmarkList] 29 | } 30 | 31 | // ClusterScanBenchmarkClient interface for managing ClusterScanBenchmark resources in Kubernetes.
32 | type ClusterScanBenchmarkClient interface { 33 | generic.NonNamespacedClientInterface[*v1.ClusterScanBenchmark, *v1.ClusterScanBenchmarkList] 34 | } 35 | 36 | // ClusterScanBenchmarkCache interface for retrieving ClusterScanBenchmark resources in memory. 37 | type ClusterScanBenchmarkCache interface { 38 | generic.NonNamespacedCacheInterface[*v1.ClusterScanBenchmark] 39 | } 40 | -------------------------------------------------------------------------------- /pkg/generated/controllers/cis.cattle.io/v1/clusterscanprofile.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 23 | "github.com/rancher/wrangler/v3/pkg/generic" 24 | ) 25 | 26 | // ClusterScanProfileController interface for managing ClusterScanProfile resources. 27 | type ClusterScanProfileController interface { 28 | generic.NonNamespacedControllerInterface[*v1.ClusterScanProfile, *v1.ClusterScanProfileList] 29 | } 30 | 31 | // ClusterScanProfileClient interface for managing ClusterScanProfile resources in Kubernetes. 32 | type ClusterScanProfileClient interface { 33 | generic.NonNamespacedClientInterface[*v1.ClusterScanProfile, *v1.ClusterScanProfileList] 34 | } 35 | 36 | // ClusterScanProfileCache interface for retrieving ClusterScanProfile resources in memory. 37 | type ClusterScanProfileCache interface { 38 | generic.NonNamespacedCacheInterface[*v1.ClusterScanProfile] 39 | } 40 | -------------------------------------------------------------------------------- /pkg/generated/controllers/cis.cattle.io/v1/clusterscanreport.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 23 | "github.com/rancher/wrangler/v3/pkg/generic" 24 | ) 25 | 26 | // ClusterScanReportController interface for managing ClusterScanReport resources. 
27 | type ClusterScanReportController interface { 28 | generic.NonNamespacedControllerInterface[*v1.ClusterScanReport, *v1.ClusterScanReportList] 29 | } 30 | 31 | // ClusterScanReportClient interface for managing ClusterScanReport resources in Kubernetes. 32 | type ClusterScanReportClient interface { 33 | generic.NonNamespacedClientInterface[*v1.ClusterScanReport, *v1.ClusterScanReportList] 34 | } 35 | 36 | // ClusterScanReportCache interface for retrieving ClusterScanReport resources in memory. 37 | type ClusterScanReportCache interface { 38 | generic.NonNamespacedCacheInterface[*v1.ClusterScanReport] 39 | } 40 | -------------------------------------------------------------------------------- /pkg/generated/controllers/cis.cattle.io/v1/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 23 | "github.com/rancher/lasso/pkg/controller" 24 | "github.com/rancher/wrangler/v3/pkg/generic" 25 | "github.com/rancher/wrangler/v3/pkg/schemes" 26 | "k8s.io/apimachinery/pkg/runtime/schema" 27 | ) 28 | 29 | func init() { 30 | schemes.Register(v1.AddToScheme) 31 | } 32 | 33 | type Interface interface { 34 | ClusterScan() ClusterScanController 35 | ClusterScanBenchmark() ClusterScanBenchmarkController 36 | ClusterScanProfile() ClusterScanProfileController 37 | ClusterScanReport() ClusterScanReportController 38 | } 39 | 40 | func New(controllerFactory controller.SharedControllerFactory) Interface { 41 | return &version{ 42 | controllerFactory: controllerFactory, 43 | } 44 | } 45 | 46 | type version struct { 47 | controllerFactory controller.SharedControllerFactory 48 | } 49 | 50 | func (v *version) ClusterScan() ClusterScanController { 51 | return generic.NewNonNamespacedController[*v1.ClusterScan, *v1.ClusterScanList](schema.GroupVersionKind{Group: "cis.cattle.io", Version: "v1", Kind: "ClusterScan"}, "clusterscans", v.controllerFactory) 52 | } 53 | 54 | func (v *version) ClusterScanBenchmark() ClusterScanBenchmarkController { 55 | return generic.NewNonNamespacedController[*v1.ClusterScanBenchmark, *v1.ClusterScanBenchmarkList](schema.GroupVersionKind{Group: "cis.cattle.io", Version: "v1", Kind: "ClusterScanBenchmark"}, "clusterscanbenchmarks", v.controllerFactory) 56 | } 57 | 58 | func (v *version) ClusterScanProfile() ClusterScanProfileController { 59 | return generic.NewNonNamespacedController[*v1.ClusterScanProfile, *v1.ClusterScanProfileList](schema.GroupVersionKind{Group: "cis.cattle.io", Version: "v1", Kind: "ClusterScanProfile"}, "clusterscanprofiles", v.controllerFactory) 60 | } 61 | 62 | func (v *version) ClusterScanReport() ClusterScanReportController { 63 | return generic.NewNonNamespacedController[*v1.ClusterScanReport, *v1.ClusterScanReportList](schema.GroupVersionKind{Group: 
"cis.cattle.io", Version: "v1", Kind: "ClusterScanReport"}, "clusterscanreports", v.controllerFactory) 64 | } 65 | -------------------------------------------------------------------------------- /pkg/securityscan/alert/prometheusrule.go: -------------------------------------------------------------------------------- 1 | package alert 2 | 3 | import ( 4 | "bytes" 5 | _ "embed" // nolint 6 | "fmt" 7 | "text/template" 8 | 9 | meta1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | 11 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" 12 | k8Yaml "k8s.io/apimachinery/pkg/util/yaml" 13 | 14 | cisoperatorapiv1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 15 | "github.com/rancher/wrangler/v3/pkg/name" 16 | ) 17 | 18 | //go:embed templates/prometheusrule.template 19 | var prometheusRuleTemplate string 20 | 21 | const templateName = "prometheusrule.template" 22 | 23 | func NewPrometheusRule(clusterscan *cisoperatorapiv1.ClusterScan, clusterscanprofile *cisoperatorapiv1.ClusterScanProfile, imageConfig *cisoperatorapiv1.ScanImageConfig) (*monitoringv1.PrometheusRule, error) { 24 | configdata := map[string]interface{}{ 25 | "namespace": cisoperatorapiv1.ClusterScanNS, 26 | "name": name.SafeConcatName("rancher-cis-alerts", clusterscan.Name), 27 | "severity": imageConfig.AlertSeverity, 28 | "scanName": clusterscan.Name, 29 | "scanProfileName": clusterscanprofile.Name, 30 | "alertOnFailure": clusterscan.Spec.ScheduledScanConfig.ScanAlertRule.AlertOnFailure, 31 | "alertOnComplete": clusterscan.Spec.ScheduledScanConfig.ScanAlertRule.AlertOnComplete, 32 | "failOnWarn": clusterscan.Spec.ScoreWarning == cisoperatorapiv1.ClusterScanFailOnWarning, 33 | } 34 | scanAlertRule, err := generatePrometheusRule(clusterscan, configdata) 35 | if err != nil { 36 | return scanAlertRule, err 37 | } 38 | 39 | return scanAlertRule, nil 40 | } 41 | 42 | func generatePrometheusRule(clusterscan *cisoperatorapiv1.ClusterScan, data map[string]interface{}) (*monitoringv1.PrometheusRule, error) { 43 | scanAlertRule := &monitoringv1.PrometheusRule{} 44 | obj, err := parseTemplate(clusterscan, data) 45 | if err != nil { 46 | return nil, fmt.Errorf("Error parsing the template %w", err) 47 | } 48 | 49 | if err := obj.Decode(&scanAlertRule); err != nil { 50 | return nil, fmt.Errorf("Error decoding to template %w", err) 51 | } 52 | 53 | ownerRef := meta1.OwnerReference{ 54 | APIVersion: "cis.cattle.io/v1", 55 | Kind: "ClusterScan", 56 | Name: clusterscan.Name, 57 | UID: clusterscan.GetUID(), 58 | } 59 | scanAlertRule.ObjectMeta.OwnerReferences = append(scanAlertRule.ObjectMeta.OwnerReferences, ownerRef) 60 | 61 | return scanAlertRule, nil 62 | } 63 | 64 | func parseTemplate(_ *cisoperatorapiv1.ClusterScan, data map[string]interface{}) (*k8Yaml.YAMLOrJSONDecoder, error) { 65 | cmTemplate, err := template.New(templateName).Parse(prometheusRuleTemplate) 66 | if err != nil { 67 | return nil, err 68 | } 69 | 70 | var b bytes.Buffer 71 | err = cmTemplate.Execute(&b, data) 72 | if err != nil { 73 | return nil, err 74 | } 75 | 76 | return k8Yaml.NewYAMLOrJSONDecoder(bytes.NewReader([]byte(b.String())), 1000), nil 77 | } 78 | -------------------------------------------------------------------------------- /pkg/securityscan/alert/templates/prometheusrule.template: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: {{ .name }} 5 | namespace: {{ .namespace }} 6 | labels: 7 | app: 
rancher-monitoring 8 | spec: 9 | groups: 10 | - name: rancher-cis-scan-exporter 11 | rules: 12 | {{- if .alertOnFailure }} 13 | - alert: CISScanHasFailures 14 | annotations: 15 | description: CIS ClusterScan "{{ .scanName }}" has {{ "{{ $value }}" }} test failures or warnings 16 | summary: CIS ClusterScan has tests failures 17 | {{- if .failOnWarn }} 18 | expr: cis_scan_num_tests_fail{scan_name="{{ .scanName }}"} > 0 or ON(scan_name) cis_scan_num_tests_warn{scan_name="{{ .scanName }}"} > 0 19 | {{- else }} 20 | expr: cis_scan_num_tests_fail{scan_name="{{ .scanName }}"} > 0 21 | {{- end }} 22 | for: 1m 23 | labels: 24 | severity: {{ .severity }} 25 | job: rancher-cis-scan 26 | {{- end }} 27 | {{- if .alertOnComplete }} 28 | - alert: CISScanHasCompleted 29 | annotations: 30 | description: CIS ClusterScan "{{ .scanName }}" with Cluster Scan profile "{{ .scanProfileName }}" has completed. 31 | summary: CIS ClusterScan has completed 32 | expr: increase(cis_scan_num_scans_complete{scan_name="{{ .scanName }}"}[5m]) > 0 33 | for: 1m 34 | labels: 35 | severity: {{ .severity }} 36 | job: rancher-cis-scan 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /pkg/securityscan/controller.go: -------------------------------------------------------------------------------- 1 | package securityscan 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | v1monitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1" 10 | "github.com/sirupsen/logrus" 11 | "k8s.io/client-go/kubernetes" 12 | "k8s.io/client-go/rest" 13 | 14 | detector "github.com/rancher/kubernetes-provider-detector" 15 | "github.com/rancher/wrangler/v3/pkg/apply" 16 | "github.com/rancher/wrangler/v3/pkg/crd" 17 | appsctl "github.com/rancher/wrangler/v3/pkg/generated/controllers/apps" 18 | appsctlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/apps/v1" 19 | batchctl "github.com/rancher/wrangler/v3/pkg/generated/controllers/batch" 20 | batchctlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/batch/v1" 21 | corectl "github.com/rancher/wrangler/v3/pkg/generated/controllers/core" 22 | corectlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" 23 | "github.com/rancher/wrangler/v3/pkg/start" 24 | 25 | "sync" 26 | 27 | "github.com/prometheus/client_golang/prometheus" 28 | 29 | cisoperatorapiv1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 30 | cisoperatorctl "github.com/rancher/cis-operator/pkg/generated/controllers/cis.cattle.io" 31 | cisoperatorctlv1 "github.com/rancher/cis-operator/pkg/generated/controllers/cis.cattle.io/v1" 32 | "github.com/rancher/cis-operator/pkg/securityscan/scan" 33 | corev1 "k8s.io/api/core/v1" 34 | ) 35 | 36 | type Controller struct { 37 | Namespace string 38 | Name string 39 | ClusterProvider string 40 | KubernetesVersion string 41 | ImageConfig *cisoperatorapiv1.ScanImageConfig 42 | 43 | kcs *kubernetes.Clientset 44 | cfg *rest.Config 45 | coreFactory *corectl.Factory 46 | batchFactory *batchctl.Factory 47 | appsFactory *appsctl.Factory 48 | cisFactory *cisoperatorctl.Factory 49 | apply apply.Apply 50 | monitoringClient v1monitoringclient.MonitoringV1Interface 51 | 52 | mu *sync.Mutex 53 | currentScanName string 54 | 55 | numTestsFailed *prometheus.GaugeVec 56 | numScansComplete *prometheus.CounterVec 57 | numTestsSkipped *prometheus.GaugeVec 58 | numTestsTotal *prometheus.GaugeVec 59 | numTestsNA *prometheus.GaugeVec 60 | numTestsPassed 
*prometheus.GaugeVec 61 | numTestsWarn *prometheus.GaugeVec 62 | 63 | scans cisoperatorctlv1.ClusterScanController 64 | jobs batchctlv1.JobController 65 | configmaps corectlv1.ConfigMapController 66 | configMapCache corectlv1.ConfigMapCache 67 | services corectlv1.ServiceController 68 | pods corectlv1.PodController 69 | podCache corectlv1.PodCache 70 | daemonsets appsctlv1.DaemonSetController 71 | daemonsetCache appsctlv1.DaemonSetCache 72 | securityScanJobTolerations []corev1.Toleration 73 | } 74 | 75 | func NewController(ctx context.Context, cfg *rest.Config, namespace, name string, 76 | imgConfig *cisoperatorapiv1.ScanImageConfig, securityScanJobTolerations []corev1.Toleration) (ctl *Controller, err error) { 77 | if cfg == nil { 78 | cfg, err = rest.InClusterConfig() 79 | if err != nil { 80 | return nil, err 81 | } 82 | } 83 | ctl = &Controller{ 84 | Namespace: namespace, 85 | Name: name, 86 | ImageConfig: imgConfig, 87 | mu: &sync.Mutex{}, 88 | } 89 | 90 | ctl.kcs, err = kubernetes.NewForConfig(cfg) 91 | if err != nil { 92 | return nil, err 93 | } 94 | 95 | ctl.cfg = cfg 96 | 97 | clientset, err := kubernetes.NewForConfig(cfg) 98 | if err != nil { 99 | return nil, err 100 | } 101 | ctl.ClusterProvider, err = detectClusterProvider(ctx, clientset) 102 | if err != nil { 103 | return nil, err 104 | } 105 | logrus.Infof("ClusterProvider detected %v", ctl.ClusterProvider) 106 | 107 | ctl.KubernetesVersion, err = detectKubernetesVersion(ctx, clientset) 108 | if err != nil { 109 | return nil, err 110 | } 111 | logrus.Infof("KubernetesVersion detected %v", ctl.KubernetesVersion) 112 | 113 | ctl.apply, err = apply.NewForConfig(cfg) 114 | if err != nil { 115 | return nil, err 116 | } 117 | ctl.cisFactory, err = cisoperatorctl.NewFactoryFromConfig(cfg) 118 | if err != nil { 119 | return nil, fmt.Errorf("Error building securityscan NewFactoryFromConfig: %w", err) 120 | } 121 | 122 | ctl.batchFactory, err = batchctl.NewFactoryFromConfig(cfg) 123 | if err != nil { 124 | return nil, fmt.Errorf("Error building batch NewFactoryFromConfig: %w", err) 125 | } 126 | 127 | ctl.coreFactory, err = corectl.NewFactoryFromConfig(cfg) 128 | if err != nil { 129 | return nil, fmt.Errorf("Error building core NewFactoryFromConfig: %w", err) 130 | } 131 | 132 | ctl.appsFactory, err = appsctl.NewFactoryFromConfig(cfg) 133 | if err != nil { 134 | return nil, fmt.Errorf("Error building apps NewFactoryFromConfig: %w", err) 135 | } 136 | 137 | ctl.monitoringClient, err = v1monitoringclient.NewForConfig(cfg) 138 | if err != nil { 139 | return nil, fmt.Errorf("Error building v1 monitoring client from config: %w", err) 140 | } 141 | 142 | err = initializeMetrics(ctl) 143 | if err != nil { 144 | return nil, fmt.Errorf("Error registering CIS Metrics: %w", err) 145 | } 146 | 147 | ctl.scans = ctl.cisFactory.Cis().V1().ClusterScan() 148 | ctl.jobs = ctl.batchFactory.Batch().V1().Job() 149 | ctl.configmaps = ctl.coreFactory.Core().V1().ConfigMap() 150 | ctl.configMapCache = ctl.coreFactory.Core().V1().ConfigMap().Cache() 151 | ctl.services = ctl.coreFactory.Core().V1().Service() 152 | ctl.pods = ctl.coreFactory.Core().V1().Pod() 153 | ctl.podCache = ctl.coreFactory.Core().V1().Pod().Cache() 154 | ctl.daemonsets = ctl.appsFactory.Apps().V1().DaemonSet() 155 | ctl.daemonsetCache = ctl.appsFactory.Apps().V1().DaemonSet().Cache() 156 | ctl.securityScanJobTolerations = securityScanJobTolerations 157 | return ctl, nil 158 | } 159 | 160 | func (c *Controller) Start(ctx context.Context, threads int, _ time.Duration) error { 161 | // 
register our handlers 162 | if err := c.handleJobs(ctx); err != nil { 163 | return err 164 | } 165 | if err := c.handlePods(ctx); err != nil { 166 | return err 167 | } 168 | if err := c.handleClusterScans(ctx); err != nil { 169 | return err 170 | } 171 | if err := c.handleScheduledClusterScans(ctx); err != nil { 172 | return err 173 | } 174 | if err := c.handleClusterScanMetrics(ctx); err != nil { 175 | return err 176 | } 177 | return start.All(ctx, threads, c.cisFactory, c.coreFactory, c.batchFactory) 178 | } 179 | 180 | func (c *Controller) registerCRD(ctx context.Context) error { 181 | factory, err := crd.NewFactoryFromClient(c.cfg) 182 | if err != nil { 183 | return err 184 | } 185 | 186 | var crds []crd.CRD 187 | for _, crdFn := range []func() (*crd.CRD, error){ 188 | scan.ClusterScanCRD, 189 | } { 190 | crdef, err := crdFn() 191 | if err != nil { 192 | return err 193 | } 194 | crds = append(crds, *crdef) 195 | } 196 | return factory.BatchCreateCRDs(ctx, crds...).BatchWait() 197 | } 198 | 199 | func (c *Controller) refreshClusterKubernetesVersion(ctx context.Context) error { 200 | clusterK8sVersion, err := detectKubernetesVersion(ctx, c.kcs) 201 | if err != nil { 202 | return err 203 | } 204 | if !strings.EqualFold(clusterK8sVersion, c.KubernetesVersion) { 205 | c.KubernetesVersion = clusterK8sVersion 206 | logrus.Infof("New KubernetesVersion detected %v", c.KubernetesVersion) 207 | } 208 | return nil 209 | } 210 | 211 | func detectClusterProvider(ctx context.Context, k8sClient kubernetes.Interface) (string, error) { 212 | provider, err := detector.DetectProvider(ctx, k8sClient) 213 | if err != nil { 214 | return "", err 215 | } 216 | return provider, err 217 | } 218 | 219 | func detectKubernetesVersion(_ context.Context, k8sClient kubernetes.Interface) (string, error) { 220 | v, err := k8sClient.Discovery().ServerVersion() 221 | if err != nil { 222 | return "", err 223 | } 224 | return v.GitVersion, nil 225 | } 226 | 227 | func initializeMetrics(ctl *Controller) error { 228 | ctl.numTestsFailed = prometheus.NewGaugeVec( 229 | prometheus.GaugeOpts{ 230 | Name: "cis_scan_num_tests_fail", 231 | Help: "Number of test failed in the CIS scans, partioned by scan_name, scan_profile_name", 232 | }, 233 | []string{ 234 | // scan_name will be set to "manual" for on-demand manual scans and the actual name set for the scheduled scans 235 | "scan_name", 236 | // name of the clusterScanProfile used for scanning 237 | "scan_profile_name", 238 | "cluster_name", 239 | }, 240 | ) 241 | if err := prometheus.Register(ctl.numTestsFailed); err != nil { 242 | return err 243 | } 244 | 245 | ctl.numScansComplete = prometheus.NewCounterVec( 246 | prometheus.CounterOpts{ 247 | Name: "cis_scan_num_scans_complete", 248 | Help: "Number of CIS clusterscans completed, partioned by scan_name, scan_profile_name", 249 | }, 250 | []string{ 251 | // scan_name will be set to "manual" for on-demand manual scans and the actual name set for the scheduled scans 252 | "scan_name", 253 | // name of the clusterScanProfile used for scanning 254 | "scan_profile_name", 255 | "cluster_name", 256 | }, 257 | ) 258 | if err := prometheus.Register(ctl.numScansComplete); err != nil { 259 | return err 260 | } 261 | 262 | ctl.numTestsTotal = prometheus.NewGaugeVec( 263 | prometheus.GaugeOpts{ 264 | Name: "cis_scan_num_tests_total", 265 | Help: "Total Number of tests run in the CIS scans, partioned by scan_name, scan_profile_name", 266 | }, 267 | []string{ 268 | // scan_name will be set to "manual" for on-demand manual scans and the actual 
name set for the scheduled scans 269 | "scan_name", 270 | // name of the clusterScanProfile used for scanning 271 | "scan_profile_name", 272 | "cluster_name", 273 | }, 274 | ) 275 | if err := prometheus.Register(ctl.numTestsTotal); err != nil { 276 | return err 277 | } 278 | 279 | ctl.numTestsPassed = prometheus.NewGaugeVec( 280 | prometheus.GaugeOpts{ 281 | Name: "cis_scan_num_tests_pass", 282 | Help: "Number of tests passing in the CIS scans, partioned by scan_name, scan_profile_name", 283 | }, 284 | []string{ 285 | // scan_name will be set to "manual" for on-demand manual scans and the actual name set for the scheduled scans 286 | "scan_name", 287 | // name of the clusterScanProfile used for scanning 288 | "scan_profile_name", 289 | "cluster_name", 290 | }, 291 | ) 292 | if err := prometheus.Register(ctl.numTestsPassed); err != nil { 293 | return err 294 | } 295 | 296 | ctl.numTestsSkipped = prometheus.NewGaugeVec( 297 | prometheus.GaugeOpts{ 298 | Name: "cis_scan_num_tests_skipped", 299 | Help: "Number of test skipped in the CIS scans, partioned by scan_name, scan_profile_name", 300 | }, 301 | []string{ 302 | // scan_name will be set to "manual" for on-demand manual scans and the actual name set for the scheduled scans 303 | "scan_name", 304 | // name of the clusterScanProfile used for scanning 305 | "scan_profile_name", 306 | "cluster_name", 307 | }, 308 | ) 309 | if err := prometheus.Register(ctl.numTestsSkipped); err != nil { 310 | return err 311 | } 312 | 313 | ctl.numTestsNA = prometheus.NewGaugeVec( 314 | prometheus.GaugeOpts{ 315 | Name: "cis_scan_num_tests_na", 316 | Help: "Number of tests not applicable in the CIS scans, partioned by scan_name, scan_profile_name", 317 | }, 318 | []string{ 319 | // scan_name will be set to "manual" for on-demand manual scans and the actual name set for the scheduled scans 320 | "scan_name", 321 | // name of the clusterScanProfile used for scanning 322 | "scan_profile_name", 323 | "cluster_name", 324 | }, 325 | ) 326 | if err := prometheus.Register(ctl.numTestsNA); err != nil { 327 | return err 328 | } 329 | 330 | ctl.numTestsWarn = prometheus.NewGaugeVec( 331 | prometheus.GaugeOpts{ 332 | Name: "cis_scan_num_tests_warn", 333 | Help: "Number of tests having warn status in the CIS scans, partioned by scan_name, scan_profile_name", 334 | }, 335 | []string{ 336 | // scan_name will be set to "manual" for on-demand manual scans and the actual name set for the scheduled scans 337 | "scan_name", 338 | // name of the clusterScanProfile used for scanning 339 | "scan_profile_name", 340 | "cluster_name", 341 | }, 342 | ) 343 | if err := prometheus.Register(ctl.numTestsWarn); err != nil { 344 | return err 345 | } 346 | 347 | return nil 348 | } 349 | -------------------------------------------------------------------------------- /pkg/securityscan/core/configmap.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "bytes" 5 | _ "embed" // nolint 6 | "encoding/json" 7 | "text/template" 8 | 9 | corev1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | k8Yaml "k8s.io/apimachinery/pkg/util/yaml" 12 | 13 | wcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" 14 | "github.com/rancher/wrangler/v3/pkg/name" 15 | 16 | cisoperatorapiv1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 17 | ) 18 | 19 | //go:embed templates/pluginConfig.template 20 | var pluginConfigTemplate string 21 | 22 | //go:embed templates/cisscanConfig.template 23 | var 
cisscanConfigTemplate string 24 | 25 | type OverrideSkipInfoData struct { 26 | Skip map[string][]string `json:"skip"` 27 | } 28 | 29 | const ( 30 | CurrentBenchmarkKey = "current" 31 | ConfigFileName = "config.json" 32 | ) 33 | 34 | func NewConfigMaps(clusterscan *cisoperatorapiv1.ClusterScan, clusterscanprofile *cisoperatorapiv1.ClusterScanProfile, clusterscanbenchmark *cisoperatorapiv1.ClusterScanBenchmark, _ string, imageConfig *cisoperatorapiv1.ScanImageConfig, configmapsClient wcorev1.ConfigMapController) (cmMap map[string]*corev1.ConfigMap, err error) { 35 | cmMap = make(map[string]*corev1.ConfigMap) 36 | 37 | configdata := map[string]interface{}{ 38 | "namespace": cisoperatorapiv1.ClusterScanNS, 39 | "name": name.SafeConcatName(cisoperatorapiv1.ClusterScanConfigMap, clusterscan.Name), 40 | "runName": name.SafeConcatName("security-scan-runner", clusterscan.Name), 41 | "appName": "rancher-cis-benchmark", 42 | "advertiseAddress": cisoperatorapiv1.ClusterScanService, 43 | "sonobuoyImage": imageConfig.SonobuoyImage + ":" + imageConfig.SonobuoyImageTag, 44 | "sonobuoyVersion": imageConfig.SonobuoyImageTag, 45 | } 46 | configcm, err := generateConfigMap(clusterscan, "cisscanConfig.template", cisscanConfigTemplate, configdata) 47 | if err != nil { 48 | return cmMap, err 49 | } 50 | cmMap["configcm"] = configcm 51 | 52 | var isCustomBenchmark bool 53 | customBenchmarkConfigMapName := "" 54 | customBenchmarkConfigMapData := make(map[string]string) 55 | if clusterscanbenchmark.Spec.CustomBenchmarkConfigMapName != "" { 56 | isCustomBenchmark = true 57 | customcm, err := getCustomBenchmarkConfigMap(clusterscanbenchmark, clusterscan, configmapsClient) 58 | if err != nil { 59 | return cmMap, err 60 | } 61 | customBenchmarkConfigMapData = customcm.Data 62 | customBenchmarkConfigMapName = customcm.Name 63 | } 64 | 65 | plugindata := map[string]interface{}{ 66 | "namespace": cisoperatorapiv1.ClusterScanNS, 67 | "name": name.SafeConcatName(cisoperatorapiv1.ClusterScanPluginsConfigMap, clusterscan.Name), 68 | "runName": name.SafeConcatName("security-scan-runner", clusterscan.Name), 69 | "appName": "rancher-cis-benchmark", 70 | "serviceaccount": cisoperatorapiv1.ClusterScanSA, 71 | "securityScanImage": imageConfig.SecurityScanImage + ":" + imageConfig.SecurityScanImageTag, 72 | "benchmarkVersion": clusterscanprofile.Spec.BenchmarkVersion, 73 | "isCustomBenchmark": isCustomBenchmark, 74 | "configDir": cisoperatorapiv1.CustomBenchmarkBaseDir, 75 | "customBenchmarkConfigMapName": customBenchmarkConfigMapName, 76 | "customBenchmarkConfigMapData": customBenchmarkConfigMapData, 77 | } 78 | plugincm, err := generateConfigMap(clusterscan, "pluginConfig.template", pluginConfigTemplate, plugindata) 79 | if err != nil { 80 | return cmMap, err 81 | } 82 | cmMap["plugincm"] = plugincm 83 | 84 | var skipConfigcm *corev1.ConfigMap 85 | if clusterscanprofile.Spec.SkipTests != nil && len(clusterscanprofile.Spec.SkipTests) > 0 { 86 | //create user skip config map as well 87 | // create the cm 88 | skipDataBytes, err := getOverrideSkipInfoData(clusterscanprofile.Spec.SkipTests) 89 | if err != nil { 90 | return cmMap, err 91 | } 92 | skipConfigcm = getConfigMapObject(getOverrideConfigMapName(clusterscan), string(skipDataBytes)) 93 | cmMap["skipConfigcm"] = skipConfigcm 94 | } 95 | 96 | return cmMap, nil 97 | } 98 | 99 | func generateConfigMap(clusterscan *cisoperatorapiv1.ClusterScan, name string, text string, data map[string]interface{}) (*corev1.ConfigMap, error) { 100 | configcm := &corev1.ConfigMap{} 101 | 102 | obj, 
err := parseTemplate(clusterscan, name, text, data) 103 | if err != nil { 104 | return nil, err 105 | } 106 | 107 | if err := obj.Decode(&configcm); err != nil { 108 | return nil, err 109 | } 110 | return configcm, nil 111 | } 112 | 113 | func parseTemplate(_ *cisoperatorapiv1.ClusterScan, name string, text string, data map[string]interface{}) (*k8Yaml.YAMLOrJSONDecoder, error) { 114 | cmTemplate, err := template.New(name).Parse(text) 115 | if err != nil { 116 | return nil, err 117 | } 118 | 119 | var b bytes.Buffer 120 | err = cmTemplate.Execute(&b, data) 121 | if err != nil { 122 | return nil, err 123 | } 124 | 125 | return k8Yaml.NewYAMLOrJSONDecoder(bytes.NewReader([]byte(b.String())), 1000), nil 126 | } 127 | 128 | func getOverrideConfigMapName(cs *cisoperatorapiv1.ClusterScan) string { 129 | return name.SafeConcatName(cisoperatorapiv1.ClusterScanUserSkipConfigMap, cs.Name) 130 | } 131 | 132 | func getOverrideSkipInfoData(skip []string) ([]byte, error) { 133 | s := OverrideSkipInfoData{Skip: map[string][]string{CurrentBenchmarkKey: skip}} 134 | return json.Marshal(s) 135 | } 136 | 137 | func getConfigMapObject(cmName, data string) *corev1.ConfigMap { 138 | return &corev1.ConfigMap{ 139 | TypeMeta: metav1.TypeMeta{ 140 | APIVersion: "v1", 141 | Kind: "ConfigMap", 142 | }, 143 | ObjectMeta: metav1.ObjectMeta{ 144 | Name: cmName, 145 | Namespace: cisoperatorapiv1.ClusterScanNS, 146 | }, 147 | Data: map[string]string{ 148 | ConfigFileName: data, 149 | }, 150 | } 151 | } 152 | 153 | func getCustomBenchmarkConfigMap(benchmark *cisoperatorapiv1.ClusterScanBenchmark, clusterscan *cisoperatorapiv1.ClusterScan, configmapsClient wcorev1.ConfigMapController) (*corev1.ConfigMap, error) { 154 | if benchmark.Spec.CustomBenchmarkConfigMapName == "" { 155 | return nil, nil 156 | } 157 | userConfigmap, err := configmapsClient.Get(benchmark.Spec.CustomBenchmarkConfigMapNamespace, benchmark.Spec.CustomBenchmarkConfigMapName, metav1.GetOptions{}) 158 | if err != nil { 159 | return nil, err 160 | } 161 | if benchmark.Spec.CustomBenchmarkConfigMapNamespace == cisoperatorapiv1.ClusterScanNS { 162 | return userConfigmap, nil 163 | } 164 | //copy the configmap to ClusterScanNS so that cis scan pod can find it for volume mount 165 | //this will be cleaned up after scan job finishes 166 | configmapCopy := corev1.ConfigMap{ 167 | TypeMeta: metav1.TypeMeta{ 168 | APIVersion: "v1", 169 | Kind: "ConfigMap", 170 | }, 171 | ObjectMeta: metav1.ObjectMeta{ 172 | Name: name.SafeConcatName(cisoperatorapiv1.CustomBenchmarkConfigMap, clusterscan.Name), 173 | Namespace: cisoperatorapiv1.ClusterScanNS, 174 | }, 175 | Data: userConfigmap.Data, 176 | } 177 | return configmapsClient.Create(&configmapCopy) 178 | } 179 | -------------------------------------------------------------------------------- /pkg/securityscan/core/service.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | _ "embed" // nolint 5 | 6 | "github.com/rancher/wrangler/v3/pkg/name" 7 | corev1 "k8s.io/api/core/v1" 8 | 9 | cisoperatorapiv1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 10 | ) 11 | 12 | //go:embed templates/service.template 13 | var serviceTemplate string 14 | 15 | func NewService(clusterscan *cisoperatorapiv1.ClusterScan, _ *cisoperatorapiv1.ClusterScanProfile, _ string) (service *corev1.Service, err error) { 16 | 17 | servicedata := map[string]interface{}{ 18 | "namespace": cisoperatorapiv1.ClusterScanNS, 19 | "name": cisoperatorapiv1.ClusterScanService, 20 | 
"runName": name.SafeConcatName("security-scan-runner", clusterscan.Name), 21 | "appName": "rancher-cis-benchmark", 22 | } 23 | service, err = generateService(clusterscan, "service.template", serviceTemplate, servicedata) 24 | if err != nil { 25 | return nil, err 26 | } 27 | return service, nil 28 | } 29 | 30 | func generateService(clusterscan *cisoperatorapiv1.ClusterScan, templateName string, templContent string, data map[string]interface{}) (*corev1.Service, error) { 31 | service := &corev1.Service{} 32 | 33 | obj, err := parseTemplate(clusterscan, templateName, templContent, data) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | if err := obj.Decode(&service); err != nil { 39 | return nil, err 40 | } 41 | return service, nil 42 | } 43 | -------------------------------------------------------------------------------- /pkg/securityscan/core/templates/cisscanConfig.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: {{.namespace}} 5 | labels: 6 | app.kubernetes.io/name: {{.appName}} 7 | app.kubernetes.io/instance: {{.runName}} 8 | name: {{.name}} 9 | data: 10 | config.json: | 11 | { 12 | "Description": "kube-bench plugin for CIS benchmarks", 13 | "Filters": { 14 | "LabelSelector": "", 15 | "Namespaces": "[^\\w-.]+" 16 | }, 17 | "PluginNamespace": "{{.namespace}}", 18 | "Plugins": [ 19 | { 20 | "name": "rancher-kube-bench" 21 | } 22 | ], 23 | "PluginSearchPath": [ 24 | "/plugins.d" 25 | ], 26 | "Resources": [], 27 | "ResultsDir": "/tmp/sonobuoy", 28 | "Server": { 29 | "advertiseaddress": "{{.advertiseAddress}}", 30 | "bindaddress": "0.0.0.0", 31 | "bindport": 443, 32 | "timeoutseconds": 5400 33 | }, 34 | "Namespace": "{{.namespace}}", 35 | "WorkerImage": "{{.sonobuoyImage}}", 36 | "Version": "{{.sonobuoyVersion}}" 37 | } -------------------------------------------------------------------------------- /pkg/securityscan/core/templates/pluginConfig.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: {{.namespace}} 5 | labels: 6 | app.kubernetes.io/name: {{.appName}} 7 | app.kubernetes.io/instance: {{.runName}} 8 | name: {{.name}} 9 | data: 10 | rancher-kube-bench.yaml: | 11 | podSpec: 12 | containers: [] 13 | dnsPolicy: ClusterFirstWithHostNet 14 | hostIPC: true 15 | hostNetwork: true 16 | hostPID: true 17 | serviceAccountName: {{ .serviceaccount }} 18 | tolerations: 19 | - effect: NoSchedule 20 | key: node-role.kubernetes.io/controlplane 21 | operator: Exists 22 | - effect: NoSchedule 23 | key: node-role.kubernetes.io/control-plane 24 | operator: Exists 25 | - effect: NoExecute 26 | key: node-role.kubernetes.io/etcd 27 | operator: Exists 28 | - effect: NoExecute 29 | key: CriticalAddonsOnly 30 | operator: Exists 31 | volumes: 32 | - hostPath: 33 | path: / 34 | name: root 35 | - hostPath: 36 | path: /etc/passwd 37 | name: etc-passwd 38 | - hostPath: 39 | path: /etc/group 40 | name: etc-group 41 | - hostPath: 42 | path: /var/lib/rancher 43 | name: var-rancher 44 | - hostPath: 45 | path: /etc/rancher 46 | name: etc-rancher 47 | - hostPath: 48 | path: /etc/cni/net.d 49 | name: etc-cni 50 | - hostPath: 51 | path: /var/lib/cni 52 | name: var-cni 53 | - hostPath: 54 | path: /var/log 55 | name: var-log 56 | - hostPath: 57 | path: /run/log 58 | name: run-log 59 | - hostPath: 60 | path: /etc/kubernetes/kubelet 61 | name: etc-kubelet 62 | - hostPath: 63 | path: /var/lib/kubelet 64 | 
name: var-kubelet 65 | {{- if .isCustomBenchmark }} 66 | - configMap: 67 | defaultMode: 420 68 | items: 69 | {{- range $key, $value := .customBenchmarkConfigMapData }} 70 | {{- if eq $key "config.yaml"}} 71 | - key: {{ $key }} 72 | path: {{ $key }} 73 | {{- else}} 74 | - key: {{ $key }} 75 | path: {{ $.benchmarkVersion }}/{{ $key }} 76 | {{- end }} 77 | {{- end }} 78 | name: {{ .customBenchmarkConfigMapName }} 79 | name: custom-benchmark-volume 80 | {{- end }} 81 | sonobuoy-config: 82 | driver: DaemonSet 83 | plugin-name: rancher-kube-bench 84 | result-type: rancher-kube-bench 85 | result-format: raw 86 | spec: 87 | name: rancher-kube-bench 88 | image: {{ .securityScanImage }} 89 | command: ["/bin/bash", "-c", "run_sonobuoy_plugin.sh && sleep 3600"] 90 | env: 91 | - name: SONOBUOY_NS 92 | value: {{ .namespace }} 93 | - name: NODE_NAME 94 | valueFrom: 95 | fieldRef: 96 | fieldPath: spec.nodeName 97 | - name: RESULTS_DIR 98 | value: /tmp/results 99 | - name: CHROOT_DIR 100 | value: /node 101 | - name: OVERRIDE_BENCHMARK_VERSION 102 | value: {{ .benchmarkVersion }} 103 | {{- if .isCustomBenchmark }} 104 | - name: CONFIG_DIR 105 | value: {{ .configDir }} 106 | {{- end }} 107 | imagePullPolicy: IfNotPresent 108 | securityContext: 109 | privileged: true 110 | volumeMounts: 111 | - mountPath: /tmp/results 112 | name: results 113 | readOnly: false 114 | - mountPath: /node 115 | name: root 116 | readOnly: true 117 | - mountPath: /etc/passwd 118 | name: etc-passwd 119 | readOnly: true 120 | - mountPath: /etc/group 121 | name: etc-group 122 | readOnly: true 123 | - mountPath: /var/lib/rancher 124 | name: var-rancher 125 | readOnly: true 126 | - mountPath: /etc/rancher 127 | name: etc-rancher 128 | readOnly: true 129 | - mountPath: /etc/cni/net.d 130 | name: etc-cni 131 | readOnly: true 132 | - mountPath: /var/lib/cni 133 | name: var-cni 134 | readOnly: true 135 | - mountPath: /var/log/ 136 | name: var-log 137 | readOnly: true 138 | - mountPath: /run/log/ 139 | name: run-log 140 | readOnly: true 141 | - mountPath: /etc/kubernetes/kubelet 142 | name: etc-kubelet 143 | readOnly: true 144 | - mountPath: /var/lib/kubelet 145 | name: var-kubelet 146 | readOnly: true 147 | {{- if .isCustomBenchmark }} 148 | - mountPath: /etc/kbs/custombenchmark/cfg 149 | name: custom-benchmark-volume 150 | {{- end }} 151 | -------------------------------------------------------------------------------- /pkg/securityscan/core/templates/service.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{.name}} 5 | namespace: {{.namespace}} 6 | labels: 7 | app.kubernetes.io/name: {{.appName}} 8 | app.kubernetes.io/instance: {{.runName}} 9 | spec: 10 | type: ClusterIP 11 | ports: 12 | - port: 443 13 | targetPort: 443 14 | protocol: TCP 15 | selector: 16 | app.kubernetes.io/name: {{.appName}} 17 | app.kubernetes.io/instance: {{.runName}} 18 | -------------------------------------------------------------------------------- /pkg/securityscan/job/job.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "os" 5 | "strconv" 6 | "strings" 7 | 8 | "github.com/sirupsen/logrus" 9 | batchv1 "k8s.io/api/batch/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/labels" 13 | 14 | wcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" 15 | "github.com/rancher/wrangler/v3/pkg/name" 16 | 17 | 
cisoperatorapi "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io" 18 | cisoperatorapiv1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 19 | "github.com/rancher/cis-operator/pkg/condition" 20 | ) 21 | 22 | const ( 23 | defaultTerminationGracePeriodSeconds = int64(0) 24 | defaultBackoffLimit = int32(0) 25 | defaultTTLSecondsAfterFinished = int32(0) 26 | ) 27 | 28 | var ( 29 | ConditionComplete = condition.Cond(batchv1.JobComplete) 30 | ConditionFailed = condition.Cond(batchv1.JobFailed) 31 | 32 | backoffLimit = readFromEnv("CIS_JOB_BACKOFF_LIMIT", defaultBackoffLimit) 33 | 34 | TerminationGracePeriodSeconds = func(defaultValue int64) int64 { 35 | return defaultValue 36 | }(defaultTerminationGracePeriodSeconds) 37 | 38 | ttlSecondsAfterFinished = readFromEnv("CIS_JOB_TTL_SECONDS_AFTER_FINISH", defaultTTLSecondsAfterFinished) 39 | ) 40 | 41 | func readFromEnv(key string, defaultValue int32) int32 { 42 | if str, ok := os.LookupEnv(key); ok { 43 | i, err := strconv.ParseInt(str, 10, 32) 44 | if err != nil { 45 | logrus.Errorf("failed to parse $%s: %v", key, err) 46 | return defaultValue 47 | } 48 | return int32(i) 49 | } 50 | return defaultValue 51 | } 52 | 53 | func New(clusterscan *cisoperatorapiv1.ClusterScan, clusterscanprofile *cisoperatorapiv1.ClusterScanProfile, clusterscanbenchmark *cisoperatorapiv1.ClusterScanBenchmark, 54 | controllerName string, imageConfig *cisoperatorapiv1.ScanImageConfig, configmapsClient wcorev1.ConfigMapController, tolerations []corev1.Toleration) *batchv1.Job { 55 | privileged := true 56 | job := &batchv1.Job{ 57 | ObjectMeta: metav1.ObjectMeta{ 58 | Name: name.SafeConcatName("security-scan-runner", clusterscan.Name), 59 | Namespace: cisoperatorapiv1.ClusterScanNS, 60 | Annotations: labels.Set{}, 61 | Labels: labels.Set{ 62 | cisoperatorapi.LabelController: controllerName, 63 | cisoperatorapi.LabelProfile: clusterscan.Spec.ScanProfileName, 64 | cisoperatorapi.LabelClusterScan: clusterscan.Name, 65 | }, 66 | OwnerReferences: []metav1.OwnerReference{{ 67 | APIVersion: "cis.cattle.io/v1", 68 | Kind: "ClusterScan", 69 | Name: clusterscan.Name, 70 | UID: clusterscan.GetUID(), 71 | }}, 72 | }, 73 | Spec: batchv1.JobSpec{ 74 | BackoffLimit: &backoffLimit, 75 | TTLSecondsAfterFinished: &ttlSecondsAfterFinished, 76 | Template: corev1.PodTemplateSpec{ 77 | ObjectMeta: metav1.ObjectMeta{ 78 | Labels: labels.Set{ 79 | "app.kubernetes.io/name": "rancher-cis-benchmark", 80 | "app.kubernetes.io/instance": name.SafeConcatName("security-scan-runner", clusterscan.Name), 81 | "run": "sonobuoy-master", 82 | cisoperatorapi.LabelController: controllerName, 83 | cisoperatorapi.LabelProfile: clusterscan.Spec.ScanProfileName, 84 | cisoperatorapi.LabelClusterScan: clusterscan.Name, 85 | }, 86 | }, 87 | Spec: corev1.PodSpec{ 88 | HostPID: true, 89 | HostIPC: true, 90 | ServiceAccountName: cisoperatorapiv1.ClusterScanSA, 91 | TerminationGracePeriodSeconds: &TerminationGracePeriodSeconds, 92 | Tolerations: tolerations, 93 | NodeSelector: labels.Set{ 94 | "kubernetes.io/os": "linux", 95 | }, 96 | RestartPolicy: corev1.RestartPolicyNever, 97 | Volumes: []corev1.Volume{{ 98 | Name: `s-config-volume`, 99 | VolumeSource: corev1.VolumeSource{ 100 | ConfigMap: &corev1.ConfigMapVolumeSource{ 101 | LocalObjectReference: corev1.LocalObjectReference{ 102 | Name: name.SafeConcatName(cisoperatorapiv1.ClusterScanConfigMap, clusterscan.Name), 103 | }, 104 | }, 105 | }, 106 | }, { 107 | Name: `s-plugins-volume`, 108 | VolumeSource: corev1.VolumeSource{ 109 | ConfigMap: 
&corev1.ConfigMapVolumeSource{ 110 | LocalObjectReference: corev1.LocalObjectReference{ 111 | Name: name.SafeConcatName(cisoperatorapiv1.ClusterScanPluginsConfigMap, clusterscan.Name), 112 | }, 113 | }, 114 | }, 115 | }, { 116 | Name: `output-volume`, 117 | VolumeSource: corev1.VolumeSource{ 118 | EmptyDir: &corev1.EmptyDirVolumeSource{}, 119 | }, 120 | }, { 121 | Name: `rke2-root`, 122 | VolumeSource: corev1.VolumeSource{ 123 | HostPath: &corev1.HostPathVolumeSource{ 124 | Path: `/var/lib/rancher`, 125 | }, 126 | }, 127 | }, { 128 | Name: `rke2-root-config`, 129 | VolumeSource: corev1.VolumeSource{ 130 | HostPath: &corev1.HostPathVolumeSource{ 131 | Path: `/etc/rancher`, 132 | }, 133 | }, 134 | }, { 135 | Name: `rke2-cni`, 136 | VolumeSource: corev1.VolumeSource{ 137 | HostPath: &corev1.HostPathVolumeSource{ 138 | Path: `/etc/cni/net.d`, 139 | }, 140 | }, 141 | }, { 142 | Name: `etc-passwd`, 143 | VolumeSource: corev1.VolumeSource{ 144 | HostPath: &corev1.HostPathVolumeSource{ 145 | Path: `/etc/passwd`, 146 | }, 147 | }, 148 | }, { 149 | Name: `etc-group`, 150 | VolumeSource: corev1.VolumeSource{ 151 | HostPath: &corev1.HostPathVolumeSource{ 152 | Path: `/etc/group`, 153 | }, 154 | }, 155 | }, { 156 | Name: `var-log`, 157 | VolumeSource: corev1.VolumeSource{ 158 | HostPath: &corev1.HostPathVolumeSource{ 159 | Path: `/var/log`, 160 | }, 161 | }, 162 | }, { 163 | Name: `run-log`, 164 | VolumeSource: corev1.VolumeSource{ 165 | HostPath: &corev1.HostPathVolumeSource{ 166 | Path: `/run/log`, 167 | }, 168 | }, 169 | }, 170 | }, 171 | Containers: []corev1.Container{{ 172 | Name: `rancher-cis-benchmark`, 173 | Image: imageConfig.SecurityScanImage + ":" + imageConfig.SecurityScanImageTag, 174 | ImagePullPolicy: corev1.PullIfNotPresent, 175 | SecurityContext: &corev1.SecurityContext{ 176 | Privileged: &privileged, 177 | }, 178 | Env: []corev1.EnvVar{{ 179 | Name: `OVERRIDE_BENCHMARK_VERSION`, 180 | Value: clusterscanprofile.Spec.BenchmarkVersion, 181 | }, { 182 | Name: `SONOBUOY_NS`, 183 | Value: cisoperatorapiv1.ClusterScanNS, 184 | }, { 185 | Name: `SONOBUOY_POD_NAME`, 186 | ValueFrom: &corev1.EnvVarSource{ 187 | FieldRef: &corev1.ObjectFieldSelector{ 188 | FieldPath: `metadata.name`, 189 | }, 190 | }, 191 | }, { 192 | Name: `SONOBUOY_ADVERTISE_IP`, 193 | Value: `cisscan-rancher-cis-benchmark`, 194 | }, { 195 | Name: `OUTPUT_CONFIGMAPNAME`, 196 | Value: strings.Join([]string{`cisscan-output-for`, clusterscan.Name}, "-"), 197 | }}, 198 | Ports: []corev1.ContainerPort{{ 199 | ContainerPort: 8080, 200 | Protocol: corev1.ProtocolTCP, 201 | }}, 202 | VolumeMounts: []corev1.VolumeMount{{ 203 | Name: `s-config-volume`, 204 | MountPath: `/etc/sonobuoy`, 205 | }, { 206 | Name: `s-plugins-volume`, 207 | MountPath: `/plugins.d`, 208 | }, { 209 | Name: `output-volume`, 210 | MountPath: `/tmp/sonobuoy`, 211 | }, { 212 | Name: `rke2-root`, 213 | MountPath: `/var/lib/rancher`, 214 | }, { 215 | Name: `rke2-root-config`, 216 | MountPath: `/etc/rancher`, 217 | }, { 218 | Name: `rke2-cni`, 219 | MountPath: `/etc/cni/net.d`, 220 | }, { 221 | Name: `etc-passwd`, 222 | MountPath: `/etc/passwd`, 223 | }, { 224 | Name: `etc-group`, 225 | MountPath: `/etc/group`, 226 | }, { 227 | Name: `var-log`, 228 | MountPath: `/var/log/`, 229 | }, { 230 | Name: `run-log`, 231 | MountPath: `/run/log/`, 232 | }}, 233 | }}, 234 | }, 235 | }, 236 | }, 237 | } 238 | //add userskip configmap if present 239 | if clusterscanprofile.Spec.SkipTests != nil && len(clusterscanprofile.Spec.SkipTests) > 0 { 240 | skipVol := corev1.Volume{ 241 | 
Name: `user-skip-info-volume`, 242 | VolumeSource: corev1.VolumeSource{ 243 | ConfigMap: &corev1.ConfigMapVolumeSource{ 244 | LocalObjectReference: corev1.LocalObjectReference{ 245 | Name: name.SafeConcatName(cisoperatorapiv1.ClusterScanUserSkipConfigMap, clusterscan.Name), 246 | }, 247 | }, 248 | }, 249 | } 250 | job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, skipVol) 251 | 252 | //volume mount 253 | skipVolMnt := corev1.VolumeMount{ 254 | Name: `user-skip-info-volume`, 255 | MountPath: `/etc/kbs/userskip`, 256 | } 257 | 258 | job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, skipVolMnt) 259 | } 260 | 261 | //add custom benchmark config and volume 262 | if clusterscanbenchmark.Spec.CustomBenchmarkConfigMapName != "" { 263 | //this env variable is read by kb-summarizer tool in security-scan image 264 | configDirEnv := corev1.EnvVar{ 265 | Name: `CONFIG_DIR`, 266 | Value: cisoperatorapiv1.CustomBenchmarkBaseDir, 267 | } 268 | job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, configDirEnv) 269 | 270 | //add the volume 271 | customcm, err := loadCustomBenchmarkConfigMap(clusterscanbenchmark, clusterscan, configmapsClient) 272 | if err != nil { 273 | logrus.Errorf("Error loading custom CustomBenchmarkConfigMap %v %v", clusterscanbenchmark.Spec.CustomBenchmarkConfigMapNamespace, clusterscanbenchmark.Spec.CustomBenchmarkConfigMapName) 274 | return job 275 | } 276 | customVol := corev1.Volume{ 277 | Name: `custom-benchmark-volume`, 278 | VolumeSource: corev1.VolumeSource{ 279 | ConfigMap: &corev1.ConfigMapVolumeSource{ 280 | LocalObjectReference: corev1.LocalObjectReference{ 281 | Name: customcm.Name, 282 | }, 283 | }, 284 | }, 285 | } 286 | for key := range customcm.Data { 287 | if key == "config.yaml" { 288 | customVol.VolumeSource.ConfigMap.Items = append(customVol.VolumeSource.ConfigMap.Items, corev1.KeyToPath{Key: key, Path: key}) 289 | } else { 290 | customVol.VolumeSource.ConfigMap.Items = append(customVol.VolumeSource.ConfigMap.Items, corev1.KeyToPath{Key: key, Path: clusterscanbenchmark.Name + "/" + key}) 291 | } 292 | } 293 | job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, customVol) 294 | //volume mount 295 | customVolMnt := corev1.VolumeMount{ 296 | Name: `custom-benchmark-volume`, 297 | MountPath: cisoperatorapiv1.CustomBenchmarkBaseDir, 298 | } 299 | job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, customVolMnt) 300 | } 301 | 302 | return job 303 | } 304 | 305 | func loadCustomBenchmarkConfigMap(benchmark *cisoperatorapiv1.ClusterScanBenchmark, clusterscan *cisoperatorapiv1.ClusterScan, configmapsClient wcorev1.ConfigMapController) (*corev1.ConfigMap, error) { 306 | if benchmark.Spec.CustomBenchmarkConfigMapName == "" { 307 | return nil, nil 308 | } 309 | if benchmark.Spec.CustomBenchmarkConfigMapNamespace == cisoperatorapiv1.ClusterScanNS { 310 | return configmapsClient.Get(cisoperatorapiv1.ClusterScanNS, benchmark.Spec.CustomBenchmarkConfigMapName, metav1.GetOptions{}) 311 | } 312 | //get copy of the configmap in ClusterScanNS created while creating plugin configmap 313 | cmName := name.SafeConcatName(cisoperatorapiv1.CustomBenchmarkConfigMap, clusterscan.Name) 314 | configmapCopy, err := configmapsClient.Get(cisoperatorapiv1.ClusterScanNS, cmName, metav1.GetOptions{}) 315 | if err != nil { 316 | return nil, err 317 | } 318 | return configmapCopy, nil 319 | } 320 | 
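The runner Job built above can be tuned through two environment overrides, CIS_JOB_BACKOFF_LIMIT and CIS_JOB_TTL_SECONDS_AFTER_FINISH, both parsed by readFromEnv with a fall-back to the compiled-in defaults. The following is a minimal sketch of that behavior, written as a hypothetical test alongside job.go; it is not part of the repository and only exercises readFromEnv as defined above (the CIS_JOB_UNSET_EXAMPLE variable name is made up for illustration).

package job

import (
	"testing"
)

// TestReadFromEnv sketches the fallback rules used for the Job knobs above:
// unset or unparsable values keep the default, valid integers override it.
func TestReadFromEnv(t *testing.T) {
	// Unset variable: the default wins.
	if got := readFromEnv("CIS_JOB_UNSET_EXAMPLE", 7); got != 7 {
		t.Fatalf("expected default 7, got %d", got)
	}
	// Unparsable value: the error is logged and the default wins.
	t.Setenv("CIS_JOB_BACKOFF_LIMIT", "not-a-number")
	if got := readFromEnv("CIS_JOB_BACKOFF_LIMIT", 0); got != 0 {
		t.Fatalf("expected default 0, got %d", got)
	}
	// Valid integer: the override applies.
	t.Setenv("CIS_JOB_TTL_SECONDS_AFTER_FINISH", "300")
	if got := readFromEnv("CIS_JOB_TTL_SECONDS_AFTER_FINISH", 0); got != 300 {
		t.Fatalf("expected 300, got %d", got)
	}
}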
-------------------------------------------------------------------------------- /pkg/securityscan/jobHandler.go: -------------------------------------------------------------------------------- 1 | package securityscan 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/sirupsen/logrus" 9 | batchv1 "k8s.io/api/batch/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/labels" 13 | 14 | "github.com/rancher/security-scan/pkg/kb-summarizer/report" 15 | reportLibrary "github.com/rancher/security-scan/pkg/kb-summarizer/report" 16 | batchctlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/batch/v1" 17 | 18 | "time" 19 | 20 | cisoperatorapi "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io" 21 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 22 | "github.com/rancher/wrangler/v3/pkg/name" 23 | ) 24 | 25 | var sonobuoyWorkerLabel = map[string]string{"sonobuoy-plugin": "rancher-kube-bench"} 26 | 27 | // job events (successful completions) should remove the job after validating Done annotation and Output CM 28 | func (c *Controller) handleJobs(ctx context.Context) error { 29 | scans := c.cisFactory.Cis().V1().ClusterScan() 30 | reports := c.cisFactory.Cis().V1().ClusterScanReport() 31 | jobs := c.batchFactory.Batch().V1().Job() 32 | 33 | jobs.OnChange(ctx, c.Name, func(_ string, obj *batchv1.Job) (*batchv1.Job, error) { 34 | if obj == nil || obj.DeletionTimestamp != nil { 35 | return obj, nil 36 | } 37 | jobSelector := labels.SelectorFromSet(labels.Set{ 38 | cisoperatorapi.LabelController: c.Name, 39 | }) 40 | // avoid commandeering jobs from other controllers 41 | if obj.Labels == nil || !jobSelector.Matches(labels.Set(obj.Labels)) { 42 | return obj, nil 43 | } 44 | // identify the scan object for this job 45 | scanName, ok := obj.Labels[cisoperatorapi.LabelClusterScan] 46 | if !ok { 47 | // malformed, just delete it and move on 48 | logrus.Errorf("malformed scan, deleting the job %v", obj.Name) 49 | return obj, c.deleteJob(jobs, obj, metav1.DeletePropagationBackground) 50 | } 51 | // get the scan being run 52 | scan, err := scans.Get(scanName, metav1.GetOptions{}) 53 | switch { 54 | case errors.IsNotFound(err): 55 | // scan is gone, delete 56 | logrus.Errorf("scan gone, deleting the job %v", obj.Name) 57 | return obj, c.deleteJob(jobs, obj, metav1.DeletePropagationBackground) 58 | case err != nil: 59 | return obj, err 60 | } 61 | 62 | // if the scan has completed then delete the job 63 | if v1.ClusterScanConditionComplete.IsTrue(scan) { 64 | if !v1.ClusterScanConditionFailed.IsTrue(scan) { 65 | logrus.Infof("Marking ClusterScanConditionAlerted for scan: %v", scanName) 66 | v1.ClusterScanConditionAlerted.Unknown(scan) 67 | } 68 | scan.Status.ObservedGeneration = scan.Generation 69 | c.setClusterScanStatusDisplay(scan) 70 | 71 | if scan.Spec.ScheduledScanConfig != nil && scan.Spec.ScheduledScanConfig.CronSchedule != "" { 72 | err := c.rescheduleScan(scan) 73 | if err != nil { 74 | return obj, fmt.Errorf("error rescheduling scan: %w", err) 75 | } 76 | err = c.purgeOldClusterScanReports(scan) 77 | if err != nil { 78 | return obj, fmt.Errorf("error purging old ClusterScanReports: %w", err) 79 | } 80 | } 81 | err := c.deleteJob(jobs, obj, metav1.DeletePropagationBackground) 82 | if err != nil { 83 | return obj, fmt.Errorf("error deleting job: %w", err) 84 | } 85 | err = c.ensureCleanup(scan) 86 | if err != nil { 87 | return obj, err 88 | } 89 | //update scan 90 | _, err =
scans.UpdateStatus(scan) 91 | if err != nil { 92 | return nil, fmt.Errorf("error updating condition of cluster scan object: %v", scanName) 93 | } 94 | c.currentScanName = "" 95 | return obj, nil 96 | } 97 | 98 | if v1.ClusterScanConditionRunCompleted.IsTrue(scan) { 99 | scancopy := scan.DeepCopy() 100 | 101 | if !v1.ClusterScanConditionFailed.IsTrue(scan) { 102 | summary, report, err := c.getScanResults(ctx, scan) 103 | if err != nil { 104 | return nil, fmt.Errorf("error %v reading results of cluster scan object: %v", err, scanName) 105 | } 106 | scancopy.Status.Summary = summary 107 | _, err = reports.Create(report) 108 | if err != nil { 109 | return nil, fmt.Errorf("error %v saving clusterscanreport object", err) 110 | } 111 | } 112 | v1.ClusterScanConditionComplete.True(scancopy) 113 | /* update scan */ 114 | _, err = scans.UpdateStatus(scancopy) 115 | if err != nil { 116 | return nil, fmt.Errorf("error updating condition of scan object: %v", scanName) 117 | } 118 | logrus.Infof("Marking ClusterScanConditionComplete for scan: %v", scanName) 119 | jobs.Enqueue(obj.Namespace, obj.Name) 120 | } 121 | return obj, nil 122 | }) 123 | return nil 124 | } 125 | 126 | func (c *Controller) deleteJob(jobController batchctlv1.JobController, job *batchv1.Job, deletionPropagation metav1.DeletionPropagation) error { 127 | return jobController.Delete(job.Namespace, job.Name, &metav1.DeleteOptions{PropagationPolicy: &deletionPropagation}) 128 | } 129 | 130 | func (c *Controller) getScanResults(ctx context.Context, scan *v1.ClusterScan) (*v1.ClusterScanSummary, *v1.ClusterScanReport, error) { 131 | configmaps := c.coreFactory.Core().V1().ConfigMap() 132 | //get the output configmap and create a report 133 | outputConfigName := strings.Join([]string{`cisscan-output-for`, scan.Name}, "-") 134 | cm, err := configmaps.Cache().Get(v1.ClusterScanNS, outputConfigName) 135 | if err != nil { 136 | return nil, nil, fmt.Errorf("cisScanHandler: Updated: error fetching configmap %v: %v", outputConfigName, err) 137 | } 138 | outputBytes := []byte(cm.Data[v1.DefaultScanOutputFileName]) 139 | cisScanSummary, err := c.getScanSummary(outputBytes) 140 | if err != nil { 141 | return nil, nil, fmt.Errorf("cisScanHandler: Updated: error getting report from configmap %v: %v", outputConfigName, err) 142 | } 143 | if cisScanSummary == nil { 144 | return nil, nil, fmt.Errorf("cisScanHandler: Updated: error: got empty report from configmap %v", outputConfigName) 145 | } 146 | 147 | scanReport, err := c.createClusterScanReport(ctx, outputBytes, scan) 148 | if err != nil { 149 | return nil, nil, fmt.Errorf("cisScanHandler: Updated: error getting report from configmap %v: %v", outputConfigName, err) 150 | } 151 | 152 | return cisScanSummary, scanReport, nil 153 | } 154 | 155 | func (c *Controller) getScanSummary(outputBytes []byte) (*v1.ClusterScanSummary, error) { 156 | r, err := report.Get(outputBytes) 157 | if err != nil { 158 | return nil, err 159 | } 160 | if r == nil { 161 | return nil, nil 162 | } 163 | cisScanSummary := &v1.ClusterScanSummary{ 164 | Total: r.Total, 165 | Pass: r.Pass, 166 | Fail: r.Fail, 167 | Skip: r.Skip, 168 | Warn: r.Warn, 169 | NotApplicable: r.NotApplicable, 170 | } 171 | return cisScanSummary, nil 172 | } 173 | 174 | func (c *Controller) createClusterScanReport(ctx context.Context, outputBytes []byte, scan *v1.ClusterScan) (*v1.ClusterScanReport, error) { 175 | scanReport := &v1.ClusterScanReport{ 176 | ObjectMeta: metav1.ObjectMeta{ 177 | GenerateName: name.SafeConcatName("scan-report", scan.Name, 
scan.Spec.ScanProfileName) + "-", 178 | }, 179 | } 180 | profile, err := c.getClusterScanProfile(ctx, scan) 181 | if err != nil { 182 | return nil, fmt.Errorf("error loading v1.ClusterScanProfile %v: %w", scan.Spec.ScanProfileName, err) 183 | } 184 | scanReport.Spec.BenchmarkVersion = profile.Spec.BenchmarkVersion 185 | scanReport.Spec.LastRunTimestamp = time.Now().String() 186 | 187 | data, err := reportLibrary.GetJSONBytes(outputBytes) 188 | if err != nil { 189 | return nil, fmt.Errorf("error loading scan report json bytes: %w", err) 190 | } 191 | scanReport.Spec.ReportJSON = string(data[:]) 192 | 193 | ownerRef := metav1.OwnerReference{ 194 | APIVersion: "cis.cattle.io/v1", 195 | Kind: "ClusterScan", 196 | Name: scan.Name, 197 | UID: scan.GetUID(), 198 | } 199 | scanReport.ObjectMeta.OwnerReferences = append(scanReport.ObjectMeta.OwnerReferences, ownerRef) 200 | 201 | return scanReport, nil 202 | } 203 | 204 | func (c *Controller) ensureCleanup(scan *v1.ClusterScan) error { 205 | var err error 206 | // Delete the daemonset 207 | dsPrefix := "sonobuoy-rancher-kube-bench-daemon-set" 208 | dsList, err := c.daemonsetCache.List(v1.ClusterScanNS, labels.Set(sonobuoyWorkerLabel).AsSelector()) 209 | if err != nil { 210 | return fmt.Errorf("cis: ensureCleanup: error listing daemonsets: %w", err) 211 | } 212 | for _, ds := range dsList { 213 | if !strings.HasPrefix(ds.Name, dsPrefix) { 214 | continue 215 | } 216 | if e := c.daemonsets.Delete(v1.ClusterScanNS, ds.Name, &metav1.DeleteOptions{}); e != nil && !errors.IsNotFound(e) { 217 | return fmt.Errorf("cis: ensureCleanup: error deleting daemonset %v: %v", ds.Name, e) 218 | } 219 | } 220 | 221 | // Delete the pod 222 | podPrefix := name.SafeConcatName("security-scan-runner", scan.Name) 223 | podList, err := c.podCache.List(v1.ClusterScanNS, labels.Set(SonobuoyMasterLabel).AsSelector()) 224 | if err != nil { 225 | return fmt.Errorf("cis: ensureCleanup: error listing pods: %w", err) 226 | } 227 | for _, pod := range podList { 228 | if !strings.HasPrefix(pod.Name, podPrefix) { 229 | continue 230 | } 231 | if e := c.pods.Delete(v1.ClusterScanNS, pod.Name, &metav1.DeleteOptions{}); e != nil && !errors.IsNotFound(e) { 232 | return fmt.Errorf("cis: ensureCleanup: error deleting pod %v: %w", pod.Name, e) 233 | } 234 | } 235 | 236 | // Delete cms 237 | cms, err := c.configMapCache.List(v1.ClusterScanNS, labels.NewSelector()) 238 | if err != nil { 239 | return fmt.Errorf("cis: ensureCleanup: error listing cm: %w", err) 240 | } 241 | for _, cm := range cms { 242 | if !strings.Contains(cm.Name, scan.Name) { 243 | continue 244 | } 245 | 246 | if e := c.configmaps.Delete(v1.ClusterScanNS, cm.Name, &metav1.DeleteOptions{}); e != nil && !errors.IsNotFound(e) { 247 | return fmt.Errorf("cis: ensureCleanup: error deleting cm %v: %w", cm.Name, e) 248 | } 249 | } 250 | 251 | return err 252 | } 253 | -------------------------------------------------------------------------------- /pkg/securityscan/podHandler.go: -------------------------------------------------------------------------------- 1 | package securityscan 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/sirupsen/logrus" 8 | corev1 "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/labels" 12 | 13 | corectlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" 14 | "github.com/rancher/wrangler/v3/pkg/name" 15 | 16 | cisoperatorapi
"github.com/rancher/cis-operator/pkg/apis/cis.cattle.io" 17 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 18 | ) 19 | 20 | // pod events should update the job conditions after validating Done annotation and Output CM 21 | func (c *Controller) handlePods(ctx context.Context) error { 22 | scans := c.cisFactory.Cis().V1().ClusterScan() 23 | jobs := c.batchFactory.Batch().V1().Job() 24 | pods := c.coreFactory.Core().V1().Pod() 25 | pods.OnChange(ctx, c.Name, func(_ string, obj *corev1.Pod) (*corev1.Pod, error) { 26 | if obj == nil || obj.DeletionTimestamp != nil { 27 | return obj, nil 28 | } 29 | podSelector := labels.SelectorFromSet(labels.Set{ 30 | cisoperatorapi.LabelController: c.Name, 31 | }) 32 | // only handle pods launched by securityscan 33 | if obj.Labels == nil || !podSelector.Matches(labels.Set(obj.Labels)) { 34 | return obj, nil 35 | } 36 | // Check the annotation to see if it's done processing 37 | done, ok := obj.Annotations[cisoperatorapi.SonobuoyCompletionAnnotation] 38 | if !ok { 39 | return nil, nil 40 | } 41 | 42 | scanName, ok := obj.Labels[cisoperatorapi.LabelClusterScan] 43 | if !ok { 44 | // malformed 45 | return nil, nil 46 | } 47 | // get the scan being run 48 | scan, err := scans.Get(scanName, metav1.GetOptions{}) 49 | switch { 50 | case errors.IsNotFound(err): 51 | // scan is gone, delete 52 | logrus.Infof("scan gone, just delete it and move on %v", scanName) 53 | return nil, nil 54 | case err != nil: 55 | return obj, err 56 | } 57 | 58 | //find the job for this Pod and the clusterScan as well 59 | jobName := name.SafeConcatName("security-scan-runner", scanName) 60 | job, err := jobs.Cache().Get(obj.Namespace, jobName) 61 | switch { 62 | case errors.IsNotFound(err): 63 | return nil, nil 64 | case err != nil: 65 | return obj, err 66 | } 67 | 68 | scanCopy := scan.DeepCopy() 69 | if !v1.ClusterScanConditionRunCompleted.IsTrue(scan) { 70 | v1.ClusterScanConditionRunCompleted.True(scanCopy) 71 | if done != "true" { 72 | v1.ClusterScanConditionFailed.True(scanCopy) 73 | if done != "error" { 74 | v1.ClusterScanConditionFailed.Message(scanCopy, done) 75 | } 76 | logrus.Infof("Marking ClusterScanConditionFailed for scan: %v, error %v", scanName, done) 77 | } 78 | c.setClusterScanStatusDisplay(scanCopy) 79 | //update scan 80 | _, err = scans.UpdateStatus(scanCopy) 81 | if err != nil { 82 | return nil, fmt.Errorf("error updating condition of cluster scan object: %v", scanName) 83 | } 84 | logrus.Infof("Marking ClusterScanConditionRunCompleted for scan: %v", scanName) 85 | jobs.Enqueue(job.Namespace, job.Name) 86 | } 87 | return obj, nil 88 | }) 89 | return nil 90 | } 91 | 92 | func deletePod(podController corectlv1.PodController, pod *corev1.Pod, deletionPropagation metav1.DeletionPropagation) error { 93 | logrus.Infof("delete pod called %v", pod.Status.Conditions) 94 | return podController.Delete(pod.Namespace, pod.Name, &metav1.DeleteOptions{PropagationPolicy: &deletionPropagation}) 95 | } 96 | -------------------------------------------------------------------------------- /pkg/securityscan/scan/clusterscan.go: -------------------------------------------------------------------------------- 1 | package scan 2 | 3 | import ( 4 | cisoperatorapiv1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 5 | "github.com/rancher/wrangler/v3/pkg/crd" 6 | "github.com/rancher/wrangler/v3/pkg/schemas/openapi" 7 | ) 8 | 9 | func ClusterScanCRD() (*crd.CRD, error) { 10 | prototype := cisoperatorapiv1.NewClusterScan("", "", cisoperatorapiv1.ClusterScan{}) 11 | 
schema, err := openapi.ToOpenAPIFromStruct(*prototype) 12 | if err != nil { 13 | return nil, err 14 | } 15 | return &crd.CRD{ 16 | GVK: prototype.GroupVersionKind(), 17 | PluralName: cisoperatorapiv1.ClusterScanResourceName, 18 | Status: true, 19 | Schema: schema, 20 | Categories: []string{"securityscan"}, 21 | }, nil 22 | } 23 | -------------------------------------------------------------------------------- /pkg/securityscan/scanMetricsHandler.go: -------------------------------------------------------------------------------- 1 | package securityscan 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 8 | "github.com/sirupsen/logrus" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/client-go/util/retry" 11 | ) 12 | 13 | func (c *Controller) handleClusterScanMetrics(ctx context.Context) error { 14 | scans := c.cisFactory.Cis().V1().ClusterScan() 15 | 16 | scans.OnChange(ctx, c.Name, func(_ string, obj *v1.ClusterScan) (*v1.ClusterScan, error) { 17 | if obj == nil || obj.DeletionTimestamp != nil { 18 | return obj, nil 19 | } 20 | if !(v1.ClusterScanConditionAlerted.IsUnknown(obj) && v1.ClusterScanConditionComplete.IsTrue(obj)) { 21 | return obj, nil 22 | } 23 | 24 | logrus.Debugf("Updating metrics for scan %v", obj.Name) 25 | 26 | scanName := "manual" 27 | if obj.Spec.ScheduledScanConfig != nil && obj.Spec.ScheduledScanConfig.CronSchedule != "" { 28 | scanName = obj.Name 29 | } 30 | scanProfileName := obj.Status.LastRunScanProfileName 31 | numTestsFailed := float64(obj.Status.Summary.Fail) 32 | numTestsTotal := float64(obj.Status.Summary.Total) 33 | numTestsNA := float64(obj.Status.Summary.NotApplicable) 34 | numTestsSkip := float64(obj.Status.Summary.Skip) 35 | numTestsPass := float64(obj.Status.Summary.Pass) 36 | numTestsWarn := float64(obj.Status.Summary.Warn) 37 | clusterName := c.ImageConfig.ClusterName 38 | 39 | c.numTestsFailed.WithLabelValues(scanName, scanProfileName, clusterName).Set(numTestsFailed) 40 | c.numScansComplete.WithLabelValues(scanName, scanProfileName, clusterName).Inc() 41 | c.numTestsTotal.WithLabelValues(scanName, scanProfileName, clusterName).Set(numTestsTotal) 42 | c.numTestsPassed.WithLabelValues(scanName, scanProfileName, clusterName).Set(numTestsPass) 43 | c.numTestsSkipped.WithLabelValues(scanName, scanProfileName, clusterName).Set(numTestsSkip) 44 | c.numTestsNA.WithLabelValues(scanName, scanProfileName, clusterName).Set(numTestsNA) 45 | c.numTestsWarn.WithLabelValues(scanName, scanProfileName, clusterName).Set(numTestsWarn) 46 | 47 | logrus.Debugf("Done updating metrics for scan %v", obj.Name) 48 | 49 | if obj.Spec.ScheduledScanConfig != nil { 50 | updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { 51 | var err error 52 | scanObj, err := scans.Get(obj.Name, metav1.GetOptions{}) 53 | if err != nil { 54 | return err 55 | } 56 | if scanObj.Spec.ScheduledScanConfig.ScanAlertRule == nil || 57 | (scanObj.Spec.ScheduledScanConfig.ScanAlertRule != nil && 58 | !scanObj.Spec.ScheduledScanConfig.ScanAlertRule.AlertOnComplete && 59 | !scanObj.Spec.ScheduledScanConfig.ScanAlertRule.AlertOnFailure) { 60 | logrus.Debugf("No AlertRules configured for scan %v", scanObj.Name) 61 | v1.ClusterScanConditionAlerted.False(scanObj) 62 | v1.ClusterScanConditionAlerted.Message(scanObj, "No AlertRule configured for this scan") 63 | } else if scanObj.Status.ScanAlertingRuleName == "" { 64 | logrus.Debugf("Error creating PrometheusRule for scan %v", scanObj.Name) 65 | 
v1.ClusterScanConditionAlerted.False(scanObj) 66 | v1.ClusterScanConditionAlerted.Message(scanObj, "Alerts will not work due to the error creating PrometheusRule, Please check if Monitoring app is installed") 67 | } else { 68 | v1.ClusterScanConditionAlerted.True(scanObj) 69 | } 70 | _, err = scans.UpdateStatus(scanObj) 71 | return err 72 | }) 73 | 74 | if updateErr != nil { 75 | return obj, fmt.Errorf("Retrying, got error %v in updating condition of scan object: %v ", updateErr, obj.Name) 76 | } 77 | } 78 | 79 | return obj, nil 80 | }) 81 | return nil 82 | } 83 | -------------------------------------------------------------------------------- /pkg/securityscan/scheduledScanHandler.go: -------------------------------------------------------------------------------- 1 | package securityscan 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sort" 7 | "strings" 8 | "time" 9 | 10 | v1 "github.com/rancher/cis-operator/pkg/apis/cis.cattle.io/v1" 11 | "github.com/rancher/wrangler/v3/pkg/genericcondition" 12 | "github.com/rancher/wrangler/v3/pkg/name" 13 | "github.com/robfig/cron" 14 | "github.com/sirupsen/logrus" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/client-go/util/retry" 17 | ) 18 | 19 | func (c *Controller) handleScheduledClusterScans(ctx context.Context) error { 20 | scheduledScans := c.cisFactory.Cis().V1().ClusterScan() 21 | 22 | scheduledScans.OnChange(ctx, c.Name, func(_ string, obj *v1.ClusterScan) (*v1.ClusterScan, error) { 23 | if obj == nil || obj.DeletionTimestamp != nil { 24 | return obj, nil 25 | } 26 | 27 | if obj.Spec.ScheduledScanConfig != nil && obj.Spec.ScheduledScanConfig.CronSchedule == "" { 28 | return obj, nil 29 | } 30 | 31 | //if nextScanAt is set then make sure we process only if the time is right 32 | if v1.ClusterScanConditionComplete.IsTrue(obj) && obj.Status.LastRunTimestamp != "" && obj.Status.NextScanAt != "" { 33 | currTime := time.Now().Format(time.RFC3339) 34 | logrus.Debugf("scheduledScanHandler: sync called for scheduled ClusterScan CR %v ", obj.Name) 35 | logrus.Debugf("scheduledScanHandler: next run is scheduled for: %v, current time: %v", obj.Status.NextScanAt, currTime) 36 | 37 | nextScanTime, err := time.Parse(time.RFC3339, obj.Status.NextScanAt) 38 | if err != nil { 39 | return obj, fmt.Errorf("scheduledScanHandler: retrying, got error %w in parsing NextScanAt %v time for scheduledScan: %s", err, obj.Status.NextScanAt, obj.Name) 40 | } 41 | if nextScanTime.After(time.Now()) { 42 | logrus.Debugf("scheduledScanHandler: run time is later, skipping this run scheduledScan CR %v ", obj.Name) 43 | after := nextScanTime.Sub(time.Now()) 44 | scheduledScans.EnqueueAfter(obj.Name, after) 45 | if obj.Generation != obj.Status.ObservedGeneration { 46 | obj.Status.ObservedGeneration = obj.Generation 47 | return scheduledScans.UpdateStatus(obj) 48 | } 49 | return obj, nil 50 | } 51 | // can process this scan again 52 | logrus.Infof("scheduledScanHandler: now processing scheduledScan CR %v ", obj.Name) 53 | updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { 54 | var err error 55 | scheduledScanObj, err := scheduledScans.Get(obj.Name, metav1.GetOptions{}) 56 | if err != nil { 57 | return err 58 | } 59 | // reset conditions 60 | scheduledScanObj.Status.Conditions = []genericcondition.GenericCondition{} 61 | scheduledScanObj.Status.LastRunTimestamp = "" 62 | scheduledScanObj.Status.NextScanAt = "" 63 | 64 | _, err = scheduledScans.UpdateStatus(scheduledScanObj) 65 | return err 66 | }) 67 | 68 | if updateErr != nil { 69 | return obj, 
fmt.Errorf("Retrying, got error %w in updating status for scheduledScan: %s", updateErr, obj.Name) 70 | } 71 | } 72 | 73 | return obj, nil 74 | }) 75 | 76 | return nil 77 | } 78 | 79 | func (c *Controller) validateScheduledScanSpec(scan *v1.ClusterScan) error { 80 | if scan.Spec.ScheduledScanConfig != nil && scan.Spec.ScheduledScanConfig.CronSchedule != "" { 81 | _, err := cron.ParseStandard(scan.Spec.ScheduledScanConfig.CronSchedule) 82 | if err != nil { 83 | return fmt.Errorf("error parsing invalid cron string for schedule: %w", err) 84 | } 85 | } 86 | return nil 87 | } 88 | 89 | func (c *Controller) getCronSchedule(scan *v1.ClusterScan) (cron.Schedule, error) { 90 | schedule := v1.DefaultCronSchedule 91 | if scan.Spec.ScheduledScanConfig != nil && scan.Spec.ScheduledScanConfig.CronSchedule != "" { 92 | schedule = scan.Spec.ScheduledScanConfig.CronSchedule 93 | } 94 | cronSchedule, err := cron.ParseStandard(schedule) 95 | if err != nil { 96 | return nil, fmt.Errorf("Error parsing invalid cron string for schedule: %w", err) 97 | } 98 | return cronSchedule, nil 99 | } 100 | 101 | func (c *Controller) getRetentionCount(scan *v1.ClusterScan) int { 102 | retentionCount := v1.DefaultRetention 103 | if scan.Spec.ScheduledScanConfig != nil && scan.Spec.ScheduledScanConfig.RetentionCount != 0 { 104 | retentionCount = scan.Spec.ScheduledScanConfig.RetentionCount 105 | } 106 | return retentionCount 107 | } 108 | 109 | func (c *Controller) rescheduleScan(scan *v1.ClusterScan) error { 110 | scans := c.cisFactory.Cis().V1().ClusterScan() 111 | cronSchedule, err := c.getCronSchedule(scan) 112 | if err != nil { 113 | return fmt.Errorf("Cannot reschedule, Error parsing invalid cron string for schedule: %w", err) 114 | } 115 | now := time.Now() 116 | nextScanAt := cronSchedule.Next(now) 117 | scan.Status.NextScanAt = nextScanAt.Format(time.RFC3339) 118 | after := nextScanAt.Sub(now) 119 | scans.EnqueueAfter(scan.Name, after) 120 | return nil 121 | } 122 | 123 | func (c *Controller) purgeOldClusterScanReports(obj *v1.ClusterScan) error { 124 | reports := c.cisFactory.Cis().V1().ClusterScanReport() 125 | retention := c.getRetentionCount(obj) 126 | allClusterScanReportsList, err := reports.List(metav1.ListOptions{}) 127 | if err != nil { 128 | return fmt.Errorf("error listing cluster scans for scheduledScan %v: %w", obj.Name, err) 129 | } 130 | allClusterScanReports := allClusterScanReportsList.Items 131 | var clusterScanReports []v1.ClusterScanReport 132 | for _, cs := range allClusterScanReports { 133 | if !strings.HasPrefix(cs.Name, name.SafeConcatName("scan-report", obj.Name)+"-") { 134 | continue 135 | } 136 | clusterScanReports = append(clusterScanReports, cs) 137 | } 138 | if len(clusterScanReports) <= retention { 139 | return nil 140 | } 141 | sort.Slice(clusterScanReports, func(i, j int) bool { 142 | return !clusterScanReports[i].CreationTimestamp.Before(&clusterScanReports[j].CreationTimestamp) 143 | }) 144 | 145 | for _, cs := range clusterScanReports[retention:] { 146 | logrus.Infof("scheduledScanHandler: purgeOldScans: deleting cs: %v %v", cs.Name, cs.CreationTimestamp.String()) 147 | if err := c.deleteClusterScanReportWithRetry(cs.Name); err != nil { 148 | logrus.Errorf("scheduledScanHandler: purgeOldScans: error deleting cluster scan: %v: %v", 149 | cs.Name, err) 150 | } 151 | } 152 | return nil 153 | } 154 | 155 | func (c *Controller) deleteClusterScanReportWithRetry(name string) error { 156 | reports := c.cisFactory.Cis().V1().ClusterScanReport() 157 | delErr := 
retry.RetryOnConflict(retry.DefaultRetry, func() error { 158 | var err error 159 | err = reports.Delete(name, &metav1.DeleteOptions{}) 160 | return err 161 | }) 162 | return delErr 163 | } 164 | -------------------------------------------------------------------------------- /tests/Dockerfile.k3s: -------------------------------------------------------------------------------- 1 | # This image is solely used for testing purposes 2 | # and aims to wrap around k3s, making any needed 3 | # changes for the cis-operator tests to work. 4 | FROM rancher/k3s:v1.33.1-k3s1 5 | 6 | # Upstream does not have files /etc/passwd nor /etc/group 7 | # which causes cis-operator to fail when scheduling a 8 | # running container that maps those files from the "host". 9 | RUN echo "root:!:0:0::/:/bin/false" > /etc/passwd && \ 10 | touch /etc/group 11 | 12 | # A fake apiserver to trigger the if condition within 13 | # security-scan that runs kube-bench for the api-server. 14 | COPY kube-apiserver /usr/local/bin/kube-apiserver 15 | -------------------------------------------------------------------------------- /tests/k3d-expected.json: -------------------------------------------------------------------------------- 1 | { 2 | "fail": 48, 3 | "notApplicable": 24, 4 | "pass": 16, 5 | "skip": 0, 6 | "total": 130, 7 | "warn": 42 8 | } 9 | -------------------------------------------------------------------------------- /tests/k3s-bench-test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cis.cattle.io/v1 2 | kind: ClusterScan 3 | metadata: 4 | name: k3s-e2e-scan 5 | namespace: cis-operator-system 6 | spec: 7 | scanProfileName: k3s-cis-1.9-profile 8 | scoreWarning: pass 9 | -------------------------------------------------------------------------------- /tests/kube-apiserver: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | sleep 90 4 | --------------------------------------------------------------------------------
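The files under tests/ fit together as a small end-to-end check: Dockerfile.k3s builds a k3s image that carries the fake kube-apiserver, k3s-bench-test.yaml creates a ClusterScan using the k3s-cis-1.9-profile, and k3d-expected.json holds the summary that scan is expected to produce. Below is a rough Go sketch of the final comparison step only; it is not the actual harness (that wiring lives in hack/e2e and the Makefile, not shown here), the summary struct simply mirrors the keys of k3d-expected.json, and the file path and the way the live summary is obtained are assumptions.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// summary mirrors the fields of tests/k3d-expected.json.
type summary struct {
	Fail          int `json:"fail"`
	NotApplicable int `json:"notApplicable"`
	Pass          int `json:"pass"`
	Skip          int `json:"skip"`
	Total         int `json:"total"`
	Warn          int `json:"warn"`
}

func main() {
	// Expected summary checked into the repository (path assumed relative to the repo root).
	raw, err := os.ReadFile("tests/k3d-expected.json")
	if err != nil {
		panic(err)
	}
	var want summary
	if err := json.Unmarshal(raw, &want); err != nil {
		panic(err)
	}

	// In a real run the actual summary would be read from the ClusterScan status
	// once the k3s-e2e-scan defined in k3s-bench-test.yaml completes; here it is
	// read as JSON from stdin purely for illustration.
	var got summary
	if err := json.NewDecoder(os.Stdin).Decode(&got); err != nil {
		panic(err)
	}

	if want != got {
		fmt.Printf("scan summary mismatch:\nwant: %+v\ngot:  %+v\n", want, got)
		os.Exit(1)
	}
	fmt.Println("scan summary matches tests/k3d-expected.json")
}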