├── objectcache ├── objectcache.go ├── objectcache_interface.go ├── k8scache_interface.go └── objectcache_mock.go ├── examples ├── registryScanPayload.sh ├── scanRegistriesOldPayload.json ├── deleteRegistryScanCronJob.json ├── updateRegistryScanCronJobPayloadOld.json ├── setRegistryScanCronJobPayloadOld.json ├── testRegistryConnectivity.json ├── scanRegistriesPayload.json ├── setRegistryScanCronJobPayload.json └── updateRegistryScanCronJobPayload.json ├── .gitignore ├── continuousscanning ├── error.go ├── test.json ├── doc.go ├── watchbuilder_test.go ├── gvks.py ├── loader.go ├── service.go ├── watchbuilder.go └── loader_test.go ├── ADOPTERS.md ├── SECURITY.md ├── COMMUNITY.md ├── GOVERNANCE.md ├── MAINTAINERS.md ├── CONTRIBUTING.md ├── CODE_OF_CONDUCT.md ├── utils ├── doc.go ├── fakes.go ├── typesutils.go ├── containerprofile.go ├── utils_test.go ├── containerprofile_test.go ├── types.go └── utils.go ├── configuration ├── config.json └── capabilities.json ├── admission ├── rulebinding │ ├── rulebinding_interface.go │ ├── rulebinding_interface_mock.go │ └── cache │ │ ├── helpers.go │ │ └── helpers_test.go ├── exporter │ ├── exporter.go │ └── mock_exporter.go ├── rules │ ├── v1 │ │ ├── rule.go │ │ ├── factory.go │ │ ├── r2001_portforward_test.go │ │ ├── r2000_exec_to_pod_test.go │ │ ├── failureobject.go │ │ ├── r2001_portforward.go │ │ ├── helpers.go │ │ └── r2000_exec_to_pod.go │ ├── rule_interface_mock.go │ └── rule_interface.go ├── webhook │ ├── testdata │ │ ├── cert.pem │ │ └── key.pem │ ├── validator.go │ └── server_test.go └── rulesupdate │ └── updater.go ├── Makefile ├── docs ├── docs.go └── server.go ├── mainhandler ├── testdata │ └── vulnscan │ │ ├── regcreds-with-httpheaders.json │ │ ├── regcreds.json │ │ ├── registry-secret.json │ │ └── deployment.json ├── vulnscanhandlerhelper.go ├── cronjobhandler_test.go ├── handlerequestsutils.go ├── imageregistryhandler_test.go ├── handlerequests_test.go ├── handlecommandresponse.go ├── vulnscanhandlerhelper_test.go ├── imageregistryhandler.go ├── cronjobhandler.go ├── vulnscan_test.go ├── vulnscanhandler.go └── kubescapehandlerhelper_test.go ├── .github ├── workflows │ ├── pr-created.yaml │ └── pr-merged.yaml ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── PULL_REQUEST_TEMPLATE.md ├── restapihandler ├── restapiutils.go ├── restapi.go └── triggeraction.go ├── watcher ├── testdata │ ├── create-registry-command.json │ ├── rs-redis.json │ ├── rs-redis-2.json │ ├── registry-template-configmap.yaml │ ├── deployment-redis.json │ ├── deployment.json │ └── deployment-two-containers.json ├── utils.go ├── watchhandler.go ├── watchhandler_test.go ├── watchretry.go ├── cooldownqueue.go ├── commandshandler.go ├── cooldownqueue_test.go └── commandswatcher.go ├── servicehandler └── portsscan.go └── config └── config_test.go /objectcache/objectcache.go: -------------------------------------------------------------------------------- 1 | package objectcache 2 | 3 | type ObjectCache interface { 4 | GetKubernetesCache() KubernetesCache 5 | } 6 | -------------------------------------------------------------------------------- /examples/registryScanPayload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | curl -X POST http://127.0.0.1:4002/v1/triggerAction -H 'Content-Type: application/json' -d @registryScanPayload.json -v -w "\\n" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *_tests_go.txt 2 | *_tests_xunit.xml 3 | *k8s-ca-websocket* 4 | *operator* 5 | *.vscode* 6 | dist/* 7 | licenses 8 | .vscode/launch.json 9 | go.work 10 | go.work.* 11 | -------------------------------------------------------------------------------- /continuousscanning/error.go: -------------------------------------------------------------------------------- 1 | package continuousscanning 2 | 3 | import "errors" 4 | 5 | var ( 6 | ErrUnexpectedGVRString = errors.New("unexpected Group Version Resource string") 7 | ) 8 | -------------------------------------------------------------------------------- /ADOPTERS.md: -------------------------------------------------------------------------------- 1 | # Adopters 2 | 3 | The Kubescape project manages this document in the central project repository. 4 | 5 | Go to the [centralized ADOPTERS.md](https://github.com/kubescape/project-governance/blob/main/ADOPTERS.md) -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security 2 | 3 | The Kubescape project manages this document in the central project repository. 4 | 5 | Go to the [centralized SECURITY.md](https://github.com/kubescape/project-governance/blob/main/SECURITY.md) 6 | -------------------------------------------------------------------------------- /COMMUNITY.md: -------------------------------------------------------------------------------- 1 | # Community 2 | 3 | The Kubescape project manages this document in the central project repository. 4 | 5 | Go to the [centralized COMMUNITY.md](https://github.com/kubescape/project-governance/blob/main/COMMUNITY.md) 6 | -------------------------------------------------------------------------------- /GOVERNANCE.md: -------------------------------------------------------------------------------- 1 | # Governance 2 | 3 | The Kubescape project manages this document in the central project repository. 4 | 5 | Go to the [centralized GOVERNANCE.md](https://github.com/kubescape/project-governance/blob/main/GOVERNANCE.md) 6 | -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | # Maintainers 2 | 3 | The Kubescape project manages this document in the central project repository. 4 | 5 | Go to the [centralized MAINTAINERS.md](https://github.com/kubescape/project-governance/blob/main/MAINTAINERS.md) 6 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | The Kubescape project manages this document in the central project repository. 4 | 5 | Go to the [centralized CONTRIBUTING.md](https://github.com/kubescape/project-governance/blob/main/CONTRIBUTING.md) 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | The Kubescape project manages this document in the central project repository. 4 | 5 | Go to the [centralized CODE_OF_CONDUCT.md](https://github.com/kubescape/project-governance/blob/main/CODE_OF_CONDUCT.md) 6 | -------------------------------------------------------------------------------- /utils/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package utils contains utilities common to all units of the Operator 3 | 4 | The package includes: 5 | 6 | - Common utilities 7 | 8 | - Common test utilities, like constructors for test doubles etc 9 | */ 10 | package utils 11 | -------------------------------------------------------------------------------- /continuousscanning/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "match": [ 3 | { 4 | "apiGroups": [], 5 | "apiVersions": ["v1"], 6 | "resources": ["deployment"] 7 | }, 8 | { 9 | "apiGroups": ["rbac.authorization.k8s.io"], 10 | "apiVersions": ["v1"], 11 | "resources": ["ClusterRoleBinding"] 12 | } 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /examples/scanRegistriesOldPayload.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": [ 3 | { 4 | "CommandName": "scanRegistry", 5 | "args": { 6 | "registryInfo-v1": { 7 | "registryName": "quay.io/kubescape" 8 | } 9 | } 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /configuration/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "cleanupdelay": 600000000000, 3 | "excludeNamespaces": "kube-system,kubescape", 4 | "includeNamespaces": "", 5 | "matchingrulesfilename": "/etc/config/matchingRules.json", 6 | "namespace": "kubescape", 7 | "port": "4002", 8 | "triggersecurityframework": false, 9 | "workerconcurrency": 3 10 | } 11 | -------------------------------------------------------------------------------- /examples/deleteRegistryScanCronJob.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": [ 3 | { 4 | "CommandName": "deleteRegistryScanCronJob", 5 | "args": { 6 | "jobParams": { 7 | "name": "kubescape-registry-scan-4924508290975795459" 8 | } 9 | } 10 | } 11 | ] 12 | } -------------------------------------------------------------------------------- /admission/rulebinding/rulebinding_interface.go: -------------------------------------------------------------------------------- 1 | package rulebinding 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/kubescape/operator/admission/rules" 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | ) 9 | 10 | type RuleBindingCache interface { 11 | ListRulesForObject(ctx context.Context, object *unstructured.Unstructured) []rules.RuleEvaluator 12 | } 13 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DOCKERFILE_PATH=./build/Dockerfile 2 | BINARY_NAME=operator 3 | 4 | IMAGE?=quay.io/dwertent/$(BINARY_NAME) 5 | 6 | 7 | build: 8 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o $(BINARY_NAME) 9 | 10 | docker-build: 11 | docker buildx build --platform linux/amd64 -t $(IMAGE):$(TAG) -f $(DOCKERFILE_PATH) . 12 | docker-push: 13 | docker push $(IMAGE):$(TAG) 14 | -------------------------------------------------------------------------------- /admission/exporter/exporter.go: -------------------------------------------------------------------------------- 1 | package exporters 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/armosec/armoapi-go/armotypes" 7 | "github.com/kubescape/operator/admission/rules" 8 | ) 9 | 10 | type Exporter interface { 11 | SendAdmissionAlert(ruleFailure rules.RuleFailure) 12 | SendRegistryStatus(guid string, status armotypes.RegistryScanStatus, statusMessage string, time time.Time) 13 | } 14 | -------------------------------------------------------------------------------- /examples/updateRegistryScanCronJobPayloadOld.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": [ 3 | { 4 | "CommandName": "updateRegistryScanCronJob", 5 | "args": { 6 | "jobParams": { 7 | "name": "kubescape-registry-scan-4924508290975795459", 8 | "cronTabSchedule": "* 0 * * *" 9 | } 10 | } 11 | } 12 | ] 13 | } -------------------------------------------------------------------------------- /objectcache/objectcache_interface.go: -------------------------------------------------------------------------------- 1 | package objectcache 2 | 3 | type ObjectCacheImpl struct { 4 | kubernetesCache KubernetesCache 5 | } 6 | 7 | func NewObjectCache(kubernetesCache KubernetesCache) *ObjectCacheImpl { 8 | return &ObjectCacheImpl{ 9 | kubernetesCache: kubernetesCache, 10 | } 11 | } 12 | 13 | func (oc ObjectCacheImpl) GetKubernetesCache() KubernetesCache { 14 | return oc.kubernetesCache 15 | } 16 | -------------------------------------------------------------------------------- /docs/docs.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package classification Kubescape Operator 3 | 4 | The Kubescape Operator coordinates the Kubescape in-cluster components and allows clients to execute actions these components. 5 | 6 | Schemes: https, http 7 | BasePath: / 8 | Version: 1.0.0 9 | 10 | Consumes: 11 | - application/json 12 | Produces: 13 | - text/plain 14 | 15 | Security: 16 | - basic 17 | 18 | swagger:meta 19 | */ 20 | package docs 21 | -------------------------------------------------------------------------------- /examples/setRegistryScanCronJobPayloadOld.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": [ 3 | { 4 | "commandName": "setRegistryScanCronJob", 5 | "args": { 6 | "jobParams": { 7 | "cronTabSchedule": "5 5 * * *" 8 | }, 9 | "registryInfo-v1": { 10 | "registryName": "quay.io/kubescape" 11 | } 12 | } 13 | } 14 | ] 15 | } -------------------------------------------------------------------------------- /admission/exporter/mock_exporter.go: -------------------------------------------------------------------------------- 1 | package exporters 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/armosec/armoapi-go/armotypes" 7 | "github.com/kubescape/operator/admission/rules" 8 | ) 9 | 10 | type MockExporter struct{} 11 | 12 | var _ Exporter = (*MockExporter)(nil) 13 | 14 | func (m MockExporter) SendAdmissionAlert(_ rules.RuleFailure) {} 15 | 16 | func (m MockExporter) SendRegistryStatus(_ string, _ armotypes.RegistryScanStatus, _ string, _ time.Time) { 17 | } 18 | -------------------------------------------------------------------------------- /admission/rulebinding/rulebinding_interface_mock.go: -------------------------------------------------------------------------------- 1 | package rulebinding 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/kubescape/operator/admission/rules" 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | ) 9 | 10 | var _ RuleBindingCache = (*RuleBindingCacheMock)(nil) 11 | 12 | type RuleBindingCacheMock struct { 13 | } 14 | 15 | func (r *RuleBindingCacheMock) ListRulesForObject(_ context.Context, _ *unstructured.Unstructured) []rules.RuleEvaluator { 16 | return []rules.RuleEvaluator{} 17 | } 18 | -------------------------------------------------------------------------------- /mainhandler/testdata/vulnscan/regcreds-with-httpheaders.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Secret", 4 | "type": "kubernetes.io/dockerconfigjson", 5 | "data": { 6 | ".dockerconfigjson": "eyJhdXRocyI6IHsiaHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjogeyJhdXRoIjogIjxSRURBQ1RFRD4ifSwicmVnaXN0cnkuaG9yaXpvbnMuc2giOiB7ImF1dGgiOiAiPFJFREFDVEVEPiJ9fSwiSHR0cEhlYWRlcnMiOiB7IlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy42LWNlIChsaW51eCkifX0K" 7 | }, 8 | "metadata": { 9 | "name": "regcreds", 10 | "namespace": "default" 11 | } 12 | } -------------------------------------------------------------------------------- /mainhandler/testdata/vulnscan/regcreds.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Secret", 4 | "type": "kubernetes.io/dockerconfigjson", 5 | "data": { 6 | ".dockerconfigjson": "ewoJImF1dGhzIjogewoJCSJwcml2YXRlLmRvY2tlci5pbyI6IHsKICAgICAgICAgICAgInVzZXJuYW1lIjogIllXUnRhVzQ9IiwKICAgICAgICAgICAgInBhc3N3b3JkIjogIlNHRnlZbTl5TVRJek5EVT0iLAoJCQkiYXV0aCI6ICJZV1J0YVc0NlNHRnlZbTl5TVRJek5EVT0iCgkJfSwKCQkiaHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjogewoJCQkiYXV0aCI6ICJiV0YwZEdoNWVEcDBiM1J2IgoJCX0KICAgIH0KfQ==" 7 | }, 8 | "metadata": { 9 | "name": "regcreds", 10 | "namespace": "default" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /objectcache/k8scache_interface.go: -------------------------------------------------------------------------------- 1 | package objectcache 2 | 3 | import ( 4 | "github.com/kubescape/k8s-interface/k8sinterface" 5 | "k8s.io/client-go/kubernetes" 6 | ) 7 | 8 | type KubernetesCache interface { 9 | GetClientset() kubernetes.Interface 10 | } 11 | 12 | type KubernetesCacheImpl struct { 13 | kubernetesClient *k8sinterface.KubernetesApi 14 | } 15 | 16 | func (kc KubernetesCacheImpl) GetClientset() kubernetes.Interface { 17 | return kc.kubernetesClient.KubernetesClient 18 | } 19 | 20 | func NewKubernetesCache(kubernetesClient *k8sinterface.KubernetesApi) *KubernetesCacheImpl { 21 | return &KubernetesCacheImpl{ 22 | kubernetesClient: kubernetesClient, 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /.github/workflows/pr-created.yaml: -------------------------------------------------------------------------------- 1 | name: pull_request_created 2 | permissions: read-all 3 | on: 4 | pull_request: 5 | types: [opened, reopened, synchronize, ready_for_review] 6 | paths-ignore: 7 | - '*.md' 8 | - '*.yaml' 9 | - '.github/workflows/*' 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | pr-created: 17 | permissions: 18 | pull-requests: write 19 | security-events: write 20 | uses: kubescape/workflows/.github/workflows/incluster-comp-pr-created.yaml@main 21 | with: 22 | CGO_ENABLED: 0 23 | GO_VERSION: "1.25" 24 | secrets: inherit 25 | -------------------------------------------------------------------------------- /examples/testRegistryConnectivity.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": [ 3 | { 4 | "commandName": "testRegistryConnectivity", 5 | "jobTracking": { 6 | }, 7 | "args": { 8 | "registryInfo-v1": { 9 | "registryName": "quay.io/kubescape", 10 | "registryProvider": "quay.io", 11 | "depth": 4, 12 | "kind": "quay.io", 13 | "isHTTPS": true, 14 | "skipTLSVerify": false, 15 | "authMethod": { 16 | "type": "public" 17 | } 18 | } 19 | } 20 | } 21 | ] 22 | } -------------------------------------------------------------------------------- /restapihandler/restapiutils.go: -------------------------------------------------------------------------------- 1 | package restapihandler 2 | 3 | import ( 4 | "crypto/tls" 5 | "flag" 6 | "fmt" 7 | ) 8 | 9 | func (handler *HTTPHandler) loadTLSKey() error { 10 | certFile := "" 11 | keyFile := "" 12 | 13 | flag.StringVar(&certFile, "tlsCertFile", "", "File containing the x509 Certificate for HTTPS.") 14 | flag.StringVar(&keyFile, "tlsKeyFile", "", "File containing the x509 private key to --tlsCertFile.") 15 | flag.Parse() 16 | 17 | if keyFile == "" || certFile == "" { 18 | return nil 19 | } 20 | 21 | pair, err := tls.LoadX509KeyPair(certFile, keyFile) 22 | if err != nil { 23 | return fmt.Errorf("failed to load key pair: %w", err) 24 | } 25 | handler.keyPair = &pair 26 | return nil 27 | } 28 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: 'feature' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Overview 11 | 12 | 13 | ## Problem 14 | 15 | 16 | ## Solution 17 | 18 | 19 | ## Alternatives 20 | 21 | 22 | ## Additional context 23 | 24 | -------------------------------------------------------------------------------- /examples/scanRegistriesPayload.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": [ 3 | { 4 | "commandName": "scanRegistry", 5 | "jobTracking": { 6 | "timestamp": "0001-01-01T00:00:00Z" 7 | }, 8 | "args": { 9 | "registryInfo-v1": { 10 | "registryName": "quay.io/kubescape", 11 | "registryProvider": "quay.io", 12 | "depth": 1, 13 | "exclude": [ 14 | ], 15 | "kind": "quay.io", 16 | "isHTTPS": false, 17 | "skipTLSVerify": false, 18 | "authMethod": { 19 | "type": "public" 20 | } 21 | } 22 | } 23 | } 24 | ] 25 | } -------------------------------------------------------------------------------- /utils/fakes.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/kubescape/k8s-interface/k8sinterface" 7 | k8s "k8s.io/client-go/kubernetes" 8 | ) 9 | 10 | // NewK8sInterfaceFake returns a new K8sInterface with a fake Kubernetes Client attached 11 | // 12 | // This function is a Dependency Injection-friendly version for the 13 | // `KubernetesApi` constructor that allows to inject any Kubernetes Clients. 14 | // For example, the official fake Kubernetes client, so unit tests would have a 15 | // suitable test double instead of trying to talk to a real cluster 16 | func NewK8sInterfaceFake(k8sClient k8s.Interface) *k8sinterface.KubernetesApi { 17 | return &k8sinterface.KubernetesApi{ 18 | KubernetesClient: k8sClient, 19 | DynamicClient: nil, 20 | Context: context.Background(), 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /mainhandler/vulnscanhandlerhelper.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "github.com/armosec/armoapi-go/apis" 5 | pkgwlid "github.com/armosec/utils-k8s-go/wlid" 6 | ) 7 | 8 | // Extract vuln-scan command from create cronjob command, 9 | // And warp it with commands so the websocket can parse the request 10 | func getVulnScanRequest(command *apis.Command) *apis.Commands { 11 | 12 | c := *command 13 | c.CommandName = apis.TypeScanImages 14 | c.Args = nil 15 | commands := apis.Commands{ 16 | Commands: []apis.Command{c}, 17 | } 18 | return &commands 19 | } 20 | 21 | func getNamespaceFromVulnScanCommand(command *apis.Command) string { 22 | if command.WildWlid != "" { 23 | return pkgwlid.GetNamespaceFromWlid(command.WildWlid) 24 | } 25 | 26 | if len(command.Designators) > 0 { 27 | return command.Designators[0].GetNamespace() 28 | } 29 | 30 | return "" 31 | } 32 | -------------------------------------------------------------------------------- /mainhandler/testdata/vulnscan/registry-secret.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Secret", 4 | "type": "Opaque", 5 | "data": { 6 | "registriesAuth": "WyAgICAgCiAgewogICAgInJlZ2lzdHJ5IjogImRvY2tlci5pbyIsCiAgICAidXNlcm5hbWUiOiAidGVzdC11c2VyIiwKICAgICJwYXNzd29yZCI6ICJ0ZXN0LXBhc3MiLAogICAgImF1dGhfbWV0aG9kIjogImNyZWRlbnRpYWxzIiwKICAgICJodHRwIjogdHJ1ZQogIH0sCiAgewogICAgInJlZ2lzdHJ5IjogInF1YXkuaW8iLAogICAgInVzZXJuYW1lIjogInRlc3QtdXNlci1xdWF5IiwKICAgICJwYXNzd29yZCI6ICJ0ZXN0LXBhc3MtcXVheSIsCiAgICAiYXV0aF9tZXRob2QiOiAiY3JlZGVudGlhbHMiLAogICAgInNraXBUbHNWZXJpZnkiOiB0cnVlCiAgfQpdCg==" 7 | }, 8 | "metadata": { 9 | "creationTimestamp": "2024-03-03T13:45:44Z", 10 | "name": "kubescape-registry-scan-test-secret", 11 | "namespace": "kubescape", 12 | "resourceVersion": "80227", 13 | "uid": "0617be65-cf6c-4cac-8bcf-d26592b34156" 14 | } 15 | } -------------------------------------------------------------------------------- /examples/setRegistryScanCronJobPayload.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": [ 3 | { 4 | "CommandName": "setRegistryScanCronJob", 5 | "args": { 6 | "jobParams": { 7 | "name": "kubescape-registry-scan-3351514809465429647", 8 | "cronTabSchedule": "* 0 * * *" 9 | }, 10 | "registryInfo-v1": { 11 | "registryName": "quay.io/kubescape", 12 | "registryProvider": "quay.io", 13 | "depth": 1, 14 | "exclude": [ 15 | ], 16 | "kind": "quay.io", 17 | "isHTTPS": false, 18 | "skipTLSVerify": false, 19 | "authMethod": { 20 | "type": "public" 21 | } 22 | } 23 | } 24 | } 25 | ] 26 | } -------------------------------------------------------------------------------- /mainhandler/cronjobhandler_test.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/armosec/armoapi-go/apis" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func getCommandForConfigMap(jobName, cronTabSchedule string) *apis.Command { 11 | jobParams := apis.CronJobParams{ 12 | JobName: jobName, 13 | CronTabSchedule: cronTabSchedule, 14 | } 15 | return &apis.Command{ 16 | CommandName: apis.TypeSetVulnScanCronJob, 17 | WildWlid: "wlid://cluster-minikube", 18 | Args: map[string]interface{}{ 19 | "jobParams": jobParams, 20 | }, 21 | } 22 | } 23 | func TestGetJobParams(t *testing.T) { 24 | jobName := "aaaa" 25 | cronTabSchedule := "bbbb" 26 | command := getCommandForConfigMap(jobName, cronTabSchedule) 27 | jobParams := getJobParams(command) 28 | 29 | assert.Equal(t, jobParams.JobName, jobName) 30 | assert.Equal(t, jobParams.CronTabSchedule, cronTabSchedule) 31 | 32 | } 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: 'bug' 6 | assignees: '' 7 | 8 | --- 9 | 10 | # Description 11 | 12 | 13 | # Environment 14 | OS: ` ` 15 | Version: ` ` 16 | 17 | # Steps To Reproduce 18 | 25 | 26 | # Expected behavior 27 | 28 | 29 | # Actual Behavior 30 | 31 | 32 | # Additional context 33 | 34 | -------------------------------------------------------------------------------- /examples/updateRegistryScanCronJobPayload.json: -------------------------------------------------------------------------------- 1 | { 2 | "commands": [ 3 | { 4 | "CommandName": "updateRegistryScanCronJob", 5 | "args": { 6 | "jobParams": { 7 | "name": "kubescape-registry-scan-6185063278658353525", 8 | "cronTabSchedule": "* 2 * 2 *" 9 | }, 10 | "registryInfo-v1": { 11 | "registryName": "quay.io/kubescape", 12 | "registryProvider": "quay.io", 13 | "depth": 1, 14 | "exclude": [ 15 | ], 16 | "kind": "quay.io", 17 | "isHTTPS": false, 18 | "skipTLSVerify": false, 19 | "authMethod": { 20 | "type": "private", 21 | "username": "user", 22 | "password": "pass" 23 | } 24 | } 25 | } 26 | } 27 | ] 28 | } -------------------------------------------------------------------------------- /utils/typesutils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/armosec/armoapi-go/apis" 8 | "github.com/google/uuid" 9 | beServerV1 "github.com/kubescape/backend/pkg/server/v1" 10 | "github.com/kubescape/operator/config" 11 | ) 12 | 13 | func GetRequestHeaders(accessKey string) map[string]string { 14 | return map[string]string{ 15 | "Content-Type": "application/json", 16 | beServerV1.AccessKeyHeader: accessKey, 17 | } 18 | } 19 | 20 | func NewSessionObj(ctx context.Context, config config.IConfig, command *apis.Command, parentJobId, jobID string) *SessionObj { 21 | sessionObj := SessionObj{ 22 | CustomerGUID: config.AccountID(), 23 | JobID: jobID, 24 | ParentJobID: parentJobId, 25 | Command: command, 26 | Timestamp: time.Now(), 27 | } 28 | 29 | if jobID == "" { 30 | sessionObj.JobID = uuid.NewString() 31 | } 32 | 33 | return &sessionObj 34 | } 35 | 36 | func (s *SessionObj) SetOperatorCommandDetails(opcmd *OperatorCommandDetails) { 37 | s.ParentCommandDetails = opcmd 38 | } 39 | -------------------------------------------------------------------------------- /admission/rulebinding/cache/helpers.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | typesv1 "github.com/kubescape/node-agent/pkg/rulebindingmanager/types/v1" 5 | "github.com/kubescape/node-agent/pkg/utils" 6 | "github.com/kubescape/node-agent/pkg/watcher" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | k8sruntime "k8s.io/apimachinery/pkg/runtime" 10 | ) 11 | 12 | func uniqueName(obj metav1.Object) string { 13 | return utils.CreateK8sPodID(obj.GetNamespace(), obj.GetName()) 14 | } 15 | 16 | func unstructuredToRuleBinding(obj *unstructured.Unstructured) (*typesv1.RuntimeAlertRuleBinding, error) { 17 | rb := &typesv1.RuntimeAlertRuleBinding{} 18 | if err := k8sruntime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, rb); err != nil { 19 | return nil, err 20 | } 21 | return rb, nil 22 | } 23 | 24 | func resourcesToWatch() []watcher.WatchResource { 25 | var w []watcher.WatchResource 26 | 27 | // add rule binding 28 | rb := watcher.NewWatchResource(typesv1.RuleBindingAlertGvr, metav1.ListOptions{}) 29 | w = append(w, rb) 30 | 31 | return w 32 | } 33 | -------------------------------------------------------------------------------- /watcher/testdata/create-registry-command.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "kubescape.io/v1alpha1", 3 | "kind": "OperatorCommand", 4 | "metadata": { 5 | "creationTimestamp": "2024-10-22T07:22:27Z", 6 | "generation": 1, 7 | "labels": { 8 | "kubescape.io/app-name": "operator" 9 | }, 10 | "name": "52601522-359f-4417-a140-cf60e57302f6", 11 | "namespace": "kubescape", 12 | "uid": "e67cc9d0-d486-4a77-8f32-572bf952e3de" 13 | }, 14 | "spec": { 15 | "body": "eyJndWlkIjoiYTY3YWNmMWQtOGYwNC00MTc5LTg1NjUtYTZmY2FlZWQ4YzdjIiwibmFtZSI6IiIsInVwZGF0ZWRUaW1lIjoiMjAyNC0xMC0yMlQwNzoyMjoyN1oiLCJwcm92aWRlciI6InF1YXkiLCJjbHVzdGVyTmFtZSI6ImNsdXN0ZXItMjE4MzQiLCJyZXBvc2l0b3JpZXMiOlsidGVzdDEiXSwic2NhbkZyZXF1ZW5jeSI6IjMgNSAqICogKiIsInJlc291cmNlTmFtZSI6Imt1YmVzY2FwZS1yZWdpc3RyeS1zY2FuLTIxMjI3OTczMTAiLCJhdXRoSUQiOiI4YmE4N2JkNS1hNjc1LTQ4YTYtOTEzMC1kN2ZlNjBlYzVhZjEiLCJjb250YWluZXJSZWdpc3RyeU5hbWUiOiJ0ZXN0Iiwicm9ib3RBY2NvdW50TmFtZSI6InRlc3QiLCJyb2JvdEFjY291bnRUb2tlbiI6InRlc3QifQo=", 16 | "commandType": "CreateRegistry", 17 | "guid": "a67acf1d-8f04-4179-8565-a6fcaeed8c7c" 18 | } 19 | } -------------------------------------------------------------------------------- /servicehandler/portsscan.go: -------------------------------------------------------------------------------- 1 | package servicehandler 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/kubescape/go-logger" 7 | "github.com/kubescape/kubescape-network-scanner/cmd" 8 | corev1 "k8s.io/api/core/v1" 9 | ) 10 | 11 | type Port struct { 12 | port int 13 | protocol string 14 | sessionLayer string 15 | presentationLayer string 16 | applicationLayer string 17 | authenticated *bool 18 | } 19 | 20 | func (port *Port) scan(ctx context.Context, ip string) { 21 | result, err := cmd.ScanTargets(ctx, ip, port.port) 22 | if err != nil { 23 | logger.L().Ctx(ctx).Error(err.Error()) 24 | return 25 | } 26 | 27 | port.applicationLayer = result.ApplicationLayer 28 | port.presentationLayer = result.PresentationLayer 29 | port.sessionLayer = result.SessionLayer 30 | if result.ApplicationLayer != "" { 31 | port.authenticated = &result.IsAuthenticated 32 | } 33 | } 34 | 35 | func K8sPortsTranslator(sp []corev1.ServicePort) []Port { 36 | ports := make([]Port, 0, len(sp)) 37 | for _, port := range sp { 38 | ports = append(ports, 39 | Port{ 40 | port: int(port.Port), 41 | protocol: string(port.Protocol), 42 | }) 43 | } 44 | return ports 45 | } 46 | -------------------------------------------------------------------------------- /configuration/capabilities.json: -------------------------------------------------------------------------------- 1 | { 2 | "capabilities": { 3 | "configurationScan": "enable", 4 | "continuousScan": "disable", 5 | "nodeScan": "enable", 6 | "relevancy": "enable", 7 | "vulnerabilityScan": "enable", 8 | "admissionController": "enable" 9 | }, 10 | "components": { 11 | "hostScanner": { 12 | "enabled": true 13 | }, 14 | "kubescape": { 15 | "enabled": true 16 | }, 17 | "kubescapeScheduler": { 18 | "enabled": true 19 | }, 20 | "kubevuln": { 21 | "enabled": true 22 | }, 23 | "kubevulnScheduler": { 24 | "enabled": true 25 | }, 26 | "nodeAgent": { 27 | "enabled": true 28 | }, 29 | "operator": { 30 | "enabled": true 31 | }, 32 | "otelCollector": { 33 | "enabled": true 34 | }, 35 | "serviceDiscovery": { 36 | "enabled": true 37 | }, 38 | "storage": { 39 | "enabled": true 40 | } 41 | }, 42 | "configurations": { 43 | "persistence": "enable", 44 | "server": { 45 | "account": null, 46 | "discoveryUrl": "foo.com", 47 | "otelUrl": null 48 | } 49 | }, 50 | "serviceScanConfig":{ 51 | "enabled": true, 52 | "interval": "60s" 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /objectcache/objectcache_mock.go: -------------------------------------------------------------------------------- 1 | package objectcache 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/kubescape/k8s-interface/k8sinterface" 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/client-go/kubernetes" 10 | ) 11 | 12 | type KubernetesCacheMockImpl struct{} 13 | 14 | func (om KubernetesCacheMockImpl) GetClientset() kubernetes.Interface { 15 | client := k8sinterface.NewKubernetesApiMock().KubernetesClient 16 | initializeClient(client) 17 | 18 | return client 19 | } 20 | 21 | func initializeClient(client kubernetes.Interface) { 22 | pod := &corev1.Pod{ 23 | ObjectMeta: metav1.ObjectMeta{ 24 | Name: "test-pod", 25 | Namespace: "test-namespace", 26 | Labels: map[string]string{ 27 | "app": "test-app", 28 | "workload": "test-workload", 29 | "controller": "ReplicaSet", 30 | }, 31 | OwnerReferences: []metav1.OwnerReference{ 32 | { 33 | Kind: "ReplicaSet", 34 | Name: "test-workload", 35 | }, 36 | }, 37 | }, 38 | Spec: corev1.PodSpec{ 39 | NodeName: "test-node", 40 | Containers: []corev1.Container{ 41 | { 42 | Name: "test-container", 43 | Image: "nginx:1.14.2", 44 | }, 45 | }, 46 | }, 47 | Status: corev1.PodStatus{ 48 | Phase: corev1.PodRunning, 49 | PodIP: "192.168.1.1", 50 | }, 51 | } 52 | 53 | client.CoreV1().Pods("test-namespace").Create(context.TODO(), pod, metav1.CreateOptions{}) 54 | } 55 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | This PR fixes # 3 | 4 | **[Signed Commits](../CONTRIBUTING.md#sign-off-per-commit)** 5 | - [ ] Yes, I signed my commits. 6 | 7 | 8 | 9 | 14 | 15 | 20 | 21 | 26 | 27 | 37 | 38 | 52 | -------------------------------------------------------------------------------- /continuousscanning/doc.go: -------------------------------------------------------------------------------- 1 | // package continuousscanning provides utilities that help the Operator watch for changes 2 | // in the cluster it operates in 3 | // 4 | // # Resource Kinds that the Operator is interested in 5 | // 6 | // The non-namespaced kinds the Operator is interested in are: 7 | // - */*/ClusterRole 8 | // - */*/ClusterRoleBinding 9 | // - rbac.authorization.k8s.io/v1/ClusterRole 10 | // - rbac.authorization.k8s.io/v1/ClusterRoleBinding 11 | // - /v1/Namespace 12 | // - */*/Namespace 13 | // - /v1/Node 14 | // - admissionregistration.k8s.io/*/MutatingWebhookConfiguration 15 | // - admissionregistration.k8s.io/*/ValidatingWebhookConfiguration 16 | // - apiregistration.k8s.io/v1/APIService 17 | // - policy/*/PodSecurityPolicy 18 | // 19 | // The Namespaced kinds the Operator is interested in are: 20 | // - */*/ConfigMap 21 | // - */*/CronJob 22 | // - */*/DaemonSet 23 | // - */*/Deployment 24 | // - */*/Job 25 | // - */*/Pod 26 | // - */*/ReplicaSet 27 | // - */*/Role 28 | // - */*/RoleBinding 29 | // - */*/ServiceAccount 30 | // - */*/StatefulSet 31 | // - /v1/Pod 32 | // - /v1/Service 33 | // - /v1/ServiceAccount 34 | // - apps/v1/DaemonSet 35 | // - apps/v1/Deployment 36 | // - apps/v1/ReplicaSet 37 | // - apps/v1/StatefulSet 38 | // - batch/*/CronJob 39 | // - batch/*/Job 40 | // - networking.k8s.io/v1/Ingress 41 | // - networking.k8s.io/v1/NetworkPolicy 42 | // - rbac.authorization.k8s.io/v1/Role 43 | // - rbac.authorization.k8s.io/v1/RoleBinding 44 | package continuousscanning 45 | -------------------------------------------------------------------------------- /docs/server.go: -------------------------------------------------------------------------------- 1 | package docs 2 | 3 | import ( 4 | _ "embed" 5 | "net/http" 6 | 7 | "github.com/go-openapi/runtime/middleware" 8 | ) 9 | 10 | const ( 11 | OpenAPIDocsEndpoint = "docs" 12 | OpenAPIRapiEndpoint = "rapi" 13 | OpenAPISwaggerUIEndpoint = "swaggerui" 14 | OpenAPIswaggerJSONEndpoint = "swagger.yaml" 15 | OpenAPIV2Prefix = "/openapi/v2/" 16 | ) 17 | 18 | //go:embed swagger.yaml 19 | var specJSONBytes []byte 20 | 21 | // ServeOpenAPISpec returns the OpenAPI specification file 22 | func ServeOpenAPISpec(w http.ResponseWriter, r *http.Request) { 23 | w.WriteHeader(http.StatusOK) 24 | w.Write(specJSONBytes) 25 | } 26 | 27 | // NewOpenAPIUIHandler returns a handler that serves OpenAPI specs via UI 28 | func NewOpenAPIUIHandler() http.Handler { 29 | redocOpts := middleware.RedocOpts{ 30 | BasePath: OpenAPIV2Prefix, 31 | SpecURL: OpenAPIswaggerJSONEndpoint, 32 | } 33 | RapiDocOpts := middleware.RapiDocOpts{ 34 | BasePath: OpenAPIV2Prefix, 35 | SpecURL: OpenAPIswaggerJSONEndpoint, 36 | Path: OpenAPIRapiEndpoint, 37 | } 38 | opts := middleware.SwaggerUIOpts{ 39 | BasePath: OpenAPIV2Prefix, 40 | SpecURL: OpenAPIswaggerJSONEndpoint, 41 | Path: OpenAPISwaggerUIEndpoint, 42 | } 43 | 44 | var openAPISpecHandler http.Handler = http.HandlerFunc(ServeOpenAPISpec) 45 | 46 | openAPIUIHandler := middleware.Redoc(redocOpts, openAPISpecHandler) 47 | openAPIUIHandler = middleware.RapiDoc(RapiDocOpts, openAPIUIHandler) 48 | openAPIUIHandler = middleware.SwaggerUI(opts, openAPIUIHandler) 49 | 50 | return openAPIUIHandler 51 | } 52 | -------------------------------------------------------------------------------- /admission/rules/v1/rule.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "github.com/goradd/maps" 5 | "github.com/kubescape/operator/admission/rules" 6 | ) 7 | 8 | const ( 9 | RulePriorityNone = 0 10 | RulePriorityLow = 1 11 | RulePriorityMed = 5 12 | RulePriorityHigh = 8 13 | RulePriorityCritical = 10 14 | RulePrioritySystemIssue = 1000 15 | ) 16 | 17 | type RuleDescriptor struct { 18 | // Rule ID 19 | ID string 20 | // Rule Name 21 | Name string 22 | // Rule Description 23 | Description string 24 | // Priority 25 | Priority int 26 | // Tags 27 | Tags []string 28 | // Create a rule function 29 | RuleCreationFunc func() rules.RuleEvaluator 30 | } 31 | 32 | func (r *RuleDescriptor) HasTags(tags []string) bool { 33 | for _, tag := range tags { 34 | for _, ruleTag := range r.Tags { 35 | if tag == ruleTag { 36 | return true 37 | } 38 | } 39 | } 40 | return false 41 | } 42 | 43 | type BaseRule struct { 44 | // Mutex for protecting rule parameters. 45 | parameters maps.SafeMap[string, interface{}] 46 | } 47 | 48 | func (br *BaseRule) SetParameters(parameters map[string]interface{}) { 49 | for k, v := range parameters { 50 | br.parameters.Set(k, v) 51 | } 52 | } 53 | 54 | func (br *BaseRule) GetParameters() map[string]interface{} { 55 | 56 | // Create a copy to avoid returning a reference to the internal map 57 | parametersCopy := make(map[string]interface{}, br.parameters.Len()) 58 | 59 | br.parameters.Range( 60 | func(key string, value interface{}) bool { 61 | parametersCopy[key] = value 62 | return true 63 | }, 64 | ) 65 | return parametersCopy 66 | } 67 | -------------------------------------------------------------------------------- /admission/rules/v1/factory.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "github.com/kubescape/operator/admission/rules" 5 | ) 6 | 7 | var _ rules.RuleCreator = (*RuleCreatorImpl)(nil) 8 | 9 | type RuleCreatorImpl struct { 10 | ruleDescriptions []RuleDescriptor 11 | } 12 | 13 | func NewRuleCreator() *RuleCreatorImpl { 14 | return &RuleCreatorImpl{ 15 | ruleDescriptions: []RuleDescriptor{ 16 | R2000ExecToPodRuleDescriptor, 17 | R2001PortForwardRuleDescriptor, 18 | }, 19 | } 20 | } 21 | 22 | func (r *RuleCreatorImpl) CreateRulesByTags(tags []string) []rules.RuleEvaluator { 23 | var rules []rules.RuleEvaluator 24 | for _, rule := range r.ruleDescriptions { 25 | if rule.HasTags(tags) { 26 | rules = append(rules, rule.RuleCreationFunc()) 27 | } 28 | } 29 | return rules 30 | } 31 | 32 | func (r *RuleCreatorImpl) CreateRuleByID(id string) rules.RuleEvaluator { 33 | for _, rule := range r.ruleDescriptions { 34 | if rule.ID == id { 35 | return rule.RuleCreationFunc() 36 | } 37 | } 38 | return nil 39 | } 40 | 41 | func (r *RuleCreatorImpl) CreateRuleByName(name string) rules.RuleEvaluator { 42 | for _, rule := range r.ruleDescriptions { 43 | if rule.Name == name { 44 | return rule.RuleCreationFunc() 45 | } 46 | } 47 | return nil 48 | } 49 | 50 | func (r *RuleCreatorImpl) GetAllRuleDescriptors() []RuleDescriptor { 51 | return r.ruleDescriptions 52 | } 53 | 54 | func (r *RuleCreatorImpl) CreateAllRules() []rules.RuleEvaluator { 55 | all := make([]rules.RuleEvaluator, 0, len(r.ruleDescriptions)) 56 | for _, rd := range r.ruleDescriptions { 57 | all = append(all, rd.RuleCreationFunc()) 58 | } 59 | return all 60 | } 61 | -------------------------------------------------------------------------------- /continuousscanning/watchbuilder_test.go: -------------------------------------------------------------------------------- 1 | package continuousscanning 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "k8s.io/apimachinery/pkg/runtime/schema" 11 | dynamicfake "k8s.io/client-go/dynamic/fake" 12 | ktest "k8s.io/client-go/testing" 13 | ) 14 | 15 | func assertWatchAction(t *testing.T, gotAction ktest.Action, wantGVR schema.GroupVersionResource) { 16 | t.Helper() 17 | gotAction, ok := gotAction.(ktest.WatchActionImpl) 18 | assert.Equalf(t, true, ok, "incorrect action type, expecting watch") 19 | 20 | if ok { 21 | gotGvr := gotAction.GetResource() 22 | 23 | assert.Equalf(t, wantGVR, gotGvr, "GVR mismatch") 24 | } 25 | 26 | } 27 | 28 | func TestNewDynamicWatch(t *testing.T) { 29 | tt := []struct { 30 | wantErr error 31 | inputGVR schema.GroupVersionResource 32 | name string 33 | wantActions []ktest.Action 34 | }{ 35 | { 36 | name: "", 37 | inputGVR: schema.GroupVersionResource{ 38 | Group: "", 39 | Version: "v1", 40 | Resource: "Pods", 41 | }, 42 | wantActions: []ktest.Action{}, 43 | wantErr: nil, 44 | }, 45 | } 46 | 47 | for _, tc := range tt { 48 | t.Run(tc.name, func(t *testing.T) { 49 | ctx := context.Background() 50 | opts := metav1.ListOptions{} 51 | dynClient := dynamicfake.NewSimpleDynamicClient(runtime.NewScheme()) 52 | 53 | _, gotErr := NewDynamicWatch(ctx, dynClient, tc.inputGVR, opts) 54 | 55 | gotActions := dynClient.Actions() 56 | 57 | assertWatchAction(t, gotActions[0], tc.inputGVR) 58 | assert.ErrorIs(t, gotErr, tc.wantErr) 59 | }) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /admission/rules/v1/r2001_portforward_test.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/kubescape/operator/objectcache" 7 | "github.com/zeebo/assert" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "k8s.io/apiserver/pkg/admission" 11 | "k8s.io/apiserver/pkg/authentication/user" 12 | ) 13 | 14 | func TestR2001(t *testing.T) { 15 | event := admission.NewAttributesRecord( 16 | &unstructured.Unstructured{ 17 | Object: map[string]interface{}{ 18 | "kind": "PodPortForwardOptions", 19 | }, 20 | }, 21 | nil, 22 | schema.GroupVersionKind{ 23 | Kind: "PodPortForwardOptions", 24 | }, 25 | "test-namespace", 26 | "test-pod", 27 | schema.GroupVersionResource{ 28 | Resource: "pods", 29 | }, 30 | "", 31 | admission.Create, 32 | nil, 33 | false, 34 | &user.DefaultInfo{ 35 | Name: "test-user", 36 | Groups: []string{"test-group"}, 37 | }, 38 | ) 39 | 40 | rule := CreateRuleR2001PortForward() 41 | result := rule.ProcessEvent(event, objectcache.KubernetesCacheMockImpl{}) 42 | 43 | assert.NotNil(t, result) 44 | assert.Equal(t, "test-workload", result.GetRuntimeAlertK8sDetails().WorkloadName) 45 | assert.Equal(t, "test-namespace", result.GetRuntimeAlertK8sDetails().WorkloadNamespace) 46 | assert.Equal(t, "ReplicaSet", result.GetRuntimeAlertK8sDetails().WorkloadKind) 47 | assert.Equal(t, "test-node", result.GetRuntimeAlertK8sDetails().NodeName) 48 | assert.Equal(t, "Port forward detected on pod test-pod", result.GetRuleAlert().RuleDescription) 49 | assert.Equal(t, "test-pod", result.GetRuntimeAlertK8sDetails().PodName) 50 | assert.Equal(t, "test-namespace", result.GetRuntimeAlertK8sDetails().Namespace) 51 | } 52 | -------------------------------------------------------------------------------- /restapihandler/restapi.go: -------------------------------------------------------------------------------- 1 | package restapihandler 2 | 3 | import ( 4 | "crypto/tls" 5 | "fmt" 6 | "net/http" 7 | 8 | "github.com/gorilla/mux" 9 | "github.com/kubescape/go-logger" 10 | "github.com/kubescape/operator/config" 11 | "github.com/kubescape/operator/docs" 12 | "github.com/panjf2000/ants/v2" 13 | "go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux" 14 | ) 15 | 16 | type HTTPHandler struct { 17 | keyPair *tls.Certificate 18 | pool *ants.PoolWithFunc 19 | config config.IConfig 20 | } 21 | 22 | func NewHTTPHandler(pool *ants.PoolWithFunc, config config.IConfig) *HTTPHandler { 23 | return &HTTPHandler{ 24 | keyPair: nil, 25 | pool: pool, 26 | config: config, 27 | } 28 | } 29 | 30 | // SetupHTTPListener set up listening http servers 31 | func (resthandler *HTTPHandler) SetupHTTPListener(port string) error { 32 | err := resthandler.loadTLSKey() 33 | if err != nil { 34 | return err 35 | } 36 | server := &http.Server{ 37 | Addr: fmt.Sprintf(":%v", port), 38 | } 39 | if resthandler.keyPair != nil { 40 | server.TLSConfig = &tls.Config{Certificates: []tls.Certificate{*resthandler.keyPair}} 41 | } 42 | rtr := mux.NewRouter() 43 | rtr.Use(otelmux.Middleware("operator-http")) 44 | rtr.HandleFunc("/v1/triggerAction", resthandler.ActionRequest) 45 | 46 | openAPIUIHandler := docs.NewOpenAPIUIHandler() 47 | rtr.PathPrefix(docs.OpenAPIV2Prefix).Methods("GET").Handler(openAPIUIHandler) 48 | 49 | server.Handler = rtr 50 | 51 | logger.L().Info("Waiting for REST API to receive notifications, port: " + port) 52 | 53 | // listen 54 | if resthandler.keyPair != nil { 55 | return server.ListenAndServeTLS("", "") 56 | } else { 57 | return server.ListenAndServe() 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /admission/rules/rule_interface_mock.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "github.com/kubescape/operator/objectcache" 5 | "k8s.io/apiserver/pkg/admission" 6 | ) 7 | 8 | var _ RuleCreator = (*RuleCreatorMock)(nil) 9 | 10 | type RuleCreatorMock struct { 11 | } 12 | 13 | func (r *RuleCreatorMock) CreateRulesByTags(tags []string) []RuleEvaluator { 14 | var rl []RuleEvaluator 15 | for _, t := range tags { 16 | rl = append(rl, &RuleMock{RuleName: t}) 17 | } 18 | return rl 19 | } 20 | func (r *RuleCreatorMock) CreateRuleByID(id string) RuleEvaluator { 21 | return &RuleMock{RuleID: id} 22 | } 23 | 24 | func (r *RuleCreatorMock) CreateRuleByName(name string) RuleEvaluator { 25 | return &RuleMock{RuleName: name} 26 | } 27 | 28 | func (r *RuleCreatorMock) CreateAllRules() []RuleEvaluator { 29 | // return a couple of deterministic mock rules so tests can assert length/order 30 | return []RuleEvaluator{ 31 | &RuleMock{RuleID: "rule-1"}, 32 | &RuleMock{RuleID: "rule-2"}, 33 | } 34 | } 35 | 36 | var _ RuleEvaluator = (*RuleMock)(nil) 37 | 38 | type RuleMock struct { 39 | RuleParameters map[string]interface{} 40 | RuleName string 41 | RuleID string 42 | } 43 | 44 | func (rule *RuleMock) Name() string { 45 | return rule.RuleName 46 | } 47 | 48 | func (rule *RuleMock) ID() string { 49 | return rule.RuleID 50 | } 51 | 52 | func (rule *RuleMock) DeleteRule() { 53 | } 54 | 55 | func (rule *RuleMock) ProcessEvent(_ admission.Attributes, _ objectcache.KubernetesCache) RuleFailure { 56 | return nil 57 | } 58 | 59 | func (rule *RuleMock) GetParameters() map[string]interface{} { 60 | return rule.RuleParameters 61 | } 62 | func (rule *RuleMock) SetParameters(p map[string]interface{}) { 63 | rule.RuleParameters = p 64 | } 65 | -------------------------------------------------------------------------------- /watcher/utils.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "github.com/kubescape/operator/utils" 5 | corev1 "k8s.io/api/core/v1" 6 | ) 7 | 8 | func extractImageIDsToContainersFromPod(pod *corev1.Pod) map[string][]string { 9 | imageIDsToContainers := make(map[string][]string) 10 | for _, containerStatus := range pod.Status.ContainerStatuses { 11 | imageID := utils.ExtractImageID(containerStatus.ImageID) 12 | if _, ok := imageIDsToContainers[imageID]; !ok { 13 | imageIDsToContainers[imageID] = []string{} 14 | } 15 | imageIDsToContainers[imageID] = append(imageIDsToContainers[imageID], containerStatus.Name) 16 | } 17 | 18 | for _, containerStatus := range pod.Status.InitContainerStatuses { 19 | imageID := utils.ExtractImageID(containerStatus.ImageID) 20 | if _, ok := imageIDsToContainers[imageID]; !ok { 21 | imageIDsToContainers[imageID] = []string{} 22 | } 23 | imageIDsToContainers[imageID] = append(imageIDsToContainers[imageID], containerStatus.Name) 24 | 25 | } 26 | 27 | return imageIDsToContainers 28 | } 29 | 30 | func extractImageIDsFromPod(pod *corev1.Pod) []string { 31 | imageIDs := []string{} 32 | for _, containerStatus := range pod.Status.ContainerStatuses { 33 | if containerStatus.State.Running != nil { 34 | imageID := containerStatus.ImageID 35 | imageIDs = append(imageIDs, utils.ExtractImageID(imageID)) 36 | } 37 | } 38 | 39 | for _, containerStatus := range pod.Status.InitContainerStatuses { 40 | if containerStatus.State.Running != nil { 41 | imageID := containerStatus.ImageID 42 | imageIDs = append(imageIDs, utils.ExtractImageID(imageID)) 43 | } 44 | } 45 | 46 | return imageIDs 47 | } 48 | func getWlidAndImageID(containerData *utils.ContainerData) string { 49 | return containerData.Wlid + containerData.ContainerName + containerData.ImageID 50 | } 51 | -------------------------------------------------------------------------------- /mainhandler/handlerequestsutils.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/armosec/armoapi-go/apis" 8 | "github.com/armosec/utils-go/httputils" 9 | "github.com/armosec/utils-k8s-go/probes" 10 | "github.com/kubescape/go-logger" 11 | "github.com/kubescape/operator/config" 12 | ) 13 | 14 | func notWaitAtAll(_ config.IConfig) { 15 | } 16 | 17 | func isActionNeedToWait(action apis.Command) waitFunc { 18 | if f, ok := actionNeedToBeWaitOnStartUp[action.CommandName]; ok { 19 | return f 20 | } 21 | return notWaitAtAll 22 | } 23 | 24 | func waitForVulnScanReady(config config.IConfig) { 25 | fullURL := getVulnScanURL(config) 26 | // replace path 27 | fullURL.Path = fmt.Sprintf("v1/%s", probes.ReadinessPath) 28 | 29 | timer := time.NewTimer(time.Duration(1) * time.Minute) 30 | 31 | for { 32 | timer.Reset(time.Duration(1) * time.Second) 33 | <-timer.C 34 | resp, err := httputils.HttpGet(VulnScanHttpClient, fullURL.String(), nil) 35 | if err != nil { 36 | continue 37 | } 38 | defer resp.Body.Close() 39 | if resp.StatusCode >= 200 && resp.StatusCode <= 203 { 40 | logger.L().Info("image vulnerability scanning is available") 41 | break 42 | } 43 | 44 | } 45 | } 46 | 47 | func waitForKubescapeReady(config config.IConfig) { 48 | fullURL := getKubescapeV1ScanURL(config) 49 | fullURL.Path = "readyz" 50 | timer := time.NewTimer(time.Duration(1) * time.Minute) 51 | 52 | for { 53 | timer.Reset(time.Duration(1) * time.Second) 54 | <-timer.C 55 | resp, err := httputils.HttpHead(KubescapeHttpClient, fullURL.String(), nil) 56 | if err != nil { 57 | continue 58 | } 59 | defer resp.Body.Close() 60 | if resp.StatusCode >= 200 && resp.StatusCode <= 203 { 61 | logger.L().Info("kubescape service is ready") 62 | break 63 | } 64 | 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /mainhandler/imageregistryhandler_test.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/kubescape/k8s-interface/k8sinterface" 7 | ) 8 | 9 | // func TestSetImageToTagsMap(t *testing.T) { 10 | // k8sAPI := k8sinterface.NewKubernetesApi() 11 | // registryScan := NewRegistryScan(k8sAPI) 12 | // registryScan.registry = registry{ 13 | // hostname: "quay.io", 14 | // projectID: "armosec", 15 | // } 16 | // registryScan.registryInfo.RegistryName = "quay.io/armosec" 17 | // registryScan.registryInfo.RegistryProvider = "quay.io" 18 | // registryScan.registryInfo.Kind = "quay.io" 19 | // registryScan.registryInfo.AuthMethod.Type = "public" 20 | // registryScan.registryInfo.Include = append(registryScan.registryInfo.Exclude, "armosec/k8s-ca-webhook-ubi") 21 | 22 | // repos, err := registryScan.enumerateRepos() 23 | // assert.NoError(t, err) 24 | // reporter := systemreports.NewBaseReport("bla", "bla", "http://localhost:7200", http.DefaultClient) 25 | // reposToTags := make(chan map[string][]string, len(repos)) 26 | // mapUniqueRepos := make(map[string]bool, len(repos)) 27 | // for _, repo := range repos { 28 | // if _, ok := mapUniqueRepos[repo]; ok { 29 | // t.Errorf("repo %s already exists, len %d", repo, len(repos)) 30 | // } 31 | // mapUniqueRepos[repo] = true 32 | // //currentRepo := repo 33 | // go registryScan.setImageToTagsMap(repo, reporter, reposToTags) 34 | // } 35 | // for i := 0; i < len(repos); i++ { 36 | // res := <-reposToTags 37 | // for k, v := range res { 38 | // registryScan.mapImageToTags[k] = v 39 | // } 40 | // } 41 | 42 | // } 43 | 44 | func NewMockKubernetesAPI() *k8sinterface.KubernetesApi { 45 | return &k8sinterface.KubernetesApi{ 46 | KubernetesClient: nil, 47 | DynamicClient: nil, 48 | DiscoveryClient: nil, 49 | Context: context.Background(), 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /watcher/watchhandler.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "time" 7 | 8 | mapset "github.com/deckarep/golang-set/v2" 9 | "github.com/goradd/maps" 10 | "github.com/kubescape/k8s-interface/k8sinterface" 11 | "github.com/kubescape/operator/config" 12 | "github.com/kubescape/operator/utils" 13 | kssc "github.com/kubescape/storage/pkg/generated/clientset/versioned" 14 | ) 15 | 16 | const retryInterval = 1 * time.Second 17 | 18 | var ( 19 | ErrMissingWLID = fmt.Errorf("missing WLID") 20 | ErrMissingSlug = fmt.Errorf("missing slug") 21 | ErrMissingImageTag = fmt.Errorf("missing image ID") 22 | ErrMissingImageID = fmt.Errorf("missing image tag") 23 | ErrMissingInstanceID = fmt.Errorf("missing instanceID") 24 | ErrMissingContainerName = fmt.Errorf("missing container name") 25 | ErrUnsupportedObject = errors.New("unsupported object type") 26 | ) 27 | 28 | type WatchHandler struct { 29 | ImageToContainerData maps.SafeMap[string, utils.ContainerData] // map of : 30 | SlugToImageID maps.SafeMap[string, string] // map of : string 31 | WlidAndImageID mapset.Set[string] // set of 32 | storageClient kssc.Interface 33 | cfg config.IConfig 34 | k8sAPI *k8sinterface.KubernetesApi 35 | eventQueue *CooldownQueue 36 | } 37 | 38 | // NewWatchHandler creates a new WatchHandler, initializes the maps and returns it 39 | func NewWatchHandler(cfg config.IConfig, k8sAPI *k8sinterface.KubernetesApi, storageClient kssc.Interface, eventQueue *CooldownQueue) *WatchHandler { 40 | return &WatchHandler{ 41 | storageClient: storageClient, 42 | k8sAPI: k8sAPI, 43 | cfg: cfg, 44 | WlidAndImageID: mapset.NewSet[string](), 45 | eventQueue: eventQueue, 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /admission/webhook/testdata/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFCTCCAvGgAwIBAgIUOfw+ItKo9OUpC1cyG8vTJ8LOXdYwDQYJKoZIhvcNAQEL 3 | BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI0MDcxNDExMDkwM1oXDTI1MDcx 4 | NDExMDkwM1owFDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0BAQEF 5 | AAOCAg8AMIICCgKCAgEAs1Jv3j2RSkjKJg6q51C2h1HKkf1u50cWxsTYaQv+U7O5 6 | nnQVwDiuozdbQrIbCBwr/BTkZvfnIsOHdLL3pKWau7Bq/4KhIYbFM/1CHBFtFwzv 7 | hBmqV69GWE1/RAsSR8T3P1hdWXdYgvwaba36mrKOA63BdpENkCgA3zL+t6U2ggKs 8 | QqFontT0yw/BCMrA81DSJCsrPcn698qLErZaIXvYMvPkGyUgebNlICzFugiWAqAQ 9 | dZTsisQMmQAoJo0NByOK96QPuOmj0w4ZxN1R/Ug/hGUpmBLwz7ngv2/HMWkpSgJk 10 | pIenUtYHE9rCg7Uy7oP+bft/HsptKz8sHJJfoF+t/aR8UfomKCD64MMpPBMHg3Ko 11 | 2fE2q27Rnl8+6rYymdQQ7KN0NZsLdbNAN3Xxdl0TB/jmbEXHRd/iY+6BDfdv89s2 12 | +U5bW1qJS+QuSX7IS1N6hWp4GP6hlK1T/rGIJpxAyOB2XzgUrU/lDsi//hUo6wXN 13 | paPHUKnqTV9DQeTPK5vSzHqYt8Foemr2pJn0KwdQMTDxwl0jhVqZtIa3CPOT5Av3 14 | 1lW/BMAOEIfsepkq0oOx2mg0HmzhRQtqPhmcXCmXm4dEbwe2p0lFC/QskYqa43sk 15 | QfljMrVOfpN0Gxac7wX/HFqX2mREBNzIg4hWVPcOQm96yHYNyhgjVwqFKlaj698C 16 | AwEAAaNTMFEwHQYDVR0OBBYEFNWsRaReo4K9ddebIDSAls38o/DXMB8GA1UdIwQY 17 | MBaAFNWsRaReo4K9ddebIDSAls38o/DXMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI 18 | hvcNAQELBQADggIBACpYdNrRijZjHDzv3W8xQuhjn5Sn4ImfD0c6CZ6ENlm9vuhK 19 | /4EwzBofd1pec3hA+CNKvIU7D6VkfroTlVF1us1Xnq0W1CorT2k4muALT8yIPtL7 20 | T41ZNiD7+ULhBxqrFlOXMs5TRlMP1sx0A9muggKuCzreKnDf8rkTTOIj+XiPt8j2 21 | xYRiCsg94o/TV6cL+XMQ5QztpYzliehwfNAupal8IvwOjlg8OyOZb0GqcFirZ5pS 22 | N8Ofhrelonl3fAoGSN9Ey9RPoetcFobjj4sHp+hLXoKzPmn+gjXAoLymm0hZahWz 23 | gIijbSeV8plbPKd2YAEm1sQ8hq+6K3ze95g1YWepvhzlxbOxPNpZX5CEAFe9nqqq 24 | mxgbim75gWKqB2Bm6QqQpo2RePPw3ZqJwvHjGDMfihw6FONCE2P1KulYcK95SZ9l 25 | Rn2Av3g5b2uBYwJ0aEjKj30gO2KUCxFhESlhCjKQSBAn0SGQTB2DrW2gMzrq/yhN 26 | X2oBa6CZeZXWXJzaVKxweZbSylNtfTT8m+5B7CrVxE7ikv3IcHbTzgBF0rqxRpmt 27 | /vlulSUwejq2f5YWLy7JwS8pS1dZoW902JhjaxvHti9gDJeQ6b/oSERF0ymgQts8 28 | PdXeP9ik0cJSP0UGOiqdfzE23SnZorAuS2ymmQLr2xd2/jLZMB0gwm9WQ17x 29 | -----END CERTIFICATE----- 30 | -------------------------------------------------------------------------------- /mainhandler/handlerequests_test.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | ) 7 | 8 | func TestCombineKubescapeCMDArgsWithFrameworkName(t *testing.T) { 9 | fullCMD := combineKubescapeCMDArgsWithFrameworkName("mitre", []string{"scan", "framework"}) 10 | if strings.Join(fullCMD, " ") != "scan framework mitre" { 11 | t.Errorf("invalid kubescape args str: %v", fullCMD) 12 | } 13 | fullCMD = combineKubescapeCMDArgsWithFrameworkName("", []string{"scan", "framework"}) 14 | if strings.Join(fullCMD, " ") != "scan" { 15 | t.Errorf("invalid kubescape args str: %v", fullCMD) 16 | } 17 | fullCMD = combineKubescapeCMDArgsWithFrameworkName("", []string{"scan", "framework", "--environment"}) 18 | if strings.Join(fullCMD, " ") != "scan --environment" { 19 | t.Errorf("invalid kubescape args str: %v", fullCMD) 20 | } 21 | fullCMD = combineKubescapeCMDArgsWithFrameworkName("mitre", []string{"scan", "framework", "--environment"}) 22 | if strings.Join(fullCMD, " ") != "scan framework mitre --environment" { 23 | t.Errorf("invalid kubescape args str: %v", fullCMD) 24 | } 25 | fullCMD = combineKubescapeCMDArgsWithFrameworkName("mitre", []string{"--environment"}) 26 | if strings.Join(fullCMD, " ") != "scan framework mitre --environment" { 27 | t.Errorf("invalid kubescape args str: %v", fullCMD) 28 | } 29 | fullCMD = combineKubescapeCMDArgsWithFrameworkName("", []string{"--environment"}) 30 | if strings.Join(fullCMD, " ") != "scan --environment" { 31 | t.Errorf("invalid kubescape args str: %v", fullCMD) 32 | } 33 | fullCMD = combineKubescapeCMDArgsWithFrameworkName("", []string{}) 34 | if strings.Join(fullCMD, " ") != "scan" { 35 | t.Errorf("invalid kubescape args str: %v", fullCMD) 36 | } 37 | fullCMD = combineKubescapeCMDArgsWithFrameworkName("mitre", []string{}) 38 | if strings.Join(fullCMD, " ") != "scan framework mitre" { 39 | t.Errorf("invalid kubescape args str: %v", fullCMD) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /admission/rules/v1/r2000_exec_to_pod_test.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/kubescape/operator/objectcache" 7 | "github.com/zeebo/assert" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "k8s.io/apiserver/pkg/admission" 11 | "k8s.io/apiserver/pkg/authentication/user" 12 | ) 13 | 14 | func TestR2000(t *testing.T) { 15 | event := admission.NewAttributesRecord( 16 | &unstructured.Unstructured{ 17 | Object: map[string]interface{}{ 18 | "kind": "PodExecOptions", 19 | "apiVersion": "v1", 20 | "command": []interface{}{"bash"}, 21 | "container": "test-container", 22 | "stdin": true, 23 | "stdout": true, 24 | "stderr": true, 25 | "tty": true, 26 | }, 27 | }, 28 | nil, 29 | schema.GroupVersionKind{ 30 | Kind: "PodExecOptions", 31 | }, 32 | "test-namespace", 33 | "test-pod", 34 | schema.GroupVersionResource{ 35 | Resource: "pods", 36 | }, 37 | "exec", 38 | admission.Create, 39 | nil, 40 | false, 41 | &user.DefaultInfo{ 42 | Name: "test-user", 43 | Groups: []string{"test-group"}, 44 | }, 45 | ) 46 | 47 | rule := CreateRuleR2000ExecToPod() 48 | result := rule.ProcessEvent(event, objectcache.KubernetesCacheMockImpl{}) 49 | 50 | assert.NotNil(t, result) 51 | assert.Equal(t, "test-container", result.GetRuntimeAlertK8sDetails().ContainerName) 52 | assert.Equal(t, "test-workload", result.GetRuntimeAlertK8sDetails().WorkloadName) 53 | assert.Equal(t, "test-namespace", result.GetRuntimeAlertK8sDetails().WorkloadNamespace) 54 | assert.Equal(t, "ReplicaSet", result.GetRuntimeAlertK8sDetails().WorkloadKind) 55 | assert.Equal(t, "test-node", result.GetRuntimeAlertK8sDetails().NodeName) 56 | assert.Equal(t, "Exec to pod detected on pod test-pod", result.GetRuleAlert().RuleDescription) 57 | assert.Equal(t, "test-pod", result.GetRuntimeAlertK8sDetails().PodName) 58 | assert.Equal(t, "test-namespace", result.GetRuntimeAlertK8sDetails().Namespace) 59 | } 60 | -------------------------------------------------------------------------------- /watcher/watchhandler_test.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | _ "embed" 5 | "testing" 6 | 7 | utilsmetadata "github.com/armosec/utils-k8s-go/armometadata" 8 | beUtils "github.com/kubescape/backend/pkg/utils" 9 | "github.com/kubescape/operator/config" 10 | "github.com/kubescape/operator/utils" 11 | kssfake "github.com/kubescape/storage/pkg/generated/clientset/versioned/fake" 12 | "github.com/stretchr/testify/assert" 13 | k8sfake "k8s.io/client-go/kubernetes/fake" 14 | ) 15 | 16 | func TestNewWatchHandlerProducesValidResult(t *testing.T) { 17 | tt := []struct { 18 | imageIDsToWLIDSsMap map[string][]string 19 | expectedIWMap map[string][]string 20 | name string 21 | }{ 22 | { 23 | name: "Creating with provided empty map returns matching empty map", 24 | imageIDsToWLIDSsMap: map[string][]string{}, 25 | expectedIWMap: map[string][]string{}, 26 | }, 27 | { 28 | name: "Creating with provided nil map returns matching empty map", 29 | imageIDsToWLIDSsMap: nil, 30 | expectedIWMap: map[string][]string{}, 31 | }, 32 | { 33 | name: "Creating with provided non-empty map returns matching map", 34 | imageIDsToWLIDSsMap: map[string][]string{ 35 | "imageid-01": {"wlid-01"}, 36 | }, 37 | expectedIWMap: map[string][]string{ 38 | "imageid-01": {"wlid-01"}, 39 | }, 40 | }, 41 | } 42 | 43 | for _, tc := range tt { 44 | t.Run(tc.name, func(t *testing.T) { 45 | clusterConfig := utilsmetadata.ClusterConfig{} 46 | cfg, err := config.LoadConfig("../configuration") 47 | assert.NoError(t, err) 48 | operatorConfig := config.NewOperatorConfig(config.CapabilitiesConfig{}, clusterConfig, &beUtils.Credentials{}, cfg) 49 | 50 | k8sClient := k8sfake.NewSimpleClientset() 51 | k8sAPI := utils.NewK8sInterfaceFake(k8sClient) 52 | storageClient := kssfake.NewSimpleClientset() 53 | 54 | wh := NewWatchHandler(operatorConfig, k8sAPI, storageClient, nil) 55 | 56 | assert.NotNilf(t, wh, "Constructing should create a non-nil object") 57 | }) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /admission/rules/rule_interface.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | apitypes "github.com/armosec/armoapi-go/armotypes" 5 | "github.com/kubescape/operator/objectcache" 6 | "k8s.io/apiserver/pkg/admission" 7 | ) 8 | 9 | const ( 10 | RulePriorityNone = 0 11 | RulePriorityLow = 1 12 | RulePriorityMed = 5 13 | RulePriorityHigh = 8 14 | RulePriorityCritical = 10 15 | RulePrioritySystemIssue = 1000 16 | ) 17 | 18 | // RuleCreator is an interface for creating rules by tags, IDs, and names 19 | type RuleCreator interface { 20 | CreateRulesByTags(tags []string) []RuleEvaluator 21 | CreateRuleByID(id string) RuleEvaluator 22 | CreateRuleByName(name string) RuleEvaluator 23 | // CreateAllRules returns all available rules; used when rule bindings are ignored. 24 | CreateAllRules() []RuleEvaluator 25 | } 26 | 27 | type RuleEvaluator interface { 28 | // Rule ID - this is the rules unique identifier 29 | ID() string 30 | // Rule Name 31 | Name() string 32 | // Rule processing 33 | ProcessEvent(event admission.Attributes, access objectcache.KubernetesCache) RuleFailure 34 | // Set rule parameters 35 | SetParameters(parameters map[string]interface{}) 36 | // Get rule parameters 37 | GetParameters() map[string]interface{} 38 | } 39 | 40 | type RuleFailure interface { 41 | // Get Base Runtime Alert 42 | GetBaseRuntimeAlert() apitypes.BaseRuntimeAlert 43 | // Get Runtime Process Details 44 | GetRuntimeProcessDetails() apitypes.ProcessTree 45 | // Get Rule Description 46 | GetRuleAlert() apitypes.RuleAlert 47 | // Get Admissions Details 48 | GetAdmissionsAlert() apitypes.AdmissionAlert 49 | // Get K8s Runtime Details 50 | GetRuntimeAlertK8sDetails() apitypes.RuntimeAlertK8sDetails 51 | // Get Rule ID 52 | GetRuleId() string 53 | 54 | // Set Workload Details 55 | SetWorkloadDetails(workloadDetails string) 56 | // Set Base Runtime Alert 57 | SetBaseRuntimeAlert(baseRuntimeAlert apitypes.BaseRuntimeAlert) 58 | // Set Runtime Process Details 59 | SetRuntimeProcessDetails(runtimeProcessDetails apitypes.ProcessTree) 60 | // Set Rule Description 61 | SetRuleAlert(ruleAlert apitypes.RuleAlert) 62 | // Set Admissions Details 63 | SetAdmissionsAlert(admissionsAlert apitypes.AdmissionAlert) 64 | // Set K8s Runtime Details 65 | SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails apitypes.RuntimeAlertK8sDetails) 66 | } 67 | -------------------------------------------------------------------------------- /watcher/watchretry.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/cenkalti/backoff/v4" 10 | "github.com/kubescape/go-logger" 11 | "github.com/kubescape/go-logger/helpers" 12 | corev1 "k8s.io/api/core/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/watch" 15 | ) 16 | 17 | type resourceVersionGetter interface { 18 | GetResourceVersion() string 19 | } 20 | 21 | var errWatchClosed = errors.New("watch channel closed") 22 | 23 | func (wh *WatchHandler) watchRetry(ctx context.Context, watchOpts metav1.ListOptions) { 24 | if err := backoff.RetryNotify(func() error { 25 | watcher, err := wh.k8sAPI.KubernetesClient.CoreV1().Pods("").Watch(context.Background(), watchOpts) 26 | if err != nil { 27 | return fmt.Errorf("client resource: %w", err) 28 | } 29 | for { 30 | event, chanActive := <-watcher.ResultChan() 31 | // set resource version to resume watch from 32 | // inspired by https://github.com/kubernetes/client-go/blob/5a0a4247921dd9e72d158aaa6c1ee124aba1da80/tools/watch/retrywatcher.go#L157 33 | if metaObject, ok := event.Object.(resourceVersionGetter); ok { 34 | watchOpts.ResourceVersion = metaObject.GetResourceVersion() 35 | } 36 | if wh.eventQueue.Closed() { 37 | watcher.Stop() 38 | return backoff.Permanent(errors.New("event queue closed")) 39 | } 40 | if !chanActive { 41 | // channel closed, retry 42 | return errWatchClosed 43 | } 44 | if event.Type == watch.Error { 45 | return fmt.Errorf("watch error: %s", event.Object) 46 | } 47 | pod := event.Object.(*corev1.Pod) 48 | if wh.cfg.SkipNamespace(pod.Namespace) { 49 | continue 50 | } 51 | wh.eventQueue.Enqueue(event) 52 | } 53 | }, newBackOff(), func(err error, d time.Duration) { 54 | if !errors.Is(err, errWatchClosed) { 55 | logger.L().Ctx(ctx).Warning("watch", helpers.Error(err), 56 | helpers.String("resource", "pods"), 57 | helpers.String("retry in", d.String())) 58 | } 59 | }); err != nil { 60 | logger.L().Ctx(ctx).Fatal("giving up watch", helpers.Error(err), 61 | helpers.String("resource", "pods")) 62 | } 63 | } 64 | 65 | func newBackOff() backoff.BackOff { 66 | b := backoff.NewExponentialBackOff() 67 | // never stop retrying (unless PermanentError is returned) 68 | b.MaxElapsedTime = 0 69 | return b 70 | } 71 | -------------------------------------------------------------------------------- /continuousscanning/gvks.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | gvks = [ 4 | ['/v1/Pod', '/v1/ServiceAccount', 'apps/v1/DaemonSet', 'apps/v1/Deployment', 'apps/v1/ReplicaSet', 'apps/v1/StatefulSet', 'batch/*/CronJob', 'batch/*/Job', 'rbac.authorization.k8s.io/v1/ClusterRole', 'rbac.authorization.k8s.io/v1/ClusterRoleBinding', 'rbac.authorization.k8s.io/v1/Role', 'rbac.authorization.k8s.io/v1/RoleBinding'], 5 | ['/v1/Service', 'apiregistration.k8s.io/v1/APIService'], 6 | ['/v1/Namespace', 'networking.k8s.io/v1/NetworkPolicy'], 7 | ['/v1/Node', '/v1/Pod', 'apps/v1/DaemonSet', 'apps/v1/Deployment', 'apps/v1/ReplicaSet', 'apps/v1/StatefulSet', 'batch/*/CronJob', 'batch/*/Job'], 8 | ['*/*/ClusterRole', '*/*/ClusterRoleBinding', '*/*/Role'], 9 | ['/v1/Service', 'networking.k8s.io/v1/Ingress'], 10 | ['/v1/Pod', '/v1/ServiceAccount', 'apps/v1/DaemonSet', 'apps/v1/Deployment', 'apps/v1/ReplicaSet', 'apps/v1/StatefulSet', 'batch/*/CronJob', 'batch/*/Job'], 11 | ['/v1/Pod', 'apps/v1/DaemonSet', 'apps/v1/Deployment', 'apps/v1/ReplicaSet', 'apps/v1/StatefulSet', 'batch/*/CronJob', 'batch/*/Job', 'networking.k8s.io/v1/NetworkPolicy'], 12 | ['*/*/Namespace', '*/*/ServiceAccount'], 13 | ['*/*/ClusterRole', '*/*/ClusterRoleBinding', '*/*/ConfigMap', '*/*/Role', '*/*/RoleBinding'], 14 | ['*/*/ConfigMap', '*/*/Deployment'], 15 | ['/v1/Pod', 'apps/v1/DaemonSet', 'apps/v1/Deployment', 'apps/v1/ReplicaSet', 'apps/v1/StatefulSet', 'batch/*/CronJob', 'batch/*/Job', 'policy/*/PodSecurityPolicy'], 16 | ['/v1/Namespace', 'admissionregistration.k8s.io/*/MutatingWebhookConfiguration', 'admissionregistration.k8s.io/*/ValidatingWebhookConfiguration'], 17 | ['rbac.authorization.k8s.io/v1/ClusterRole', 'rbac.authorization.k8s.io/v1/ClusterRoleBinding', 'rbac.authorization.k8s.io/v1/Role', 'rbac.authorization.k8s.io/v1/RoleBinding'], 18 | ['*/*/CronJob', '*/*/DaemonSet', '*/*/Deployment', '*/*/Job', '*/*/Pod', '*/*/ReplicaSet', '*/*/StatefulSet'], 19 | ['/v1/Pod', '/v1/Service', 'apps/v1/DaemonSet', 'apps/v1/Deployment', 'apps/v1/ReplicaSet', 'apps/v1/StatefulSet', 'batch/*/CronJob', 'batch/*/Job'], 20 | ['*/*/ClusterRole', '*/*/ClusterRoleBinding', '*/*/Role', '*/*/RoleBinding'], 21 | ['/v1/Pod', 'apps/v1/DaemonSet', 'apps/v1/Deployment', 'apps/v1/ReplicaSet', 'apps/v1/StatefulSet', 'batch/*/CronJob', 'batch/*/Job'], 22 | ] 23 | 24 | unique_gvks: set[str] = set() 25 | 26 | for gvk_list in gvks: 27 | for gvk in gvk_list: 28 | unique_gvks.add(gvk) 29 | 30 | gvks_str = "\n".join(sorted(unique_gvks)) 31 | print(f'Unique gvks: \n{gvks_str}') 32 | print(f'Unique gvks count: {len(unique_gvks)}') 33 | -------------------------------------------------------------------------------- /watcher/testdata/rs-redis.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "apps/v1", 3 | "kind": "ReplicaSet", 4 | "metadata": { 5 | "annotations": { 6 | "deployment.kubernetes.io/desired-replicas": "1", 7 | "deployment.kubernetes.io/max-replicas": "2", 8 | "deployment.kubernetes.io/revision": "1" 9 | }, 10 | "creationTimestamp": "2024-01-31T09:55:46Z", 11 | "generation": 1, 12 | "labels": { 13 | "app": "redis", 14 | "pod-template-hash": "77b4fdf86c" 15 | }, 16 | "name": "redis-77b4fdf86c", 17 | "namespace": "default", 18 | "ownerReferences": [ 19 | { 20 | "apiVersion": "apps/v1", 21 | "blockOwnerDeletion": true, 22 | "controller": true, 23 | "kind": "Deployment", 24 | "name": "redis", 25 | "uid": "5ebc9233-4ef3-4882-8805-975555b24c11" 26 | } 27 | ], 28 | "resourceVersion": "22510674", 29 | "uid": "1bd7e4b7-0290-46bf-be83-830655657759" 30 | }, 31 | "spec": { 32 | "replicas": 1, 33 | "selector": { 34 | "matchLabels": { 35 | "app": "redis", 36 | "pod-template-hash": "77b4fdf86c" 37 | } 38 | }, 39 | "template": { 40 | "metadata": { 41 | "creationTimestamp": null, 42 | "labels": { 43 | "app": "redis", 44 | "pod-template-hash": "77b4fdf86c" 45 | } 46 | }, 47 | "spec": { 48 | "containers": [ 49 | { 50 | "image": "docker.io/library/redis@sha256:92f3e116c1e719acf78004dd62992c3ad56f68f810c93a8db3fe2351bb9722c2", 51 | "imagePullPolicy": "Always", 52 | "name": "redis", 53 | "resources": {}, 54 | "terminationMessagePath": "/dev/termination-log", 55 | "terminationMessagePolicy": "File" 56 | } 57 | ], 58 | "dnsPolicy": "ClusterFirst", 59 | "restartPolicy": "Always", 60 | "schedulerName": "default-scheduler", 61 | "securityContext": {}, 62 | "terminationGracePeriodSeconds": 30 63 | } 64 | } 65 | }, 66 | "status": { 67 | "availableReplicas": 1, 68 | "fullyLabeledReplicas": 1, 69 | "observedGeneration": 1, 70 | "readyReplicas": 1, 71 | "replicas": 1 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /admission/rules/v1/failureobject.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | apitypes "github.com/armosec/armoapi-go/armotypes" 5 | "github.com/armosec/utils-k8s-go/wlid" 6 | "github.com/kubescape/operator/admission/rules" 7 | ) 8 | 9 | var _ rules.RuleFailure = (*GenericRuleFailure)(nil) 10 | 11 | type GenericRuleFailure struct { 12 | BaseRuntimeAlert apitypes.BaseRuntimeAlert 13 | RuntimeProcessDetails apitypes.ProcessTree 14 | RuleAlert apitypes.RuleAlert 15 | AdmissionAlert apitypes.AdmissionAlert 16 | RuntimeAlertK8sDetails apitypes.RuntimeAlertK8sDetails 17 | RuleID string 18 | } 19 | 20 | func (rule *GenericRuleFailure) GetBaseRuntimeAlert() apitypes.BaseRuntimeAlert { 21 | return rule.BaseRuntimeAlert 22 | } 23 | 24 | func (rule *GenericRuleFailure) GetRuntimeProcessDetails() apitypes.ProcessTree { 25 | return rule.RuntimeProcessDetails 26 | } 27 | 28 | func (rule *GenericRuleFailure) GetAdmissionsAlert() apitypes.AdmissionAlert { 29 | return rule.AdmissionAlert 30 | } 31 | 32 | func (rule *GenericRuleFailure) GetRuleAlert() apitypes.RuleAlert { 33 | return rule.RuleAlert 34 | } 35 | 36 | func (rule *GenericRuleFailure) GetRuntimeAlertK8sDetails() apitypes.RuntimeAlertK8sDetails { 37 | return rule.RuntimeAlertK8sDetails 38 | } 39 | 40 | func (rule *GenericRuleFailure) GetRuleId() string { 41 | return rule.RuleID 42 | } 43 | 44 | func (rule *GenericRuleFailure) SetBaseRuntimeAlert(baseRuntimeAlert apitypes.BaseRuntimeAlert) { 45 | rule.BaseRuntimeAlert = baseRuntimeAlert 46 | } 47 | 48 | func (rule *GenericRuleFailure) SetRuntimeProcessDetails(runtimeProcessDetails apitypes.ProcessTree) { 49 | rule.RuntimeProcessDetails = runtimeProcessDetails 50 | } 51 | 52 | func (rule *GenericRuleFailure) SetAdmissionsAlert(admissionsAlert apitypes.AdmissionAlert) { 53 | rule.AdmissionAlert = admissionsAlert 54 | } 55 | 56 | func (rule *GenericRuleFailure) SetRuleAlert(ruleAlert apitypes.RuleAlert) { 57 | rule.RuleAlert = ruleAlert 58 | } 59 | 60 | func (rule *GenericRuleFailure) SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails apitypes.RuntimeAlertK8sDetails) { 61 | rule.RuntimeAlertK8sDetails = runtimeAlertK8sDetails 62 | } 63 | 64 | func (rule *GenericRuleFailure) SetWorkloadDetails(workloadDetails string) { 65 | if workloadDetails == "" { 66 | return 67 | } 68 | 69 | rule.RuntimeAlertK8sDetails.ClusterName = wlid.GetClusterFromWlid(workloadDetails) 70 | rule.RuntimeAlertK8sDetails.WorkloadKind = wlid.GetKindFromWlid(workloadDetails) 71 | rule.RuntimeAlertK8sDetails.WorkloadNamespace = wlid.GetNamespaceFromWlid(workloadDetails) 72 | rule.RuntimeAlertK8sDetails.WorkloadName = wlid.GetNameFromWlid(workloadDetails) 73 | } 74 | -------------------------------------------------------------------------------- /watcher/testdata/rs-redis-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "apps/v1", 3 | "kind": "ReplicaSet", 4 | "metadata": { 5 | "annotations": { 6 | "deployment.kubernetes.io/desired-replicas": "1", 7 | "deployment.kubernetes.io/max-replicas": "2", 8 | "deployment.kubernetes.io/revision": "2" 9 | }, 10 | "creationTimestamp": "2024-01-31T16:08:34Z", 11 | "generation": 1, 12 | "labels": { 13 | "app": "redis", 14 | "bla": "bla", 15 | "pod-template-hash": "7bfdd886d9" 16 | }, 17 | "name": "redis-7bfdd886d9", 18 | "namespace": "default", 19 | "ownerReferences": [ 20 | { 21 | "apiVersion": "apps/v1", 22 | "blockOwnerDeletion": true, 23 | "controller": true, 24 | "kind": "Deployment", 25 | "name": "redis", 26 | "uid": "5ebc9233-4ef3-4882-8805-975555b24c11" 27 | } 28 | ], 29 | "resourceVersion": "22680954", 30 | "uid": "12772063-53f5-45ac-97c3-502f14f728d3" 31 | }, 32 | "spec": { 33 | "replicas": 1, 34 | "selector": { 35 | "matchLabels": { 36 | "app": "redis", 37 | "pod-template-hash": "7bfdd886d9" 38 | } 39 | }, 40 | "template": { 41 | "metadata": { 42 | "creationTimestamp": null, 43 | "labels": { 44 | "app": "redis", 45 | "bla": "bla", 46 | "pod-template-hash": "7bfdd886d9" 47 | } 48 | }, 49 | "spec": { 50 | "containers": [ 51 | { 52 | "image": "docker.io/library/redis@sha256:92f3e116c1e719acf78004dd62992c3ad56f68f810c93a8db3fe2351bb9722c2", 53 | "imagePullPolicy": "Always", 54 | "name": "redis", 55 | "resources": {}, 56 | "terminationMessagePath": "/dev/termination-log", 57 | "terminationMessagePolicy": "File" 58 | } 59 | ], 60 | "dnsPolicy": "ClusterFirst", 61 | "restartPolicy": "Always", 62 | "schedulerName": "default-scheduler", 63 | "securityContext": {}, 64 | "terminationGracePeriodSeconds": 30 65 | } 66 | } 67 | }, 68 | "status": { 69 | "availableReplicas": 1, 70 | "fullyLabeledReplicas": 1, 71 | "observedGeneration": 1, 72 | "readyReplicas": 1, 73 | "replicas": 1 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /watcher/cooldownqueue.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "errors" 5 | "strings" 6 | "sync" 7 | "time" 8 | 9 | "istio.io/pkg/cache" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/watch" 12 | ) 13 | 14 | const ( 15 | defaultExpiration = 5 * time.Second 16 | evictionInterval = 1 * time.Second 17 | ) 18 | 19 | // CooldownQueue is a queue that lets clients put events into it with a cooldown 20 | // 21 | // When a client puts an event into a queue, it waits for a cooldown period before 22 | // the event is forwarded to the consumer. If an event for the same key is put into the queue 23 | // again before the cooldown period is over, the event is overridden and the cooldown period is reset. 24 | type CooldownQueue struct { 25 | closed bool 26 | mu sync.Mutex // mutex for closed 27 | chanMu *sync.Mutex // mutex for innerChan 28 | seenEvents cache.ExpiringCache 29 | innerChan chan watch.Event 30 | ResultChan <-chan watch.Event 31 | } 32 | 33 | // NewCooldownQueue returns a new Cooldown Queue 34 | func NewCooldownQueue() *CooldownQueue { 35 | return NewCooldownQueueWithParams(defaultExpiration, evictionInterval) 36 | } 37 | 38 | func NewCooldownQueueWithParams(expiration, interval time.Duration) *CooldownQueue { 39 | events := make(chan watch.Event) 40 | chanMu := sync.Mutex{} 41 | callback := func(key, value any) { 42 | chanMu.Lock() 43 | defer chanMu.Unlock() 44 | events <- value.(watch.Event) 45 | } 46 | c := cache.NewTTLWithCallback(expiration, interval, callback) 47 | return &CooldownQueue{ 48 | chanMu: &chanMu, 49 | seenEvents: c, 50 | innerChan: events, 51 | ResultChan: events, 52 | } 53 | } 54 | 55 | // makeEventKey creates a unique key for an event from a watcher 56 | func makeEventKey(e watch.Event) (string, error) { 57 | gvk := e.Object.GetObjectKind().GroupVersionKind() 58 | meta, ok := e.Object.(metav1.Object) 59 | if !ok { 60 | return "", errors.New("object does not implement metav1.Object") 61 | } 62 | return strings.Join([]string{gvk.Group, gvk.Version, gvk.Kind, meta.GetNamespace(), meta.GetName()}, "/"), nil 63 | } 64 | 65 | func (q *CooldownQueue) Closed() bool { 66 | q.mu.Lock() 67 | defer q.mu.Unlock() 68 | return q.closed 69 | } 70 | 71 | // Enqueue enqueues an event in the Cooldown Queue 72 | func (q *CooldownQueue) Enqueue(e watch.Event) { 73 | q.mu.Lock() 74 | defer q.mu.Unlock() 75 | if q.closed { 76 | return 77 | } 78 | eventKey, err := makeEventKey(e) 79 | if err != nil { 80 | return 81 | } 82 | q.seenEvents.Set(eventKey, e) 83 | } 84 | 85 | func (q *CooldownQueue) Stop() { 86 | q.chanMu.Lock() 87 | defer q.chanMu.Unlock() 88 | q.mu.Lock() 89 | defer q.mu.Unlock() 90 | q.closed = true 91 | close(q.innerChan) 92 | } 93 | -------------------------------------------------------------------------------- /.github/workflows/pr-merged.yaml: -------------------------------------------------------------------------------- 1 | name: pr-merged 2 | permissions: read-all 3 | on: 4 | pull_request_target: 5 | types: [closed] 6 | branches: 7 | - 'main' 8 | paths-ignore: 9 | - '*.md' 10 | - '*.yaml' 11 | - '.github/workflows/*' 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | reset-run-number: 19 | runs-on: ubuntu-latest 20 | name: reset github.run_number 21 | outputs: 22 | run-number: ${{ steps.get-build.outputs.build-number }} 23 | steps: 24 | - name: Get build number 25 | id: get-build 26 | uses: mlilback/build-number@v1 27 | with: 28 | base: -73 29 | run-id: ${{ github.run_number }} 30 | 31 | pr-merged: 32 | if: ${{ github.event.pull_request.merged == true }} ## Skip if not merged 33 | needs: reset-run-number 34 | permissions: 35 | id-token: write 36 | packages: write 37 | contents: write 38 | pull-requests: read 39 | uses: kubescape/workflows/.github/workflows/incluster-comp-pr-merged.yaml@main 40 | with: 41 | IMAGE_NAME: quay.io/${{ github.repository_owner }}/operator 42 | IMAGE_TAG: v0.2.${{ needs.reset-run-number.outputs.run-number }} 43 | COMPONENT_NAME: operator 44 | CGO_ENABLED: 0 45 | GO111MODULE: "on" 46 | BUILD_PLATFORM: linux/amd64,linux/arm64 47 | GO_VERSION: "1.25" 48 | REQUIRED_TESTS: '[ 49 | "vuln_v2_views", 50 | "vuln_scan_triggering_with_cron_job", 51 | "ks_microservice_ns_creation", 52 | "ks_microservice_on_demand", 53 | "ks_microservice_mitre_framework_on_demand", 54 | "ks_microservice_nsa_and_mitre_framework_demand", 55 | "ks_microservice_triggering_with_cron_job", 56 | "ks_microservice_update_cronjob_schedule", 57 | "ks_microservice_delete_cronjob", 58 | "ks_microservice_create_2_cronjob_mitre_and_nsa", 59 | "test_registry_scanning", 60 | "relevantCVEs", 61 | "relevancy_enabled_stop_sniffing", 62 | "relevant_data_is_appended", 63 | "relevancy_large_image", 64 | "relevancy_fix_vuln", 65 | "relevancy_python", 66 | "relevancy_golang", 67 | "relevancy_java", 68 | "relevancy_java_and_python", 69 | "relevancy_golang_dynamic" 70 | ]' 71 | HELM_E2E_TEST: true 72 | COSIGN: true 73 | secrets: inherit 74 | -------------------------------------------------------------------------------- /mainhandler/handlecommandresponse.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/kubescape/operator/config" 8 | ) 9 | 10 | type HandleCommandResponseCallBack func(ctx context.Context, config config.IConfig, payload interface{}) (bool, *time.Duration) 11 | 12 | const ( 13 | MaxLimitationInsertToCommandResponseChannelGoRoutine = 10 14 | ) 15 | 16 | const ( 17 | KubescapeResponse string = "KubescapeResponse" 18 | ) 19 | 20 | type CommandResponseData struct { 21 | payload interface{} 22 | nextHandledTime *time.Duration 23 | handleCallBack HandleCommandResponseCallBack 24 | commandName string 25 | isCommandResponseNeedToBeRehandled bool 26 | } 27 | 28 | type timerData struct { 29 | timer *time.Timer 30 | payload interface{} 31 | } 32 | 33 | type commandResponseChannelData struct { 34 | commandResponseChannel *chan *CommandResponseData 35 | limitedGoRoutinesCommandResponseChannel *chan *timerData 36 | } 37 | 38 | func createNewCommandResponseData(commandName string, cb HandleCommandResponseCallBack, payload interface{}, nextHandledTime *time.Duration) *CommandResponseData { 39 | return &CommandResponseData{ 40 | commandName: commandName, 41 | handleCallBack: cb, 42 | payload: payload, 43 | nextHandledTime: nextHandledTime, 44 | } 45 | } 46 | 47 | func insertNewCommandResponseData(commandResponseChannel *commandResponseChannelData, data *CommandResponseData) { 48 | timer := time.NewTimer(*data.nextHandledTime) 49 | *commandResponseChannel.limitedGoRoutinesCommandResponseChannel <- &timerData{ 50 | timer: timer, 51 | payload: data, 52 | } 53 | } 54 | 55 | func (mainHandler *MainHandler) waitFroTimer(data *timerData) { 56 | <-data.timer.C 57 | *mainHandler.commandResponseChannel.commandResponseChannel <- data.payload.(*CommandResponseData) 58 | } 59 | 60 | func (mainHandler *MainHandler) handleLimitedGoroutineOfCommandsResponse() { 61 | for { 62 | tData := <-*mainHandler.commandResponseChannel.limitedGoRoutinesCommandResponseChannel 63 | mainHandler.waitFroTimer(tData) 64 | } 65 | } 66 | 67 | func (mainHandler *MainHandler) createInsertCommandsResponseThreadPool() { 68 | for i := 0; i < MaxLimitationInsertToCommandResponseChannelGoRoutine; i++ { 69 | go mainHandler.handleLimitedGoroutineOfCommandsResponse() 70 | } 71 | } 72 | 73 | func (mainHandler *MainHandler) HandleCommandResponse(ctx context.Context) { 74 | mainHandler.createInsertCommandsResponseThreadPool() 75 | for { 76 | data := <-*mainHandler.commandResponseChannel.commandResponseChannel 77 | data.isCommandResponseNeedToBeRehandled, data.nextHandledTime = data.handleCallBack(ctx, mainHandler.config, data.payload) 78 | if data.isCommandResponseNeedToBeRehandled { 79 | insertNewCommandResponseData(mainHandler.commandResponseChannel, data) 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /watcher/commandshandler.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/armosec/armoapi-go/apis" 10 | "github.com/kubescape/backend/pkg/command" 11 | "github.com/kubescape/backend/pkg/command/types/v1alpha1" 12 | "github.com/kubescape/go-logger" 13 | "github.com/kubescape/go-logger/helpers" 14 | "github.com/kubescape/k8s-interface/k8sinterface" 15 | "github.com/kubescape/operator/config" 16 | "github.com/kubescape/operator/utils" 17 | "github.com/panjf2000/ants/v2" 18 | ) 19 | 20 | type OperatorCommandsHandler struct { 21 | ctx context.Context 22 | pool *ants.PoolWithFunc 23 | 24 | k8sAPI *k8sinterface.KubernetesApi 25 | commands chan v1alpha1.OperatorCommand 26 | commandsWatcher *CommandWatchHandler 27 | config config.IConfig 28 | } 29 | 30 | func NewOperatorCommandsHandler(ctx context.Context, pool *ants.PoolWithFunc, k8sAPI *k8sinterface.KubernetesApi, commandsWatcher *CommandWatchHandler, config config.IConfig) *OperatorCommandsHandler { 31 | return &OperatorCommandsHandler{ 32 | pool: pool, 33 | ctx: ctx, 34 | k8sAPI: k8sAPI, 35 | commands: make(chan v1alpha1.OperatorCommand, 100), 36 | commandsWatcher: commandsWatcher, 37 | config: config, 38 | } 39 | } 40 | 41 | func (ch *OperatorCommandsHandler) Start() { 42 | logger.L().Info("starting OperatorCommandsHandler") 43 | ch.commandsWatcher.RegisterForCommands(ch.commands) 44 | 45 | for { 46 | select { 47 | case cmd := <-ch.commands: 48 | if cmd.Spec.CommandType != string(command.OperatorCommandTypeOperatorAPI) { 49 | logger.L().Info("not generic command: " + cmd.Spec.CommandType) 50 | continue 51 | } 52 | logger.L().Debug("OperatorCommandsHandler: received command", helpers.String("guid", fmt.Sprintf("%v", cmd.Spec.GUID))) 53 | ch.invokeCommand(ch.ctx, cmd) 54 | case <-ch.ctx.Done(): 55 | logger.L().Ctx(ch.ctx).Info("OperatorCommandsHandler: context done") 56 | return 57 | } 58 | } 59 | } 60 | 61 | func (ch *OperatorCommandsHandler) invokeCommand(ctx context.Context, opcmd v1alpha1.OperatorCommand) { 62 | startedAt := time.Now() 63 | var cmd apis.Command 64 | 65 | sessionObj := utils.NewSessionObj(ctx, ch.config, &cmd, "", opcmd.Spec.GUID) 66 | sessionObj.SetOperatorCommandDetails(&utils.OperatorCommandDetails{ 67 | Command: &opcmd, 68 | StartedAt: startedAt, 69 | Client: ch.k8sAPI, 70 | }) 71 | 72 | err := json.Unmarshal(opcmd.Spec.Body, &cmd) 73 | if err != nil { 74 | sessionObj.SetOperatorCommandStatus(ctx, utils.WithError(err)) 75 | return 76 | } 77 | l := utils.Job{} 78 | l.SetContext(ctx) 79 | l.SetObj(*sessionObj) 80 | 81 | // invoke the job - status will be updated in the job 82 | if err := ch.pool.Invoke(l); err != nil { 83 | logger.L().Ctx(ctx).Error("failed to invoke job", helpers.String("ID", cmd.GetID()), helpers.String("command", fmt.Sprintf("%v", cmd)), helpers.Error(err)) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /watcher/testdata/registry-template-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | cronjobTemplate: |- 4 | apiVersion: batch/v1 5 | kind: CronJob 6 | metadata: 7 | name: registry-scheduler 8 | namespace: kubescape 9 | labels: 10 | app: registry-scheduler 11 | kubescape.io/tier: "core" 12 | tier: ks-control-plane 13 | armo.tier: "registry-scan" 14 | spec: 15 | schedule: "0 0 * * *" 16 | successfulJobsHistoryLimit: 3 17 | failedJobsHistoryLimit: 1 18 | jobTemplate: 19 | spec: 20 | template: 21 | metadata: 22 | labels: 23 | armo.tier: "registry-scan" 24 | kubescape.io/tier: "core" 25 | spec: 26 | containers: 27 | - name: registry-scheduler 28 | image: "quay.io/kubescape/http-request:v0.2.9" 29 | imagePullPolicy: IfNotPresent 30 | securityContext: 31 | allowPrivilegeEscalation: false 32 | readOnlyRootFilesystem: true 33 | runAsNonRoot: true 34 | runAsUser: 100 35 | resources: 36 | limits: 37 | cpu: 10m 38 | memory: 20Mi 39 | requests: 40 | cpu: 1m 41 | memory: 10Mi 42 | args: 43 | - -method=post 44 | - -scheme=http 45 | - -host=operator:4002 46 | - -path=v1/triggerAction 47 | - -headers=Content-Type:application/json 48 | - -path-body=/home/ks/request-body.json 49 | volumeMounts: 50 | - name: "request-body-volume" 51 | mountPath: /home/ks/request-body.json 52 | subPath: request-body.json 53 | readOnly: true 54 | restartPolicy: Never 55 | serviceAccountName: kubevuln 56 | automountServiceAccountToken: false 57 | nodeSelector: 58 | affinity: 59 | tolerations: 60 | volumes: 61 | - name: "request-body-volume" # placeholder 62 | configMap: 63 | name: registry-scheduler 64 | kind: ConfigMap 65 | metadata: 66 | annotations: 67 | meta.helm.sh/release-name: kubescape 68 | meta.helm.sh/release-namespace: kubescape 69 | labels: 70 | app: ks-cloud-config 71 | app.kubernetes.io/component: ks-cloud-config 72 | app.kubernetes.io/instance: kubescape 73 | app.kubernetes.io/managed-by: Helm 74 | app.kubernetes.io/name: kubescape-operator 75 | app.kubernetes.io/version: 1.22.4 76 | helm.sh/chart: kubescape-operator-1.22.4 77 | kubescape.io/ignore: "true" 78 | kubescape.io/tier: core 79 | tier: ks-control-plane 80 | name: registry-scan-cronjob-template 81 | namespace: kubescape -------------------------------------------------------------------------------- /utils/containerprofile.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "slices" 7 | 8 | "github.com/armosec/armoapi-go/apis" 9 | "github.com/kubescape/go-logger" 10 | "github.com/kubescape/go-logger/helpers" 11 | helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" 12 | "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" 13 | kssc "github.com/kubescape/storage/pkg/generated/clientset/versioned" 14 | corev1 "k8s.io/api/core/v1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | ) 17 | 18 | func SkipContainerProfile(annotations map[string]string) (bool, error) { 19 | ann := []string{ 20 | "", // empty string for backward compatibility 21 | helpersv1.Learning, 22 | helpersv1.Completed, 23 | } 24 | 25 | if len(annotations) == 0 { 26 | return true, fmt.Errorf("no annotations") // skip 27 | } 28 | 29 | if status, ok := annotations[helpersv1.StatusMetadataKey]; ok && !slices.Contains(ann, status) { 30 | return true, fmt.Errorf("invalid status") 31 | } 32 | if val, ok := annotations[helpersv1.InstanceIDMetadataKey]; !ok || val == "" { 33 | return true, fmt.Errorf("missing InstanceID annotation") // skip 34 | } 35 | if val, ok := annotations[helpersv1.WlidMetadataKey]; !ok || val == "" { 36 | return true, fmt.Errorf("missing WLID annotation") // skip 37 | } 38 | 39 | return false, nil // do not skip 40 | } 41 | 42 | // GetContainerProfileForRelevancyScan retrieves an container profile from the storage client based on the provided slug and namespace 43 | // If the container profile is found, and it should not be skipped (i.e. correct status, InstanceID and WLID annotations), it is returned, otherwise nil 44 | func GetContainerProfileForRelevancyScan(ctx context.Context, storageClient kssc.Interface, slug, namespace string) *v1beta1.ContainerProfile { 45 | profile, err := storageClient.SpdxV1beta1().ContainerProfiles(namespace).Get(ctx, slug, metav1.GetOptions{ResourceVersion: "metadata"}) 46 | if err == nil && profile != nil { 47 | if skip, err := SkipContainerProfile(profile.Annotations); skip { 48 | logger.L().Info("found container profile, but skipping", helpers.Error(err), helpers.String("id", slug), helpers.String("namespace", namespace), 49 | helpers.Interface("annotations", profile.Annotations)) 50 | return nil 51 | } else { 52 | logger.L().Info("found container profile", helpers.String("id", slug), helpers.String("namespace", namespace)) 53 | return profile 54 | } 55 | } else { 56 | logger.L().Info("container profile not found", helpers.String("id", slug), helpers.String("namespace", namespace)) 57 | } 58 | return nil 59 | } 60 | 61 | func GetContainerProfileScanCommand(profile *v1beta1.ContainerProfile, pod *corev1.Pod) *apis.Command { 62 | return &apis.Command{ 63 | Wlid: profile.Annotations[helpersv1.WlidMetadataKey], 64 | CommandName: apis.TypeScanApplicationProfile, 65 | Args: map[string]interface{}{ 66 | ArgsName: profile.Name, 67 | ArgsNamespace: profile.Namespace, 68 | ArgsPod: pod, 69 | }, 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /restapihandler/triggeraction.go: -------------------------------------------------------------------------------- 1 | package restapihandler 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | 10 | "github.com/armosec/armoapi-go/apis" 11 | "github.com/kubescape/go-logger" 12 | "github.com/kubescape/go-logger/helpers" 13 | "github.com/kubescape/operator/utils" 14 | ) 15 | 16 | /*args may contain credentials*/ 17 | func displayReceivedCommand(receivedCommands []byte) { 18 | 19 | var err error 20 | var receivedCommandsWithNoArgs []byte 21 | commands := apis.Commands{} 22 | if err = json.Unmarshal(receivedCommands, &commands); err != nil { 23 | return 24 | } 25 | for i := range commands.Commands { 26 | commands.Commands[i].Args = map[string]interface{}{} 27 | } 28 | 29 | if receivedCommandsWithNoArgs, err = json.Marshal(commands); err != nil { 30 | return 31 | } 32 | logger.L().Info("restAPI receivedCommands: " + string(receivedCommandsWithNoArgs)) 33 | } 34 | 35 | // HandleActionRequest Parse received commands and run the command 36 | func (resthandler *HTTPHandler) HandleActionRequest(ctx context.Context, receivedCommands []byte) error { 37 | commands := apis.Commands{} 38 | if err := json.Unmarshal(receivedCommands, &commands); err != nil { 39 | logger.L().Ctx(ctx).Error(err.Error(), helpers.Error(err)) 40 | return err 41 | } 42 | 43 | displayReceivedCommand(receivedCommands) 44 | 45 | for i := range commands.Commands { 46 | c := commands.Commands[i] 47 | sessionObj := utils.NewSessionObj(ctx, resthandler.config, &c, c.JobTracking.ParentID, c.JobTracking.JobID) 48 | if c.CommandName == "" { 49 | err := fmt.Errorf("command not found. id: %s", c.GetID()) 50 | logger.L().Ctx(ctx).Error(err.Error(), helpers.Error(err)) 51 | sessionObj.SetOperatorCommandStatus(ctx, utils.WithError(err)) 52 | continue 53 | } 54 | l := utils.Job{} 55 | l.SetContext(ctx) 56 | l.SetObj(*sessionObj) 57 | if err := resthandler.pool.Invoke(l); err != nil { 58 | logger.L().Ctx(ctx).Error("failed to invoke job", helpers.String("ID", c.GetID()), helpers.Error(err)) 59 | } 60 | } 61 | return nil 62 | } 63 | 64 | func (resthandler *HTTPHandler) ActionRequest(w http.ResponseWriter, r *http.Request) { 65 | defer func() { 66 | if err := recover(); err != nil { 67 | w.WriteHeader(http.StatusInternalServerError) 68 | bErr, _ := json.Marshal(err) 69 | w.Write(bErr) 70 | logger.L().Fatal("recover in ActionRequest", helpers.Interface("error", err)) 71 | } 72 | }() 73 | 74 | defer r.Body.Close() 75 | var err error 76 | returnValue := []byte("ok") 77 | 78 | httpStatus := http.StatusOK 79 | readBuffer, err := io.ReadAll(r.Body) 80 | if err == nil { 81 | switch r.Method { 82 | case http.MethodPost: 83 | err = resthandler.HandleActionRequest(context.Background(), readBuffer) 84 | default: 85 | httpStatus = http.StatusMethodNotAllowed 86 | err = fmt.Errorf("method '%s' not allowed", r.Method) 87 | } 88 | } 89 | if err != nil { 90 | returnValue = []byte(err.Error()) 91 | httpStatus = http.StatusInternalServerError 92 | } 93 | 94 | w.WriteHeader(httpStatus) 95 | w.Write(returnValue) 96 | } 97 | -------------------------------------------------------------------------------- /watcher/testdata/deployment-redis.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "apps/v1", 3 | "kind": "Deployment", 4 | "metadata": { 5 | "annotations": { 6 | "deployment.kubernetes.io/revision": "1" 7 | }, 8 | "creationTimestamp": "2024-01-31T09:55:46Z", 9 | "generation": 1, 10 | "labels": { 11 | "app": "redis" 12 | }, 13 | "name": "redis", 14 | "namespace": "default", 15 | "resourceVersion": "22510675", 16 | "uid": "5ebc9233-4ef3-4882-8805-975555b24c11" 17 | }, 18 | "spec": { 19 | "progressDeadlineSeconds": 600, 20 | "replicas": 1, 21 | "revisionHistoryLimit": 10, 22 | "selector": { 23 | "matchLabels": { 24 | "app": "redis" 25 | } 26 | }, 27 | "strategy": { 28 | "rollingUpdate": { 29 | "maxSurge": "25%", 30 | "maxUnavailable": "25%" 31 | }, 32 | "type": "RollingUpdate" 33 | }, 34 | "template": { 35 | "metadata": { 36 | "creationTimestamp": null, 37 | "labels": { 38 | "app": "redis" 39 | } 40 | }, 41 | "spec": { 42 | "containers": [ 43 | { 44 | "image": "docker.io/library/redis@sha256:92f3e116c1e719acf78004dd62992c3ad56f68f810c93a8db3fe2351bb9722c2", 45 | "imagePullPolicy": "Always", 46 | "name": "redis", 47 | "resources": {}, 48 | "terminationMessagePath": "/dev/termination-log", 49 | "terminationMessagePolicy": "File" 50 | } 51 | ], 52 | "dnsPolicy": "ClusterFirst", 53 | "restartPolicy": "Always", 54 | "schedulerName": "default-scheduler", 55 | "securityContext": {}, 56 | "terminationGracePeriodSeconds": 30 57 | } 58 | } 59 | }, 60 | "status": { 61 | "availableReplicas": 1, 62 | "conditions": [ 63 | { 64 | "lastTransitionTime": "2024-01-31T09:55:47Z", 65 | "lastUpdateTime": "2024-01-31T09:55:47Z", 66 | "message": "Deployment has minimum availability.", 67 | "reason": "MinimumReplicasAvailable", 68 | "status": "True", 69 | "type": "Available" 70 | }, 71 | { 72 | "lastTransitionTime": "2024-01-31T09:55:46Z", 73 | "lastUpdateTime": "2024-01-31T09:55:47Z", 74 | "message": "ReplicaSet \"redis-77b4fdf86c\" has successfully progressed.", 75 | "reason": "NewReplicaSetAvailable", 76 | "status": "True", 77 | "type": "Progressing" 78 | } 79 | ], 80 | "observedGeneration": 1, 81 | "readyReplicas": 1, 82 | "replicas": 1, 83 | "updatedReplicas": 1 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /utils/utils_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ) 11 | 12 | func TestExtractContainersToImageIDsFromPod(t *testing.T) { 13 | tests := []struct { 14 | pod *corev1.Pod 15 | expected map[string]string 16 | name string 17 | }{ 18 | { 19 | name: "one container", 20 | pod: &corev1.Pod{ 21 | ObjectMeta: metav1.ObjectMeta{ 22 | Name: "pod1", 23 | Namespace: "namespace1", 24 | }, 25 | Status: corev1.PodStatus{ 26 | ContainerStatuses: []corev1.ContainerStatus{ 27 | { 28 | State: corev1.ContainerState{ 29 | Running: &corev1.ContainerStateRunning{}, 30 | }, 31 | ImageID: "docker-pullable://alpine@sha256:1", 32 | Name: "container1", 33 | }, 34 | }, 35 | }, 36 | }, 37 | expected: map[string]string{ 38 | "container1": "alpine@sha256:1", 39 | }, 40 | }, 41 | { 42 | name: "two containers", 43 | pod: &corev1.Pod{ 44 | ObjectMeta: metav1.ObjectMeta{ 45 | Name: "pod2", 46 | Namespace: "namespace2", 47 | }, 48 | Status: corev1.PodStatus{ 49 | ContainerStatuses: []corev1.ContainerStatus{ 50 | { 51 | State: corev1.ContainerState{ 52 | Running: &corev1.ContainerStateRunning{}, 53 | }, 54 | ImageID: "docker-pullable://alpine@sha256:1", 55 | Name: "container1", 56 | }, 57 | { 58 | State: corev1.ContainerState{ 59 | Running: &corev1.ContainerStateRunning{}, 60 | }, 61 | ImageID: "docker-pullable://alpine@sha256:2", 62 | Name: "container2", 63 | }, 64 | }, 65 | }, 66 | }, 67 | expected: map[string]string{ 68 | "container1": "alpine@sha256:1", 69 | "container2": "alpine@sha256:2", 70 | }, 71 | }, 72 | { 73 | name: "init container", 74 | pod: &corev1.Pod{ 75 | ObjectMeta: metav1.ObjectMeta{ 76 | Name: "pod2", 77 | Namespace: "namespace2", 78 | }, 79 | Status: corev1.PodStatus{ 80 | InitContainerStatuses: []corev1.ContainerStatus{ 81 | { 82 | State: corev1.ContainerState{ 83 | Running: &corev1.ContainerStateRunning{}, 84 | }, 85 | ImageID: "docker-pullable://alpine@sha256:1", 86 | Name: "container1", 87 | }, 88 | { 89 | State: corev1.ContainerState{ 90 | Running: &corev1.ContainerStateRunning{}, 91 | }, 92 | ImageID: "docker-pullable://alpine@sha256:2", 93 | Name: "container2", 94 | }, 95 | }, 96 | }, 97 | }, 98 | expected: map[string]string{ 99 | "container1": "alpine@sha256:1", 100 | "container2": "alpine@sha256:2", 101 | }, 102 | }, 103 | } 104 | for _, tt := range tests { 105 | t.Run(tt.name, func(t *testing.T) { 106 | assert.True(t, reflect.DeepEqual(ExtractContainersToImageIDsFromPod(tt.pod), tt.expected)) 107 | }) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /continuousscanning/loader.go: -------------------------------------------------------------------------------- 1 | package continuousscanning 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "io" 7 | 8 | "k8s.io/apimachinery/pkg/runtime/schema" 9 | ) 10 | 11 | // APIResourceMatch is a definition of a matching rule for API Resources 12 | // 13 | // It defines a rule on how to generate GVRs from it. The rule definition 14 | // captures ANY of the mentioned Groups, Versions and Resources 15 | type APIResourceMatch struct { 16 | Groups []string `json:"apiGroups"` 17 | Versions []string `json:"apiVersions"` 18 | Resources []string `json:"resources"` 19 | } 20 | 21 | // MatchingRules is a definition of resource matching rules 22 | type MatchingRules struct { 23 | APIResources []APIResourceMatch `json:"match"` 24 | Namespaces []string `json:"namespaces"` 25 | } 26 | 27 | // MatchingRuleFetcher fetches Matching Rules from somewhere 28 | type MatchingRuleFetcher interface { 29 | Fetch(ctx context.Context) (*MatchingRules, error) 30 | } 31 | 32 | // targetLoader loads target matching rules 33 | type targetLoader struct { 34 | fetcher MatchingRuleFetcher 35 | } 36 | 37 | type TargetLoader interface { 38 | LoadGVRs(ctx context.Context) []schema.GroupVersionResource 39 | } 40 | 41 | // NewTargetLoader returns a new Target Loader 42 | func NewTargetLoader(f MatchingRuleFetcher) *targetLoader { 43 | return &targetLoader{fetcher: f} 44 | } 45 | 46 | func matchRuleToGVR(apiMatch APIResourceMatch) []schema.GroupVersionResource { 47 | gvrs := []schema.GroupVersionResource{} 48 | 49 | for _, group := range apiMatch.Groups { 50 | for _, version := range apiMatch.Versions { 51 | for _, resource := range apiMatch.Resources { 52 | gvr := schema.GroupVersionResource{ 53 | Group: group, 54 | Version: version, 55 | Resource: resource, 56 | } 57 | gvrs = append(gvrs, gvr) 58 | } 59 | } 60 | } 61 | return gvrs 62 | } 63 | 64 | // LoadGVRs loads GroupVersionResource definitions 65 | func (l *targetLoader) LoadGVRs(ctx context.Context) []schema.GroupVersionResource { 66 | gvrs := []schema.GroupVersionResource{} 67 | 68 | rules, _ := l.fetcher.Fetch(ctx) 69 | 70 | apiResourceMatches := rules.APIResources 71 | for idx := range apiResourceMatches { 72 | ruleGvrs := matchRuleToGVR(apiResourceMatches[idx]) 73 | gvrs = append(gvrs, ruleGvrs...) 74 | } 75 | 76 | return gvrs 77 | } 78 | 79 | type fileFetcher struct { 80 | r io.Reader 81 | } 82 | 83 | func (f *fileFetcher) Fetch(ctx context.Context) (*MatchingRules, error) { 84 | return parseMatchingRules(f.r) 85 | } 86 | 87 | // NewFileFetcher returns a new file-based rule matches fetcher 88 | func NewFileFetcher(r io.Reader) *fileFetcher { 89 | return &fileFetcher{r: r} 90 | } 91 | 92 | // parseMatchingRules takes the data from the reader and parsess it into resource matching rules 93 | func parseMatchingRules(r io.Reader) (*MatchingRules, error) { 94 | data, err := io.ReadAll(r) 95 | if err != nil { 96 | return nil, err 97 | } 98 | 99 | var matches *MatchingRules 100 | err = json.Unmarshal(data, &matches) 101 | return matches, err 102 | } 103 | -------------------------------------------------------------------------------- /watcher/cooldownqueue_test.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "sort" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 10 | "k8s.io/apimachinery/pkg/watch" 11 | ) 12 | 13 | var ( 14 | configmap = unstructured.Unstructured{Object: map[string]interface{}{"kind": "ConfigMap", "metadata": map[string]interface{}{"uid": "748ad4a8-e5ff-44da-ba94-309992c97820"}}} 15 | deployment = unstructured.Unstructured{Object: map[string]interface{}{"kind": "Deployment", "metadata": map[string]interface{}{"uid": "6b1a0c50-277f-4aa1-a4f9-9fc278ce4fe2"}}} 16 | pod = unstructured.Unstructured{Object: map[string]interface{}{"kind": "Pod", "metadata": map[string]interface{}{"uid": "aa5e3e8f-2da5-4c38-93c0-210d3280d10f"}}} 17 | deploymentAdded = watch.Event{Type: watch.Added, Object: &deployment} 18 | podAdded = watch.Event{Type: watch.Added, Object: &pod} 19 | podModified = watch.Event{Type: watch.Modified, Object: &pod} 20 | ) 21 | 22 | func TestCooldownQueue_Enqueue(t *testing.T) { 23 | tests := []struct { 24 | name string 25 | inEvents []watch.Event 26 | outEvents []watch.Event 27 | }{ 28 | { 29 | name: "add pod", 30 | inEvents: []watch.Event{deploymentAdded, podAdded, podModified, podModified, podModified}, 31 | outEvents: []watch.Event{deploymentAdded, podModified}, 32 | }, 33 | } 34 | for _, tt := range tests { 35 | t.Run(tt.name, func(t *testing.T) { 36 | q := NewCooldownQueue() 37 | go func() { 38 | time.Sleep(10 * time.Second) 39 | q.Stop() 40 | }() 41 | for _, e := range tt.inEvents { 42 | time.Sleep(50 * time.Millisecond) // need to sleep to preserve order since the insertion is async 43 | q.Enqueue(e) 44 | } 45 | var outEvents []watch.Event 46 | for e := range q.ResultChan { 47 | outEvents = append(outEvents, e) 48 | } 49 | // sort outEvents to make the comparison easier 50 | sort.Slice(outEvents, func(i, j int) bool { 51 | uidI := outEvents[i].Object.(*unstructured.Unstructured).GetUID() 52 | uidJ := outEvents[j].Object.(*unstructured.Unstructured).GetUID() 53 | return uidI < uidJ 54 | }) 55 | assert.Equal(t, tt.outEvents, outEvents) 56 | }) 57 | } 58 | } 59 | 60 | // key is only based on the UID of the object 61 | func Test_makeEventKey(t *testing.T) { 62 | tests := []struct { 63 | name string 64 | e watch.Event 65 | want string 66 | }{ 67 | { 68 | name: "add pod", 69 | e: watch.Event{ 70 | Type: watch.Added, 71 | Object: &pod, 72 | }, 73 | want: "//Pod//", 74 | }, 75 | { 76 | name: "delete deployment", 77 | e: watch.Event{ 78 | Type: watch.Deleted, 79 | Object: &deployment, 80 | }, 81 | want: "//Deployment//", 82 | }, 83 | { 84 | name: "modify configmap", 85 | e: watch.Event{ 86 | Type: watch.Modified, 87 | Object: &configmap, 88 | }, 89 | want: "//ConfigMap//", 90 | }, 91 | } 92 | for _, tt := range tests { 93 | t.Run(tt.name, func(t *testing.T) { 94 | got, err := makeEventKey(tt.e) 95 | assert.NoError(t, err) 96 | assert.Equal(t, tt.want, got) 97 | }) 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /admission/rulesupdate/updater.go: -------------------------------------------------------------------------------- 1 | package rulesupdate 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/google/uuid" 9 | "github.com/kubescape/backend/pkg/command/types/v1alpha1" 10 | "github.com/kubescape/go-logger" 11 | "github.com/kubescape/go-logger/helpers" 12 | "github.com/kubescape/k8s-interface/k8sinterface" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/runtime" 16 | ) 17 | 18 | const ( 19 | OperatorCommandTypeRuntimeUpdateRules = "RuntimeUpdateRules" 20 | ) 21 | 22 | type RulesUpdater struct { 23 | k8sClient *k8sinterface.KubernetesApi 24 | interval time.Duration 25 | namespace string 26 | ctx context.Context 27 | cancel context.CancelFunc 28 | } 29 | 30 | type RulesUpdaterConfig struct { 31 | Interval time.Duration `mapstructure:"interval"` 32 | Namespace string `mapstructure:"namespace"` 33 | Enabled bool `mapstructure:"enabled"` 34 | } 35 | 36 | func NewRulesUpdator(ctx context.Context, k8sClient *k8sinterface.KubernetesApi, config RulesUpdaterConfig) *RulesUpdater { 37 | ctx, cancel := context.WithCancel(ctx) 38 | updater := &RulesUpdater{ 39 | k8sClient: k8sClient, 40 | interval: config.Interval, 41 | namespace: config.Namespace, 42 | ctx: ctx, 43 | cancel: cancel, 44 | } 45 | 46 | return updater 47 | } 48 | 49 | func (ru *RulesUpdater) Start() { 50 | logger.L().Info("rules updater started") 51 | go func() { 52 | for { 53 | if err := ru.SendUpdateRulesCommand(); err != nil { 54 | logger.L().Error("error sending update rules command", helpers.Error(err)) 55 | } 56 | select { 57 | case <-ru.ctx.Done(): 58 | ru.cancel() 59 | logger.L().Info("rules updater stopped") 60 | return 61 | case <-time.After(ru.interval): 62 | // continue loop 63 | } 64 | } 65 | }() 66 | } 67 | 68 | func (ru *RulesUpdater) SendUpdateRulesCommand() error { 69 | logger.L().Debug("sending update rules command") 70 | cmd := &v1alpha1.OperatorCommand{ 71 | TypeMeta: metav1.TypeMeta{ 72 | APIVersion: "kubescape.io/v1alpha1", 73 | Kind: "OperatorCommand", 74 | }, 75 | ObjectMeta: metav1.ObjectMeta{ 76 | Name: fmt.Sprintf("update-rules-%s", uuid.New().String()[:8]), 77 | Namespace: ru.namespace, 78 | Labels: map[string]string{ 79 | "kubescape.io/app-name": "node-agent", 80 | "kubescape.io/node-name": "operator", 81 | }, 82 | }, 83 | Spec: v1alpha1.OperatorCommandSpec{ 84 | GUID: uuid.New().String(), 85 | CommandType: OperatorCommandTypeRuntimeUpdateRules, 86 | CommandVersion: "v1", 87 | }, 88 | } 89 | 90 | un, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cmd) 91 | if err != nil { 92 | return fmt.Errorf("error converting OperatorCommand to unstructured: %v", err) 93 | } 94 | 95 | _, err = ru.k8sClient.GetDynamicClient().Resource(v1alpha1.SchemaGroupVersionResource).Namespace(ru.namespace).Create( 96 | ru.ctx, 97 | &unstructured.Unstructured{Object: un}, 98 | metav1.CreateOptions{}, 99 | ) 100 | if err != nil { 101 | return fmt.Errorf("error creating OperatorCommand: %v", err) 102 | } 103 | 104 | logger.L().Debug("update rules command sent") 105 | return nil 106 | } 107 | -------------------------------------------------------------------------------- /admission/webhook/testdata/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCzUm/ePZFKSMom 3 | DqrnULaHUcqR/W7nRxbGxNhpC/5Ts7medBXAOK6jN1tCshsIHCv8FORm9+ciw4d0 4 | svekpZq7sGr/gqEhhsUz/UIcEW0XDO+EGapXr0ZYTX9ECxJHxPc/WF1Zd1iC/Bpt 5 | rfqaso4DrcF2kQ2QKADfMv63pTaCAqxCoWie1PTLD8EIysDzUNIkKys9yfr3yosS 6 | tlohe9gy8+QbJSB5s2UgLMW6CJYCoBB1lOyKxAyZACgmjQ0HI4r3pA+46aPTDhnE 7 | 3VH9SD+EZSmYEvDPueC/b8cxaSlKAmSkh6dS1gcT2sKDtTLug/5t+38eym0rPywc 8 | kl+gX639pHxR+iYoIPrgwyk8EweDcqjZ8TarbtGeXz7qtjKZ1BDso3Q1mwt1s0A3 9 | dfF2XRMH+OZsRcdF3+Jj7oEN92/z2zb5TltbWolL5C5JfshLU3qFangY/qGUrVP+ 10 | sYgmnEDI4HZfOBStT+UOyL/+FSjrBc2lo8dQqepNX0NB5M8rm9LMepi3wWh6avak 11 | mfQrB1AxMPHCXSOFWpm0hrcI85PkC/fWVb8EwA4Qh+x6mSrSg7HaaDQebOFFC2o+ 12 | GZxcKZebh0RvB7anSUUL9CyRiprjeyRB+WMytU5+k3QbFpzvBf8cWpfaZEQE3MiD 13 | iFZU9w5Cb3rIdg3KGCNXCoUqVqPr3wIDAQABAoICAA3ZwjZCPySjtql13JF9VHvj 14 | P2hDUPugLAwyTBzhBkXkzeOFR+DO//4vWmcuZpQbyPHxkyq43yt6Os4yvJoFCIyS 15 | tdECf/uc80Juv1p3phtvrlhfux6GtUJAyiGxUTol5osIAj3U0Atd+ZSbKY4lWH7E 16 | PvsvfQKzdE3JHI+GvU0Rc8i5uuD5xeahUuGg94JTrceJZg+huDbHeDJ7zz5i2HIn 17 | EGlgVr/PsXHow2T5IM0XG+5ZEGQvfGGc93zyzBzPC/7Qtz4rHbJBlo/0IE8V84IA 18 | ZlQOsnb4/nkK4zHX046Z3LgwmiX1zhokfX260zJFE7PfBxSTwhbjDD8wtx4mBcQ+ 19 | gndBnk+pDYdQehtCg7+thWEv1co3Jfb/VAqyOMhpV7lubbmOmHMhuqgDsIzHTof8 20 | 6/PTyeObbmBz2/TygspQG3vxymr9GfdsGsH65SzXkylN/3vdxg21LRiMGpy9y/ge 21 | TilEN66VgS0RAHai21OZeVZ0oszPkVvJ4R4fsfq3YbpZr/PvuKYYoyRTS3lbm2DO 22 | 14OBMA/U4nBz42la3BbcJfwcLwSvugkJKTo6Spjpdu648abP63DMhJ9e545mBw/P 23 | b/+kH0fF7dHfChPEfXssiLaQ0Z0Tote8gj8Qewmbnl73Sm7hZZVk/Vk+TeZCFbTF 24 | ++zwQ/v0zDeP+tNzmvLRAoIBAQDbiJEMw6SfaV5BCCNmjA1DAFfnD571oBdf/QOL 25 | nIzCHP9gogWAaUcJdGQ0njZyOs1oAXJnupw9d6xODxlygW7NQKP2n+zF4WtpefDF 26 | a00T3Za7EIwo71aTKPBsH8Gc9hNlq8ucxdeus7tLU/AsN/7dxXWoUS0hz2L1W72R 27 | 0y6T+LFzZW6z0yMOA/PbVWhoRHESD6AeMnEH61Z20UhH2/7fpUrX0BcTb6GhY5f5 28 | 1BqtOgwUzqGKoGk+OEMEo8YEtk9JWxoG/s2cCZed1YP1NyZderVE4eAB7v+4nYGZ 29 | 7U1GAjOC2TayIcSoKsAcGqQDouZFBqlE/qPgzs3neoKszVRtAoIBAQDRG+ubRrwu 30 | ECAxv2chqMPnLzLH9Ct6TAUpYfcIbNpJ4Sz5oli/mhq6fvemcUQsOEB1ylY6AV1n 31 | pueot+hByMAt70AjhckHglNGtNkYe6dTid9agNfsJ5kSPPj4RquTiR7CL6SUe7Pj 32 | Wqp3YIrCGl6DBAtEhsEzCHBEowqA5jijSmQVv/+JWzEQnxhlnzujW6J5SpmP+z5a 33 | DRLLBihU3Ln4y+giCS6sYU12P+OOm6OBG9rfxJPDsc1W/C1GLqIzktEfRiEb4Tlm 34 | jUqfKuml6hPMCxmGEfeBT1gMMsPUk5wnXCjItFBcPM/nm1L3WhuPL4oaCFrbpT0J 35 | GQszTnONMZn7AoIBAAI6Q4aGobAAaLVuOLW8rBHG+h1dGjbR1griTVvoTcFWe/zm 36 | B/pClzbwWGf0Rwcns5ffk+KbkSRKh5QPMSnpSE+K3V8ORl/HpsheNWQ7b3/JGtNx 37 | Gor7yrwZiT7/n35RPOcmNB22IDckmbzs8B3yv4JOk6AOms+iBBYLRUQqiWl094/9 38 | Mn9eoBdvUNUj67hLIX6WbACCcIUtDSaNfxdDkzV/vt+ru+A1nMU/dnZRgafKU/8O 39 | DbOTZx37giqU4th/ZRSQeRYjFcEEk+ZFGV6unZQLP7WgOR6r5ypfZsZCEyunsBAe 40 | 1iA+mXk/Xc2rM4jZ6U5AAA20K8OHNa2on5TTTI0CggEANCyjPLsJePTNE5yOPve8 41 | 6ZH9OabSslEFeK3Y1WFTldt5PVAklKn+wUmswPrB58ahTsKNgSuWAqbCZnZ8nO1a 42 | BhwMBND8t8LsxFebU2CG/3EHKa28MlAvAT31YP1BEi7EXe9FQUccOz4ECoUsGgI6 43 | UrZPmWmGCxWv7XBAzqiiuRt2nXaqa7s1ItCPZcu1Na1HfLwAVMx7Yjyg08dViNcK 44 | D+nQoa1o19nvWfHW9V4gDbbFhc+mjs1uTqXdJgW8suGPDB6TZEFt8QiP9ebfJWai 45 | Seb7F1ikSL9jWKfR9NhfvfJgurNC2cDSnxdCPDVfavqRmq06/lLcR8jX4pVQf4pA 46 | wQKCAQEAzDxSk2sxeYI0diP37XrkoYt55O+I0UpBcbHcwMoMqhp6iwLjw1HGyA7m 47 | 0dXMcPUMX7YsPceAaOUPcB9fVZ4v4xo/340UG5zXdSF/A10xbIEiGUaJL1KpJDvh 48 | L/0+uks8wNOxJ+mhkZQUBAn/mMX4tv+csrvtSitV16zdFgaWSd/Xbn1R115wmJJB 49 | 4GDnIyP0IO3aqA33H4jYRm/w5WFVexhaf88BmC4A7mqkxT8hrsUXZe8lqcOMWXj+ 50 | 3YL5n9RNhDTtZO6MMG+0oVQqRxieqHLtpQztgy/qzkataZBIN3al3pJxx3jNSTqf 51 | 28dMm0OdcR0nHv95X8iDumIn5HsFlw== 52 | -----END PRIVATE KEY----- 53 | -------------------------------------------------------------------------------- /mainhandler/vulnscanhandlerhelper_test.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/armosec/armoapi-go/apis" 7 | "github.com/armosec/armoapi-go/identifiers" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func getSetCronjobCommand() *apis.Command { 12 | jobParams := apis.CronJobParams{ 13 | JobName: "", 14 | CronTabSchedule: "", 15 | } 16 | return &apis.Command{ 17 | CommandName: apis.TypeSetVulnScanCronJob, 18 | WildWlid: "wlid://cluster-minikube", 19 | Args: map[string]interface{}{ 20 | "jobParams": jobParams, 21 | }, 22 | } 23 | } 24 | 25 | // Extract vuln-scan command from create cronjob command 26 | func TestGetVulnScanRequest(t *testing.T) { 27 | commandSet := getSetCronjobCommand() 28 | commandsScan := getVulnScanRequest(commandSet) 29 | assert.NotEqual(t, commandsScan.Commands[0].CommandName, commandSet.CommandName) 30 | assert.Equal(t, commandsScan.Commands[0].CommandName, (apis.NotificationPolicyType)(apis.TypeScanImages)) 31 | assert.Equal(t, commandsScan.Commands[0].Args, map[string]interface{}(nil)) 32 | 33 | } 34 | 35 | func TestGetNamespaceFromVulnScanCommand(t *testing.T) { 36 | tests := []struct { 37 | name string 38 | command *apis.Command 39 | expectedNamespace string 40 | }{ 41 | { 42 | name: "no namespace in WildWlid - empty string", 43 | command: &apis.Command{ 44 | CommandName: apis.TypeSetVulnScanCronJob, 45 | WildWlid: "wlid://cluster-minikube", 46 | Args: map[string]interface{}{ 47 | "jobParams": apis.CronJobParams{ 48 | JobName: "", 49 | CronTabSchedule: "", 50 | }, 51 | }, 52 | }, 53 | expectedNamespace: "", 54 | }, 55 | { 56 | name: "invalid command - empty string", 57 | command: &apis.Command{ 58 | CommandName: apis.TypeSetVulnScanCronJob, 59 | Args: map[string]interface{}{ 60 | "jobParams": apis.CronJobParams{ 61 | JobName: "", 62 | CronTabSchedule: "", 63 | }, 64 | }, 65 | }, 66 | expectedNamespace: "", 67 | }, 68 | { 69 | name: "namespace from designators", 70 | command: &apis.Command{ 71 | CommandName: apis.TypeSetVulnScanCronJob, 72 | Designators: []identifiers.PortalDesignator{ 73 | { 74 | DesignatorType: identifiers.DesignatorAttributes, 75 | Attributes: map[string]string{ 76 | identifiers.AttributeCluster: "minikube", 77 | identifiers.AttributeNamespace: "test-333", 78 | }, 79 | }, 80 | }, 81 | Args: map[string]interface{}{ 82 | "jobParams": apis.CronJobParams{ 83 | JobName: "", 84 | CronTabSchedule: "", 85 | }, 86 | }, 87 | }, 88 | expectedNamespace: "test-333", 89 | }, 90 | { 91 | name: "namespace from WildWlid", 92 | command: &apis.Command{ 93 | CommandName: apis.TypeSetVulnScanCronJob, 94 | WildWlid: "wlid://cluster-minikube/namespace-test-123", 95 | Args: map[string]interface{}{ 96 | "jobParams": apis.CronJobParams{ 97 | JobName: "", 98 | CronTabSchedule: "", 99 | }, 100 | }, 101 | }, 102 | expectedNamespace: "test-123", 103 | }, 104 | } 105 | 106 | for _, tc := range tests { 107 | t.Run(tc.name, func(t *testing.T) { 108 | ns := getNamespaceFromVulnScanCommand(tc.command) 109 | assert.Equal(t, tc.expectedNamespace, ns) 110 | }) 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /admission/webhook/validator.go: -------------------------------------------------------------------------------- 1 | package webhook 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/kubescape/go-logger" 8 | "github.com/kubescape/go-logger/helpers" 9 | "github.com/kubescape/k8s-interface/k8sinterface" 10 | exporters "github.com/kubescape/operator/admission/exporter" 11 | "github.com/kubescape/operator/admission/rulebinding" 12 | "github.com/kubescape/operator/objectcache" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/runtime/schema" 16 | "k8s.io/apiserver/pkg/admission" 17 | "k8s.io/client-go/kubernetes" 18 | ) 19 | 20 | type AdmissionValidator struct { 21 | kubernetesClient *k8sinterface.KubernetesApi 22 | objectCache objectcache.ObjectCache 23 | exporter exporters.Exporter 24 | ruleBindingCache rulebinding.RuleBindingCache 25 | } 26 | 27 | func NewAdmissionValidator(kubernetesClient *k8sinterface.KubernetesApi, objectCache objectcache.ObjectCache, exporter exporters.Exporter, ruleBindingCache rulebinding.RuleBindingCache) *AdmissionValidator { 28 | return &AdmissionValidator{ 29 | kubernetesClient: kubernetesClient, 30 | objectCache: objectCache, 31 | exporter: exporter, 32 | ruleBindingCache: ruleBindingCache, 33 | } 34 | } 35 | 36 | func (av *AdmissionValidator) GetClientset() kubernetes.Interface { 37 | return av.objectCache.GetKubernetesCache().GetClientset() 38 | } 39 | 40 | // We are implementing the Validate method from the ValidationInterface interface. 41 | func (av *AdmissionValidator) Validate(ctx context.Context, attrs admission.Attributes, o admission.ObjectInterfaces) (err error) { 42 | if attrs.GetObject() != nil { 43 | var object *unstructured.Unstructured 44 | // Fetch the resource if it is a pod and the object is not a pod. 45 | if attrs.GetResource().Resource == "pods" && attrs.GetKind().Kind != "Pod" { 46 | object, err = av.fetchResource(ctx, attrs) 47 | if err != nil { 48 | return admission.NewForbidden(attrs, fmt.Errorf("failed to fetch resource: %w", err)) 49 | } 50 | } else { 51 | object = attrs.GetObject().(*unstructured.Unstructured) 52 | } 53 | 54 | rules := av.ruleBindingCache.ListRulesForObject(ctx, object) 55 | for _, rule := range rules { 56 | failure := rule.ProcessEvent(attrs, av) 57 | if failure != nil { 58 | logger.L().Info("Rule failed", helpers.Interface("failure", failure)) 59 | av.exporter.SendAdmissionAlert(failure) 60 | return admission.NewForbidden(attrs, nil) 61 | } 62 | } 63 | } 64 | 65 | return nil 66 | } 67 | 68 | // Fetch resource/objects from the Kubernetes API based on the given attributes. 69 | func (av *AdmissionValidator) fetchResource(ctx context.Context, attrs admission.Attributes) (*unstructured.Unstructured, error) { 70 | // Get the GVR 71 | gvr := schema.GroupVersionResource{ 72 | Group: attrs.GetResource().Group, 73 | Version: attrs.GetResource().Version, 74 | Resource: attrs.GetResource().Resource, 75 | } 76 | 77 | // Fetch the resource 78 | resource, err := av.kubernetesClient.DynamicClient.Resource(gvr).Namespace(attrs.GetNamespace()).Get(ctx, attrs.GetName(), metav1.GetOptions{}) 79 | if err != nil { 80 | return nil, fmt.Errorf("failed to fetch resource: %w", err) 81 | } 82 | 83 | return resource, nil 84 | } 85 | 86 | // We are implementing the Handles method from the ValidationInterface interface. 87 | // This method returns true if this admission controller can handle the given operation, we accept all operations. 88 | func (av *AdmissionValidator) Handles(operation admission.Operation) bool { 89 | return true 90 | } 91 | -------------------------------------------------------------------------------- /watcher/testdata/deployment.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "apps/v1", 3 | "kind": "Deployment", 4 | "metadata": { 5 | "annotations": { 6 | "deployment.kubernetes.io/revision": "1", 7 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"nginx\"},\"name\":\"nginx-deployment\",\"namespace\":\"test\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"app\":\"nginx\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"nginx\"}},\"spec\":{\"containers\":[{\"image\":\"nginx:1.14.2\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n" 8 | }, 9 | "creationTimestamp": "2023-02-13T08:38:43Z", 10 | "generation": 1, 11 | "labels": { 12 | "app": "nginx" 13 | }, 14 | "name": "nginx-deployment", 15 | "namespace": "test", 16 | "resourceVersion": "59145", 17 | "uid": "90dc30ed-bcc4-484a-a995-0cef8118e2a5" 18 | }, 19 | "spec": { 20 | "progressDeadlineSeconds": 600, 21 | "replicas": 3, 22 | "revisionHistoryLimit": 10, 23 | "selector": { 24 | "matchLabels": { 25 | "app": "nginx" 26 | } 27 | }, 28 | "strategy": { 29 | "rollingUpdate": { 30 | "maxSurge": "25%", 31 | "maxUnavailable": "25%" 32 | }, 33 | "type": "RollingUpdate" 34 | }, 35 | "template": { 36 | "metadata": { 37 | "creationTimestamp": null, 38 | "labels": { 39 | "app": "nginx" 40 | } 41 | }, 42 | "spec": { 43 | "containers": [ 44 | { 45 | "image": "nginx:1.14.2", 46 | "imagePullPolicy": "IfNotPresent", 47 | "name": "nginx", 48 | "ports": [ 49 | { 50 | "containerPort": 80, 51 | "protocol": "TCP" 52 | } 53 | ], 54 | "resources": {}, 55 | "terminationMessagePath": "/dev/termination-log", 56 | "terminationMessagePolicy": "File" 57 | } 58 | ], 59 | "dnsPolicy": "ClusterFirst", 60 | "restartPolicy": "Always", 61 | "schedulerName": "default-scheduler", 62 | "securityContext": {}, 63 | "terminationGracePeriodSeconds": 30 64 | } 65 | } 66 | }, 67 | "status": { 68 | "availableReplicas": 3, 69 | "conditions": [ 70 | { 71 | "lastTransitionTime": "2023-02-13T08:39:02Z", 72 | "lastUpdateTime": "2023-02-13T08:39:02Z", 73 | "message": "Deployment has minimum availability.", 74 | "reason": "MinimumReplicasAvailable", 75 | "status": "True", 76 | "type": "Available" 77 | }, 78 | { 79 | "lastTransitionTime": "2023-02-13T08:38:43Z", 80 | "lastUpdateTime": "2023-02-13T08:39:02Z", 81 | "message": "ReplicaSet \"nginx-deployment-6595874d85\" has successfully progressed.", 82 | "reason": "NewReplicaSetAvailable", 83 | "status": "True", 84 | "type": "Progressing" 85 | } 86 | ], 87 | "observedGeneration": 1, 88 | "readyReplicas": 3, 89 | "replicas": 3, 90 | "updatedReplicas": 3 91 | } 92 | } -------------------------------------------------------------------------------- /mainhandler/testdata/vulnscan/deployment.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "apps/v1", 3 | "kind": "Deployment", 4 | "metadata": { 5 | "annotations": { 6 | "deployment.kubernetes.io/revision": "1", 7 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"nginx\"},\"name\":\"nginx-deployment\",\"namespace\":\"test\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"app\":\"nginx\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"nginx\"}},\"spec\":{\"containers\":[{\"image\":\"nginx:1.14.2\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n" 8 | }, 9 | "creationTimestamp": "2023-02-13T08:38:43Z", 10 | "generation": 1, 11 | "labels": { 12 | "app": "nginx" 13 | }, 14 | "name": "nginx-deployment", 15 | "namespace": "test", 16 | "resourceVersion": "59145", 17 | "uid": "90dc30ed-bcc4-484a-a995-0cef8118e2a5" 18 | }, 19 | "spec": { 20 | "progressDeadlineSeconds": 600, 21 | "replicas": 3, 22 | "revisionHistoryLimit": 10, 23 | "selector": { 24 | "matchLabels": { 25 | "app": "nginx" 26 | } 27 | }, 28 | "strategy": { 29 | "rollingUpdate": { 30 | "maxSurge": "25%", 31 | "maxUnavailable": "25%" 32 | }, 33 | "type": "RollingUpdate" 34 | }, 35 | "template": { 36 | "metadata": { 37 | "creationTimestamp": null, 38 | "labels": { 39 | "app": "nginx" 40 | } 41 | }, 42 | "spec": { 43 | "containers": [ 44 | { 45 | "image": "nginx:1.14.2", 46 | "imagePullPolicy": "IfNotPresent", 47 | "name": "nginx", 48 | "ports": [ 49 | { 50 | "containerPort": 80, 51 | "protocol": "TCP" 52 | } 53 | ], 54 | "resources": {}, 55 | "terminationMessagePath": "/dev/termination-log", 56 | "terminationMessagePolicy": "File" 57 | } 58 | ], 59 | "dnsPolicy": "ClusterFirst", 60 | "restartPolicy": "Always", 61 | "schedulerName": "default-scheduler", 62 | "securityContext": {}, 63 | "terminationGracePeriodSeconds": 30 64 | } 65 | } 66 | }, 67 | "status": { 68 | "availableReplicas": 3, 69 | "conditions": [ 70 | { 71 | "lastTransitionTime": "2023-02-13T08:39:02Z", 72 | "lastUpdateTime": "2023-02-13T08:39:02Z", 73 | "message": "Deployment has minimum availability.", 74 | "reason": "MinimumReplicasAvailable", 75 | "status": "True", 76 | "type": "Available" 77 | }, 78 | { 79 | "lastTransitionTime": "2023-02-13T08:38:43Z", 80 | "lastUpdateTime": "2023-02-13T08:39:02Z", 81 | "message": "ReplicaSet \"nginx-deployment-6595874d85\" has successfully progressed.", 82 | "reason": "NewReplicaSetAvailable", 83 | "status": "True", 84 | "type": "Progressing" 85 | } 86 | ], 87 | "observedGeneration": 1, 88 | "readyReplicas": 3, 89 | "replicas": 3, 90 | "updatedReplicas": 3 91 | } 92 | } -------------------------------------------------------------------------------- /mainhandler/imageregistryhandler.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "encoding/json" 7 | "fmt" 8 | "strings" 9 | 10 | regCommon "github.com/armosec/registryx/common" 11 | "github.com/kubescape/k8s-interface/k8sinterface" 12 | "github.com/kubescape/k8s-interface/workloadinterface" 13 | corev1 "k8s.io/api/core/v1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/runtime" 16 | "k8s.io/client-go/tools/pager" 17 | ) 18 | 19 | type AuthMethods string 20 | 21 | const ( 22 | registryScanConfigmap = "kubescape-registry-scan" 23 | registryNameField = "registryName" 24 | secretNameField = "secretName" 25 | imagesToScanLimit = 500 26 | registriesAuthFieldInSecret = "registriesAuth" 27 | accessTokenAuth AuthMethods = "accesstoken" 28 | registryCronjobTemplate = "registry-scan-cronjob-template" 29 | tagsPageSize = 1000 30 | registryScanDocumentation = "https://hub.armosec.io/docs/registry-vulnerability-scan" 31 | ) 32 | 33 | type registryAuth struct { 34 | SkipTLSVerify *bool `json:"skipTLSVerify,omitempty"` 35 | Insecure *bool `json:"http,omitempty"` 36 | Registry string `json:"registry,omitempty"` 37 | AuthMethod string `json:"auth_method,omitempty"` 38 | Username string `json:"username,omitempty"` 39 | Password string `json:"password,omitempty"` 40 | RegistryToken string `json:"registryToken,omitempty"` 41 | Kind regCommon.RegistryKind `json:"kind,omitempty"` 42 | } 43 | 44 | func parseRegistryAuthSecret(secret k8sinterface.IWorkload) ([]registryAuth, error) { 45 | secretData := secret.GetData() 46 | var registriesAuth []registryAuth 47 | registriesAuthStr, ok := secretData[registriesAuthFieldInSecret].(string) 48 | if !ok { 49 | return nil, fmt.Errorf("error parsing Secret: %s field must be a string", registriesAuthFieldInSecret) 50 | } 51 | data, err := base64.StdEncoding.DecodeString(registriesAuthStr) 52 | if err != nil { 53 | return nil, fmt.Errorf("error parsing Secret: %s", err.Error()) 54 | } 55 | registriesAuthStr = strings.Replace(string(data), "\n", "", -1) 56 | 57 | if e := json.Unmarshal([]byte(registriesAuthStr), ®istriesAuth); e != nil { 58 | return nil, fmt.Errorf("error parsing Secret: %s", e.Error()) 59 | } 60 | 61 | return registriesAuth, nil 62 | } 63 | 64 | func getRegistryScanSecrets(k8sAPI *k8sinterface.KubernetesApi, namespace, secretName string) ([]k8sinterface.IWorkload, error) { 65 | if secretName != "" { 66 | secret, err := k8sAPI.GetWorkload(namespace, "Secret", secretName) 67 | if err == nil && secret != nil { 68 | return []k8sinterface.IWorkload{secret}, err 69 | } 70 | } 71 | 72 | // when secret name is not provided, we will try to find all secrets starting with kubescape-registry-scan 73 | var registryScanSecrets []k8sinterface.IWorkload 74 | err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { 75 | return k8sAPI.KubernetesClient.CoreV1().Secrets(namespace).List(ctx, opts) 76 | }).EachListItem(k8sAPI.Context, metav1.ListOptions{}, func(obj runtime.Object) error { 77 | secret := obj.(*corev1.Secret) 78 | if strings.HasPrefix(secret.GetName(), registryScanConfigmap) { 79 | unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(secret) 80 | if err == nil { 81 | wl := workloadinterface.NewWorkloadObj(unstructuredObj) 82 | registryScanSecrets = append(registryScanSecrets, wl) 83 | } 84 | } 85 | return nil 86 | }) 87 | return registryScanSecrets, err 88 | } 89 | -------------------------------------------------------------------------------- /continuousscanning/service.go: -------------------------------------------------------------------------------- 1 | package continuousscanning 2 | 3 | import ( 4 | "context" 5 | 6 | armoapi "github.com/armosec/armoapi-go/apis" 7 | "github.com/kubescape/go-logger" 8 | "github.com/kubescape/go-logger/helpers" 9 | "github.com/kubescape/operator/config" 10 | "github.com/kubescape/operator/watcher" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/watch" 13 | "k8s.io/client-go/dynamic" 14 | ) 15 | 16 | type ContinuousScanningService struct { 17 | cfg config.IConfig 18 | tl TargetLoader 19 | shutdownRequested chan struct{} 20 | workDone chan struct{} 21 | k8sdynamic dynamic.Interface 22 | eventHandlers []EventHandler 23 | eventQueue *watcher.CooldownQueue 24 | } 25 | 26 | func (s *ContinuousScanningService) listen(ctx context.Context) <-chan armoapi.Command { 27 | producedCommands := make(chan armoapi.Command) 28 | 29 | listOpts := metav1.ListOptions{} 30 | resourceEventsCh := make(chan watch.Event, 100) 31 | 32 | gvrs := s.tl.LoadGVRs(ctx) 33 | logger.L().Info("fetched gvrs", helpers.Interface("gvrs", gvrs)) 34 | wp, _ := NewWatchPool(ctx, s.k8sdynamic, gvrs, listOpts) 35 | wp.Run(ctx, resourceEventsCh) 36 | logger.L().Info("ran watch pool") 37 | 38 | go func(shutdownCh <-chan struct{}, resourceEventsCh <-chan watch.Event, out *watcher.CooldownQueue) { 39 | defer out.Stop() 40 | 41 | for { 42 | select { 43 | case e := <-resourceEventsCh: 44 | logger.L().Debug( 45 | "got event from channel", 46 | helpers.Interface("event", e), 47 | ) 48 | if s.cfg.SkipNamespace(e.Object.(metav1.Object).GetNamespace()) { 49 | continue 50 | } 51 | out.Enqueue(e) 52 | case <-shutdownCh: 53 | return 54 | } 55 | } 56 | 57 | }(s.shutdownRequested, resourceEventsCh, s.eventQueue) 58 | 59 | return producedCommands 60 | } 61 | 62 | func (s *ContinuousScanningService) work(ctx context.Context) { 63 | for e := range s.eventQueue.ResultChan { 64 | logger.L().Debug( 65 | "got an event to process", 66 | helpers.Interface("event", e), 67 | ) 68 | for idx := range s.eventHandlers { 69 | handler := s.eventHandlers[idx] 70 | err := handler.Handle(ctx, e) 71 | if err != nil { 72 | logger.L().Ctx(ctx).Error( 73 | "failed to handle event", 74 | helpers.Interface("event", e), 75 | helpers.Error(err), 76 | ) 77 | } 78 | } 79 | } 80 | 81 | close(s.workDone) 82 | } 83 | 84 | // Launch launches the service. 85 | // 86 | // It sets up the provided watches, listens for events they deliver in the 87 | // background and dispatches them to registered event handlers. 88 | // Launch blocks until all the underlying watches are ready to accept events. 89 | func (s *ContinuousScanningService) Launch(ctx context.Context) <-chan armoapi.Command { 90 | out := make(chan armoapi.Command) 91 | 92 | s.listen(ctx) 93 | go s.work(ctx) 94 | 95 | return out 96 | } 97 | 98 | func (s *ContinuousScanningService) AddEventHandler(fn EventHandler) { 99 | s.eventHandlers = append(s.eventHandlers, fn) 100 | } 101 | 102 | func (s *ContinuousScanningService) Stop() { 103 | close(s.shutdownRequested) 104 | <-s.workDone 105 | } 106 | 107 | func NewContinuousScanningService(cfg config.IConfig, client dynamic.Interface, tl TargetLoader, h ...EventHandler) *ContinuousScanningService { 108 | doneCh := make(chan struct{}) 109 | eventQueue := watcher.NewCooldownQueue() 110 | workDone := make(chan struct{}) 111 | 112 | return &ContinuousScanningService{ 113 | cfg: cfg, 114 | tl: tl, 115 | k8sdynamic: client, 116 | shutdownRequested: doneCh, 117 | eventHandlers: h, 118 | eventQueue: eventQueue, 119 | workDone: workDone, 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /continuousscanning/watchbuilder.go: -------------------------------------------------------------------------------- 1 | package continuousscanning 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/kubescape/go-logger" 8 | "github.com/kubescape/go-logger/helpers" 9 | "github.com/kubescape/k8s-interface/k8sinterface" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime/schema" 12 | "k8s.io/apimachinery/pkg/watch" 13 | "k8s.io/client-go/dynamic" 14 | ) 15 | 16 | func NewDynamicWatch(ctx context.Context, client dynamic.Interface, gvr schema.GroupVersionResource, opts metav1.ListOptions) (watch.Interface, error) { 17 | var w watch.Interface 18 | var err error 19 | if k8sinterface.IsNamespaceScope(&gvr) { 20 | w, err = client.Resource(gvr).Namespace("").Watch(ctx, opts) 21 | } else { 22 | w, err = client.Resource(gvr).Watch(ctx, opts) 23 | } 24 | return w, err 25 | } 26 | 27 | type SelfHealingWatch struct { 28 | opts metav1.ListOptions 29 | client dynamic.Interface 30 | currWatch watch.Interface 31 | makeWatchFunc func(ctx context.Context, client dynamic.Interface, gvr schema.GroupVersionResource, opts metav1.ListOptions) (watch.Interface, error) 32 | gvr schema.GroupVersionResource 33 | } 34 | 35 | func NewSelfHealingWatch(client dynamic.Interface, gvr schema.GroupVersionResource, opts metav1.ListOptions) *SelfHealingWatch { 36 | return &SelfHealingWatch{ 37 | client: client, 38 | gvr: gvr, 39 | opts: opts, 40 | makeWatchFunc: NewDynamicWatch, 41 | } 42 | } 43 | 44 | func (w *SelfHealingWatch) RunUntilWatchCloses(ctx context.Context, out chan<- watch.Event) error { 45 | for { 46 | watchEvents := w.currWatch.ResultChan() 47 | select { 48 | case event, ok := <-watchEvents: 49 | if ok { 50 | out <- event 51 | } else { 52 | return nil 53 | } 54 | case <-ctx.Done(): 55 | return ctx.Err() 56 | } 57 | } 58 | } 59 | 60 | func (w *SelfHealingWatch) Run(ctx context.Context, readyWg *sync.WaitGroup, out chan<- watch.Event) error { 61 | watchInitializedAtLeastOnce := false 62 | 63 | for { 64 | select { 65 | case <-ctx.Done(): 66 | return ctx.Err() 67 | default: 68 | gvr := helpers.String("gvr", w.gvr.String()) 69 | logger.L().Debug("creating watch for GVR", gvr) 70 | watchFunc, err := w.makeWatchFunc(ctx, w.client, w.gvr, w.opts) 71 | if err != nil { 72 | logger.L().Ctx(ctx).Warning( 73 | "got error when creating a watch for gvr", 74 | gvr, 75 | helpers.Error(err), 76 | ) 77 | continue 78 | } 79 | logger.L().Debug("watch created\n") 80 | w.currWatch = watchFunc 81 | 82 | // Watch is considered ready once it is successfully acquired 83 | // Signal we are done only the first time because 84 | // WaitGroups panic when trying to decrease below zero 85 | if !watchInitializedAtLeastOnce { 86 | readyWg.Done() 87 | watchInitializedAtLeastOnce = true 88 | } 89 | w.RunUntilWatchCloses(ctx, out) 90 | } 91 | 92 | } 93 | } 94 | 95 | type WatchPool struct { 96 | pool []*SelfHealingWatch 97 | } 98 | 99 | func (wp *WatchPool) Run(ctx context.Context, out chan<- watch.Event) { 100 | logger.L().Info("Watch pool: starting") 101 | 102 | wg := &sync.WaitGroup{} 103 | for idx := range wp.pool { 104 | wg.Add(1) 105 | go wp.pool[idx].Run(ctx, wg, out) 106 | } 107 | wg.Wait() 108 | 109 | logger.L().Info("Watch pool: started ok") 110 | } 111 | 112 | func NewWatchPool(_ context.Context, client dynamic.Interface, gvrs []schema.GroupVersionResource, opts metav1.ListOptions) (*WatchPool, error) { 113 | watches := make([]*SelfHealingWatch, len(gvrs)) 114 | 115 | for idx := range gvrs { 116 | gvr := gvrs[idx] 117 | selfHealingWatch := NewSelfHealingWatch(client, gvr, opts) 118 | 119 | watches[idx] = selfHealingWatch 120 | } 121 | 122 | pool := &WatchPool{pool: watches} 123 | 124 | return pool, nil 125 | } 126 | -------------------------------------------------------------------------------- /utils/containerprofile_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestSkipContainerProfile(t *testing.T) { 12 | tests := []struct { 13 | annotations map[string]string 14 | name string 15 | wantSkip bool 16 | expectedErr error 17 | }{ 18 | { 19 | name: "status is empty", 20 | annotations: map[string]string{ 21 | helpersv1.CompletionMetadataKey: "complete", 22 | helpersv1.StatusMetadataKey: "", 23 | helpersv1.WlidMetadataKey: "wlid", 24 | helpersv1.InstanceIDMetadataKey: "instanceID", 25 | }, 26 | wantSkip: false, 27 | }, 28 | { 29 | name: "status is Ready", 30 | annotations: map[string]string{ 31 | helpersv1.CompletionMetadataKey: "complete", 32 | helpersv1.StatusMetadataKey: helpersv1.Learning, 33 | helpersv1.WlidMetadataKey: "wlid", 34 | helpersv1.InstanceIDMetadataKey: "instanceID", 35 | }, 36 | wantSkip: false, 37 | }, 38 | { 39 | name: "partial AP", 40 | annotations: map[string]string{ 41 | helpersv1.CompletionMetadataKey: "partial", 42 | helpersv1.StatusMetadataKey: helpersv1.Learning, 43 | helpersv1.WlidMetadataKey: "wlid", 44 | helpersv1.InstanceIDMetadataKey: "instanceID", 45 | }, 46 | wantSkip: false, 47 | }, 48 | { 49 | name: "invalid completion status", 50 | annotations: map[string]string{ 51 | helpersv1.CompletionMetadataKey: "invalid", 52 | helpersv1.StatusMetadataKey: helpersv1.Learning, 53 | helpersv1.WlidMetadataKey: "wlid", 54 | helpersv1.InstanceIDMetadataKey: "instanceID", 55 | }, 56 | wantSkip: false, 57 | }, 58 | { 59 | name: "missing completion status", 60 | annotations: map[string]string{ 61 | helpersv1.StatusMetadataKey: helpersv1.Learning, 62 | helpersv1.WlidMetadataKey: "wlid", 63 | helpersv1.InstanceIDMetadataKey: "instanceID", 64 | }, 65 | wantSkip: false, 66 | }, 67 | { 68 | name: "status is Completed", 69 | annotations: map[string]string{ 70 | helpersv1.CompletionMetadataKey: "complete", 71 | helpersv1.StatusMetadataKey: helpersv1.Completed, 72 | helpersv1.WlidMetadataKey: "wlid", 73 | helpersv1.InstanceIDMetadataKey: "instanceID", 74 | }, 75 | wantSkip: false, 76 | }, 77 | { 78 | name: "status is not recognized", 79 | annotations: map[string]string{ 80 | helpersv1.CompletionMetadataKey: "complete", 81 | helpersv1.StatusMetadataKey: "NotRecognized", 82 | }, 83 | wantSkip: true, 84 | expectedErr: fmt.Errorf("invalid status"), 85 | }, 86 | { 87 | name: "no annotations", 88 | annotations: map[string]string{}, 89 | wantSkip: true, 90 | expectedErr: fmt.Errorf("no annotations"), 91 | }, 92 | { 93 | name: "missing instance WLID annotation", 94 | annotations: map[string]string{ 95 | helpersv1.CompletionMetadataKey: "complete", 96 | helpersv1.StatusMetadataKey: helpersv1.Learning, 97 | helpersv1.InstanceIDMetadataKey: "instanceID", 98 | }, 99 | wantSkip: true, 100 | expectedErr: fmt.Errorf("missing WLID annotation"), 101 | }, 102 | { 103 | name: "missing instance ID annotation", 104 | annotations: map[string]string{ 105 | helpersv1.CompletionMetadataKey: "complete", 106 | helpersv1.StatusMetadataKey: helpersv1.Learning, 107 | helpersv1.WlidMetadataKey: "wlid", 108 | }, 109 | wantSkip: true, 110 | expectedErr: fmt.Errorf("missing InstanceID annotation"), 111 | }, 112 | } 113 | 114 | for _, tt := range tests { 115 | t.Run(tt.name, func(t *testing.T) { 116 | gotSkip, err := SkipContainerProfile(tt.annotations) 117 | assert.Equal(t, tt.wantSkip, gotSkip) 118 | assert.Equal(t, tt.expectedErr, err) 119 | }) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /admission/rules/v1/r2001_portforward.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | apitypes "github.com/armosec/armoapi-go/armotypes" 8 | "github.com/kubescape/go-logger" 9 | "github.com/kubescape/go-logger/helpers" 10 | "github.com/kubescape/operator/admission/rules" 11 | "github.com/kubescape/operator/objectcache" 12 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 13 | "k8s.io/apiserver/pkg/admission" 14 | "k8s.io/apiserver/pkg/authentication/user" 15 | ) 16 | 17 | const ( 18 | R2001ID = "R2001" 19 | R2001Name = "Port forward" 20 | ) 21 | 22 | var R2001PortForwardRuleDescriptor = RuleDescriptor{ 23 | ID: R2001ID, 24 | Name: R2001Name, 25 | Description: "Detecting port forward", 26 | Tags: []string{"portforward"}, 27 | Priority: RulePriorityLow, 28 | RuleCreationFunc: func() rules.RuleEvaluator { 29 | return CreateRuleR2001PortForward() 30 | }, 31 | } 32 | 33 | type R2001PortForward struct { 34 | BaseRule 35 | } 36 | 37 | func CreateRuleR2001PortForward() *R2001PortForward { 38 | return &R2001PortForward{} 39 | } 40 | func (rule *R2001PortForward) Name() string { 41 | return R2001Name 42 | } 43 | 44 | func (rule *R2001PortForward) ID() string { 45 | return R2001ID 46 | } 47 | 48 | func (rule *R2001PortForward) DeleteRule() { 49 | } 50 | 51 | func (rule *R2001PortForward) ProcessEvent(event admission.Attributes, access objectcache.KubernetesCache) rules.RuleFailure { 52 | if event == nil { 53 | return nil 54 | } 55 | 56 | if event.GetKind().Kind != "PodPortForwardOptions" { 57 | return nil 58 | } 59 | 60 | var oldObject *unstructured.Unstructured 61 | if event.GetOldObject() != nil { 62 | oldObject = event.GetOldObject().(*unstructured.Unstructured) 63 | } 64 | 65 | var options *unstructured.Unstructured 66 | if event.GetOperationOptions() != nil { 67 | options = event.GetOperationOptions().(*unstructured.Unstructured) 68 | } 69 | 70 | client := access.GetClientset() 71 | 72 | workloadKind, workloadName, workloadNamespace, nodeName, err := GetControllerDetails(event, client) 73 | if err != nil { 74 | logger.L().Error("Failed to get parent workload details", helpers.Error(err)) 75 | return nil 76 | } 77 | ruleFailure := GenericRuleFailure{ 78 | BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ 79 | AlertName: rule.Name(), 80 | FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule", 81 | Severity: R2001PortForwardRuleDescriptor.Priority, 82 | Timestamp: time.Unix(0, time.Now().UnixNano()), 83 | UniqueID: fmt.Sprintf("%s%s%s", event.GetNamespace(), event.GetName(), workloadName), 84 | }, 85 | AdmissionAlert: apitypes.AdmissionAlert{ 86 | Kind: event.GetKind(), 87 | ObjectName: event.GetName(), 88 | RequestNamespace: event.GetNamespace(), 89 | Resource: event.GetResource(), 90 | Operation: event.GetOperation(), 91 | Object: event.GetObject().(*unstructured.Unstructured), 92 | Subresource: event.GetSubresource(), 93 | UserInfo: &user.DefaultInfo{ 94 | Name: event.GetUserInfo().GetName(), 95 | UID: event.GetUserInfo().GetUID(), 96 | Groups: event.GetUserInfo().GetGroups(), 97 | Extra: event.GetUserInfo().GetExtra(), 98 | }, 99 | 100 | DryRun: event.IsDryRun(), 101 | Options: options, 102 | OldObject: oldObject, 103 | }, 104 | RuleAlert: apitypes.RuleAlert{ 105 | RuleDescription: fmt.Sprintf("Port forward detected on pod %s", event.GetName()), 106 | }, 107 | RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ 108 | PodName: event.GetName(), 109 | Namespace: event.GetNamespace(), 110 | PodNamespace: event.GetNamespace(), 111 | WorkloadName: workloadName, 112 | WorkloadNamespace: workloadNamespace, 113 | WorkloadKind: workloadKind, 114 | NodeName: nodeName, 115 | }, 116 | RuleID: R2001ID, 117 | } 118 | 119 | return &ruleFailure 120 | } 121 | -------------------------------------------------------------------------------- /admission/webhook/server_test.go: -------------------------------------------------------------------------------- 1 | package webhook 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/tls" 7 | "encoding/json" 8 | "io" 9 | "net/http" 10 | "net/http/httptest" 11 | "testing" 12 | "time" 13 | 14 | "github.com/kubescape/node-agent/pkg/watcher" 15 | "github.com/stretchr/testify/assert" 16 | admissionv1 "k8s.io/api/admission/v1" 17 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 18 | "k8s.io/apimachinery/pkg/runtime" 19 | "k8s.io/apimachinery/pkg/runtime/serializer" 20 | "k8s.io/apiserver/pkg/admission" 21 | ) 22 | 23 | // MockValidator is a mock implementation of the admission.ValidationInterface for testing purposes 24 | type MockValidator struct{} 25 | 26 | func (v *MockValidator) Handles(o admission.Operation) bool { 27 | return true 28 | } 29 | 30 | func (v *MockValidator) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { 31 | return nil 32 | } 33 | 34 | func TestHandleHealth(t *testing.T) { 35 | wh := &webhook{} 36 | req := httptest.NewRequest("GET", "http://localhost/health", nil) 37 | w := httptest.NewRecorder() 38 | 39 | wh.handleHealth(w, req) 40 | 41 | resp := w.Result() 42 | body, _ := io.ReadAll(resp.Body) 43 | 44 | assert.Equal(t, "OK", string(body)) 45 | assert.Equal(t, http.StatusOK, resp.StatusCode) 46 | } 47 | 48 | func TestHandleWebhookValidate(t *testing.T) { 49 | scheme := runtime.NewScheme() 50 | codecs := serializer.NewCodecFactory(scheme) 51 | decoder := codecs.UniversalDeserializer() 52 | 53 | wh := &webhook{ 54 | validator: &MockValidator{}, 55 | objectInferfaces: admission.NewObjectInterfacesFromScheme(scheme), 56 | decoder: decoder, 57 | } 58 | 59 | review := admissionv1.AdmissionReview{ 60 | Request: &admissionv1.AdmissionRequest{ 61 | UID: "12345", 62 | Kind: metav1.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, 63 | Object: runtime.RawExtension{ 64 | Raw: []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"test"}}`), 65 | }, 66 | }, 67 | } 68 | reviewBytes, _ := json.Marshal(review) 69 | req := httptest.NewRequest("POST", "http://localhost/validate", bytes.NewReader(reviewBytes)) 70 | req.Header.Set("Content-Type", "application/json") 71 | w := httptest.NewRecorder() 72 | 73 | wh.handleWebhookValidate(w, req) 74 | 75 | resp := w.Result() 76 | body, _ := io.ReadAll(resp.Body) 77 | 78 | assert.Equal(t, http.StatusOK, resp.StatusCode) 79 | 80 | var admissionReview admissionv1.AdmissionReview 81 | err := json.Unmarshal(body, &admissionReview) 82 | assert.NoError(t, err) 83 | assert.Equal(t, review.Request.UID, admissionReview.Response.UID) 84 | assert.True(t, admissionReview.Response.Allowed) 85 | } 86 | 87 | func TestRun(t *testing.T) { 88 | admissionController := New(":8443", "testdata/cert.pem", "testdata/key.pem", runtime.NewScheme(), &MockValidator{}, watcher.NewWatcherMock()) 89 | 90 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) 91 | defer cancel() 92 | 93 | errChan := make(chan error) 94 | go func() { 95 | errChan <- admissionController.Run(ctx) 96 | }() 97 | 98 | // Allow more time for the server to start 99 | time.Sleep(5 * time.Second) 100 | 101 | // Make a health check request 102 | client := &http.Client{ 103 | Transport: &http.Transport{ 104 | TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, 105 | }, 106 | } 107 | resp, err := client.Get("https://localhost:8443/health") 108 | if err != nil { 109 | t.Fatalf("failed to get health check: %v", err) 110 | } 111 | defer resp.Body.Close() 112 | 113 | body, _ := io.ReadAll(resp.Body) 114 | assert.Equal(t, "OK", string(body)) 115 | assert.Equal(t, http.StatusOK, resp.StatusCode) 116 | 117 | // Cancel the context to stop the server 118 | cancel() 119 | 120 | // Wait for the server to shut down 121 | select { 122 | case err := <-errChan: 123 | if err != nil && err != http.ErrServerClosed && err != context.Canceled { 124 | t.Fatalf("Run method returned error: %v", err) 125 | } 126 | case <-time.After(15 * time.Second): 127 | t.Fatal("server did not shut down in time") 128 | default: 129 | return 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /mainhandler/cronjobhandler.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "strings" 8 | 9 | armoapi "github.com/armosec/armoapi-go/apis" 10 | "github.com/armosec/armoapi-go/armotypes" 11 | "github.com/kubescape/k8s-interface/k8sinterface" 12 | v1 "k8s.io/api/batch/v1" 13 | corev1 "k8s.io/api/core/v1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "sigs.k8s.io/yaml" 16 | ) 17 | 18 | const ( 19 | requestBodyFile = "request-body.json" 20 | requestVolumeName = "request-body-volume" 21 | cronjobTemplateName = "cronjobTemplate" 22 | ) 23 | 24 | func fixK8sCronJobNameLimit(jobName string) string { 25 | return fixK8sNameLimit(jobName, 52) 26 | } 27 | 28 | // convert to K8s valid name, lower-case, don't end with '-', maximum X characters 29 | // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names 30 | func fixK8sNameLimit(jobName string, nameLimit int) string { 31 | if len(jobName) > nameLimit { 32 | jobName = jobName[:nameLimit] 33 | } 34 | lastIdx := len(jobName) - 1 35 | for lastIdx >= 0 && jobName[lastIdx] == '-' { 36 | jobName = jobName[:lastIdx] 37 | lastIdx = len(jobName) - 1 38 | } 39 | if lastIdx == -1 { 40 | jobName = "invalid name was given" 41 | } 42 | jobName = k8sNamesRegex.ReplaceAllString(jobName, "-") 43 | return strings.ToLower(jobName) 44 | } 45 | 46 | func getCronJobTemplate(k8sAPI *k8sinterface.KubernetesApi, name, namespace string) (*v1.CronJob, error) { 47 | template, err := k8sAPI.KubernetesClient.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{}) 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | // create cronJob 53 | jobTemplateStr, ok := template.Data[cronjobTemplateName] 54 | if !ok { 55 | return nil, fmt.Errorf("getCronJobTemplate: jobTemplate not found") 56 | } 57 | 58 | jobTemplateObj := &v1.CronJob{} 59 | if err := yaml.Unmarshal([]byte(jobTemplateStr), jobTemplateObj); err != nil { 60 | return nil, err 61 | } 62 | return jobTemplateObj, nil 63 | } 64 | 65 | func getJobParams(command *armoapi.Command) *armoapi.CronJobParams { 66 | 67 | if jobParams := command.GetCronJobParams(); jobParams != nil { 68 | return jobParams 69 | } 70 | 71 | return nil 72 | } 73 | 74 | func createConfigMapForTriggerRequest(k8sAPI *k8sinterface.KubernetesApi, namespace string, name string, req *armoapi.Commands) error { 75 | // create config map 76 | configMap := corev1.ConfigMap{} 77 | configMap.Name = name 78 | if configMap.Labels == nil { 79 | configMap.Labels = make(map[string]string) 80 | } 81 | configMap.Labels["app"] = name 82 | 83 | if configMap.Data == nil { 84 | configMap.Data = make(map[string]string) 85 | } 86 | reqByte, err := json.Marshal(req) 87 | if err != nil { 88 | return err 89 | } 90 | 91 | configMap.Data[requestBodyFile] = string(reqByte) 92 | if _, err := k8sAPI.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(context.Background(), &configMap, metav1.CreateOptions{}); err != nil { 93 | return err 94 | } 95 | return nil 96 | } 97 | 98 | func setCronJobForTriggerRequest(jobTemplateObj *v1.CronJob, name, schedule, jobID string) { 99 | 100 | jobTemplateObj.Name = name 101 | if schedule != "" { 102 | jobTemplateObj.Spec.Schedule = schedule 103 | } 104 | 105 | // update volume name 106 | for i, v := range jobTemplateObj.Spec.JobTemplate.Spec.Template.Spec.Volumes { 107 | if v.Name == requestVolumeName { 108 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Spec.Volumes[i].ConfigMap.Name = name 109 | } 110 | } 111 | 112 | // add annotations 113 | if jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations == nil { 114 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) 115 | } 116 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations[armotypes.CronJobTemplateAnnotationArmoCloudJobIDKeyDeprecated] = jobID // deprecated 117 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations[armotypes.CronJobTemplateAnnotationJobIDKey] = jobID 118 | 119 | // add annotations 120 | if jobTemplateObj.ObjectMeta.Labels == nil { 121 | jobTemplateObj.ObjectMeta.Labels = make(map[string]string) 122 | } 123 | jobTemplateObj.ObjectMeta.Labels["app"] = name 124 | 125 | } 126 | -------------------------------------------------------------------------------- /admission/rules/v1/helpers.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "k8s.io/apiserver/pkg/admission" 12 | "k8s.io/client-go/kubernetes" 13 | ) 14 | 15 | // GetControllerDetails returns the kind, name, namespace, and node name of the controller that owns the pod. 16 | func GetControllerDetails(event admission.Attributes, clientset kubernetes.Interface) (string, string, string, string, error) { 17 | podName, namespace := event.GetName(), event.GetNamespace() 18 | 19 | if podName == "" || namespace == "" { 20 | return "", "", "", "", fmt.Errorf("invalid pod details from admission event") 21 | } 22 | 23 | pod, err := GetPodDetails(clientset, podName, namespace) 24 | if err != nil { 25 | return "", "", "", "", fmt.Errorf("failed to get pod details: %w", err) 26 | } 27 | 28 | workloadKind, workloadName, workloadNamespace := ExtractPodOwner(pod, clientset) 29 | nodeName := pod.Spec.NodeName 30 | 31 | return workloadKind, workloadName, workloadNamespace, nodeName, nil 32 | } 33 | 34 | // GetPodDetails returns the pod details from the Kubernetes API server. 35 | func GetPodDetails(clientset kubernetes.Interface, podName, namespace string) (*corev1.Pod, error) { 36 | pod, err := clientset.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) 37 | if err != nil { 38 | return nil, fmt.Errorf("failed to get pod: %w", err) 39 | } 40 | return pod, nil 41 | } 42 | 43 | // ExtractPodOwner returns the kind, name, and namespace of the controller that owns the pod. 44 | func ExtractPodOwner(pod *corev1.Pod, clientset kubernetes.Interface) (string, string, string) { 45 | for _, ownerRef := range pod.OwnerReferences { 46 | switch ownerRef.Kind { 47 | case "ReplicaSet": 48 | return resolveReplicaSet(ownerRef, pod.Namespace, clientset) 49 | case "Job": 50 | return resolveJob(ownerRef, pod.Namespace, clientset) 51 | case "StatefulSet", "DaemonSet": 52 | return ownerRef.Kind, ownerRef.Name, pod.Namespace 53 | } 54 | } 55 | return "", "", "" 56 | } 57 | 58 | // resolveReplicaSet returns the kind, name, and namespace of the controller that owns the replica set. 59 | func resolveReplicaSet(ownerRef metav1.OwnerReference, namespace string, clientset kubernetes.Interface) (string, string, string) { 60 | rs, err := clientset.AppsV1().ReplicaSets(namespace).Get(context.TODO(), ownerRef.Name, metav1.GetOptions{}) 61 | if err == nil && len(rs.OwnerReferences) > 0 && rs.OwnerReferences[0].Kind == "Deployment" { 62 | return "Deployment", rs.OwnerReferences[0].Name, namespace 63 | } 64 | return "ReplicaSet", ownerRef.Name, namespace 65 | } 66 | 67 | // resolveJob resolves the owner of a Kubernetes Job resource. 68 | // It checks if the given Job is owned by a CronJob, and if so, it returns the CronJob's details. 69 | // Otherwise, it returns the Job's details. 70 | func resolveJob(ownerRef metav1.OwnerReference, namespace string, clientset kubernetes.Interface) (string, string, string) { 71 | job, err := clientset.BatchV1().Jobs(namespace).Get(context.TODO(), ownerRef.Name, metav1.GetOptions{}) 72 | if err == nil && len(job.OwnerReferences) > 0 && job.OwnerReferences[0].Kind == "CronJob" { 73 | return "CronJob", job.OwnerReferences[0].Name, namespace 74 | } 75 | return "Job", ownerRef.Name, namespace 76 | } 77 | 78 | // GetContainerNameFromExecToPodEvent returns the container name from the admission event for exec operations. 79 | func GetContainerNameFromExecToPodEvent(event admission.Attributes) (string, error) { 80 | if event.GetSubresource() != "exec" { 81 | return "", fmt.Errorf("not an exec subresource") 82 | } 83 | 84 | obj := event.GetObject() 85 | if obj == nil { 86 | return "", fmt.Errorf("event object is nil") 87 | } 88 | 89 | unstructuredObj, ok := obj.(*unstructured.Unstructured) 90 | if !ok { 91 | return "", fmt.Errorf("object is not of type *unstructured.Unstructured") 92 | } 93 | 94 | podExecOptions := &corev1.PodExecOptions{} 95 | if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.Object, podExecOptions); err != nil { 96 | return "", fmt.Errorf("failed to decode PodExecOptions: %w", err) 97 | } 98 | 99 | return podExecOptions.Container, nil 100 | } 101 | -------------------------------------------------------------------------------- /watcher/testdata/deployment-two-containers.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "apps/v1", 3 | "kind": "Deployment", 4 | "metadata": { 5 | "annotations": { 6 | "deployment.kubernetes.io/revision": "1", 7 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"nginx\"},\"name\":\"nginx-deployment\",\"namespace\":\"test\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"app\":\"nginx\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"nginx\"}},\"spec\":{\"containers\":[{\"image\":\"nginx:1.14.2\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n" 8 | }, 9 | "creationTimestamp": "2023-02-13T08:38:43Z", 10 | "generation": 1, 11 | "labels": { 12 | "app": "nginx" 13 | }, 14 | "name": "nginx-deployment", 15 | "namespace": "test", 16 | "resourceVersion": "59145", 17 | "uid": "90dc30ed-bcc4-484a-a995-0cef8118e2a5" 18 | }, 19 | "spec": { 20 | "progressDeadlineSeconds": 600, 21 | "replicas": 3, 22 | "revisionHistoryLimit": 10, 23 | "selector": { 24 | "matchLabels": { 25 | "app": "nginx" 26 | } 27 | }, 28 | "strategy": { 29 | "rollingUpdate": { 30 | "maxSurge": "25%", 31 | "maxUnavailable": "25%" 32 | }, 33 | "type": "RollingUpdate" 34 | }, 35 | "template": { 36 | "metadata": { 37 | "creationTimestamp": null, 38 | "labels": { 39 | "app": "nginx" 40 | } 41 | }, 42 | "spec": { 43 | "containers": [ 44 | { 45 | "image": "nginx:1.14.2", 46 | "imagePullPolicy": "IfNotPresent", 47 | "name": "nginx", 48 | "ports": [ 49 | { 50 | "containerPort": 80, 51 | "protocol": "TCP" 52 | } 53 | ], 54 | "resources": {}, 55 | "terminationMessagePath": "/dev/termination-log", 56 | "terminationMessagePolicy": "File" 57 | }, 58 | { 59 | "image": "nginx:1.14.2", 60 | "imagePullPolicy": "IfNotPresent", 61 | "name": "nginx2", 62 | "ports": [ 63 | { 64 | "containerPort": 80, 65 | "protocol": "TCP" 66 | } 67 | ], 68 | "resources": {}, 69 | "terminationMessagePath": "/dev/termination-log", 70 | "terminationMessagePolicy": "File" 71 | } 72 | ], 73 | "dnsPolicy": "ClusterFirst", 74 | "restartPolicy": "Always", 75 | "schedulerName": "default-scheduler", 76 | "securityContext": {}, 77 | "terminationGracePeriodSeconds": 30 78 | } 79 | } 80 | }, 81 | "status": { 82 | "availableReplicas": 3, 83 | "conditions": [ 84 | { 85 | "lastTransitionTime": "2023-02-13T08:39:02Z", 86 | "lastUpdateTime": "2023-02-13T08:39:02Z", 87 | "message": "Deployment has minimum availability.", 88 | "reason": "MinimumReplicasAvailable", 89 | "status": "True", 90 | "type": "Available" 91 | }, 92 | { 93 | "lastTransitionTime": "2023-02-13T08:38:43Z", 94 | "lastUpdateTime": "2023-02-13T08:39:02Z", 95 | "message": "ReplicaSet \"nginx-deployment-6595874d85\" has successfully progressed.", 96 | "reason": "NewReplicaSetAvailable", 97 | "status": "True", 98 | "type": "Progressing" 99 | } 100 | ], 101 | "observedGeneration": 1, 102 | "readyReplicas": 3, 103 | "replicas": 3, 104 | "updatedReplicas": 3 105 | } 106 | } -------------------------------------------------------------------------------- /mainhandler/vulnscan_test.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "context" 5 | _ "embed" 6 | "encoding/json" 7 | "fmt" 8 | "os" 9 | "sort" 10 | "testing" 11 | 12 | dockerregistry "github.com/docker/docker/api/types/registry" 13 | "github.com/kubescape/k8s-interface/k8sinterface" 14 | "github.com/stretchr/testify/assert" 15 | corev1 "k8s.io/api/core/v1" 16 | "k8s.io/apimachinery/pkg/runtime" 17 | k8sfake "k8s.io/client-go/kubernetes/fake" 18 | "k8s.io/utils/ptr" 19 | ) 20 | 21 | func fileToPod(filePath string) *corev1.Pod { 22 | b, err := os.ReadFile(filePath) 23 | if err != nil { 24 | return nil 25 | } 26 | var pod *corev1.Pod 27 | err = json.Unmarshal(b, &pod) 28 | if err != nil { 29 | return nil 30 | } 31 | return pod 32 | } 33 | 34 | func fileToSecret(filePath string) *corev1.Secret { 35 | b, err := os.ReadFile(filePath) 36 | if err != nil { 37 | return nil 38 | } 39 | var secret *corev1.Secret 40 | err = json.Unmarshal(b, &secret) 41 | if err != nil { 42 | return nil 43 | } 44 | return secret 45 | } 46 | 47 | func Test_ActionHandler_getImageScanConfig(t *testing.T) { 48 | type args struct { 49 | namespace string 50 | pod *corev1.Pod 51 | imageTag string 52 | } 53 | tests := []struct { 54 | name string 55 | args args 56 | objects []runtime.Object 57 | want *ImageScanConfig 58 | wantErr assert.ErrorAssertionFunc 59 | }{ 60 | { 61 | name: "no registry treated as docker.io", 62 | args: args{ 63 | imageTag: "nginx:latest", 64 | }, 65 | objects: []runtime.Object{fileToSecret("testdata/vulnscan/registry-secret.json")}, 66 | want: &ImageScanConfig{ 67 | insecure: ptr.To(true), 68 | authConfigs: []dockerregistry.AuthConfig{ 69 | {Username: "test-user", Password: "test-pass", ServerAddress: "docker.io"}, 70 | {Username: "test-user-quay", Password: "test-pass-quay", ServerAddress: "quay.io"}, 71 | }, 72 | }, 73 | wantErr: assert.NoError, 74 | }, 75 | { 76 | name: "quay.IO", 77 | args: args{ 78 | imageTag: "quay.IO/kubescape/nginx:latest", 79 | }, 80 | objects: []runtime.Object{fileToSecret("testdata/vulnscan/registry-secret.json")}, 81 | want: &ImageScanConfig{ 82 | skipTLSVerify: ptr.To(true), 83 | authConfigs: []dockerregistry.AuthConfig{ 84 | {Username: "test-user", Password: "test-pass", ServerAddress: "docker.io"}, 85 | {Username: "test-user-quay", Password: "test-pass-quay", ServerAddress: "quay.io"}, 86 | }, 87 | }, 88 | wantErr: assert.NoError, 89 | }, 90 | { 91 | name: "pod with registry secret", 92 | args: args{ 93 | pod: fileToPod("testdata/vulnscan/pod.json"), 94 | }, 95 | objects: []runtime.Object{fileToSecret("testdata/vulnscan/regcreds.json")}, 96 | want: &ImageScanConfig{ 97 | authConfigs: []dockerregistry.AuthConfig{ 98 | {Username: "matthyx", Password: "toto", Auth: "bWF0dGh5eDp0b3Rv", ServerAddress: "https://index.docker.io/v1/"}, 99 | {Username: "YWRtaW4=", Password: "SGFyYm9yMTIzNDU=", Auth: "YWRtaW46SGFyYm9yMTIzNDU=", ServerAddress: "private.docker.io"}, 100 | }, 101 | }, 102 | wantErr: assert.NoError, 103 | }, 104 | { 105 | name: "pod with registry secret with httpheaders", 106 | args: args{ 107 | pod: fileToPod("testdata/vulnscan/pod.json"), 108 | }, 109 | objects: []runtime.Object{fileToSecret("testdata/vulnscan/regcreds-with-httpheaders.json")}, 110 | want: &ImageScanConfig{ 111 | authConfigs: []dockerregistry.AuthConfig{ 112 | {Username: "", Password: "", Auth: "", ServerAddress: "https://index.docker.io/v1/"}, 113 | {Username: "", Password: "", Auth: "", ServerAddress: "registry.horizons.sh"}, 114 | }, 115 | }, 116 | wantErr: assert.NoError, 117 | }, 118 | } 119 | for _, tt := range tests { 120 | t.Run(tt.name, func(t *testing.T) { 121 | k8sApiMock := &k8sinterface.KubernetesApi{ 122 | Context: context.TODO(), 123 | KubernetesClient: k8sfake.NewClientset(tt.objects...), 124 | } 125 | got, err := getImageScanConfig(k8sApiMock, tt.args.namespace, tt.args.pod, tt.args.imageTag) 126 | if !tt.wantErr(t, err, fmt.Sprintf("getImageScanConfig(%v, %v, %v, %v)", k8sApiMock, tt.args.namespace, tt.args.pod, tt.args.imageTag)) { 127 | return 128 | } 129 | // sort for stable comparison 130 | sort.Slice(got.authConfigs, func(i, j int) bool { 131 | return got.authConfigs[i].ServerAddress < got.authConfigs[j].ServerAddress 132 | }) 133 | assert.Equalf(t, tt.want, got, "getImageScanConfig(%v, %v, %v, %v)", k8sApiMock, tt.args.namespace, tt.args.pod, tt.args.imageTag) 134 | }) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /utils/types.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "time" 7 | 8 | "github.com/armosec/armoapi-go/apis" 9 | "github.com/kubescape/backend/pkg/command/types/v1alpha1" 10 | "github.com/kubescape/go-logger" 11 | "github.com/kubescape/go-logger/helpers" 12 | "github.com/kubescape/k8s-interface/k8sinterface" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/types" 15 | ) 16 | 17 | // Commands list of commands received from websocket 18 | type SessionObj struct { 19 | CustomerGUID string 20 | JobID string 21 | Timestamp time.Time 22 | Command *apis.Command `json:"command"` 23 | ParentJobID string 24 | // optional - if command was created by an OperatorCommand CRD 25 | ParentCommandDetails *OperatorCommandDetails `json:"parentCommandDetails,omitempty"` 26 | } 27 | 28 | type OperatorCommandDetails struct { 29 | Command *v1alpha1.OperatorCommand 30 | StartedAt time.Time 31 | Client *k8sinterface.KubernetesApi 32 | } 33 | 34 | type patchStatus struct { 35 | errors []error 36 | success bool 37 | payload []byte 38 | } 39 | 40 | func WithPayload(payload []byte) func(*patchStatus) { 41 | return func(s *patchStatus) { 42 | s.payload = payload 43 | } 44 | } 45 | 46 | func WithMultipleErrors(errors []error) func(*patchStatus) { 47 | return func(s *patchStatus) { 48 | s.errors = errors 49 | } 50 | } 51 | 52 | func WithError(err error) func(*patchStatus) { 53 | return func(s *patchStatus) { 54 | s.errors = []error{err} 55 | } 56 | } 57 | 58 | func WithSuccess() func(*patchStatus) { 59 | return func(s *patchStatus) { 60 | s.success = true 61 | } 62 | } 63 | 64 | func (s *SessionObj) SetOperatorCommandStatus(ctx context.Context, options ...func(*patchStatus)) { 65 | // if the command was not created by an OperatorCommand CRD, do nothing 66 | if s.ParentCommandDetails == nil { 67 | return 68 | } 69 | 70 | ps := &patchStatus{} 71 | for _, o := range options { 72 | o(ps) 73 | } 74 | 75 | status := v1alpha1.OperatorCommandStatus{ 76 | Executer: "operator", 77 | Started: true, 78 | StartedAt: &metav1.Time{Time: s.ParentCommandDetails.StartedAt}, 79 | Completed: true, 80 | CompletedAt: &metav1.Time{Time: time.Now()}, 81 | Payload: ps.payload, 82 | } 83 | 84 | if len(ps.errors) == 1 { 85 | status.Error = &v1alpha1.OperatorCommandStatusError{Message: ps.errors[0].Error()} 86 | } else if len(ps.errors) > 1 { 87 | status.Error = &v1alpha1.OperatorCommandStatusError{Message: "Failed with multiple errors"} 88 | 89 | // convert all errors to strings and store them in the payload 90 | errorMessages := make([]string, len(ps.errors)) 91 | for i, err := range ps.errors { 92 | errorMessages[i] = err.Error() 93 | } 94 | 95 | // Marshal []string to JSON 96 | payload, err := json.Marshal(errorMessages) 97 | if err != nil { 98 | return 99 | } 100 | status.Payload = payload 101 | } 102 | 103 | patchBytes, err := json.Marshal(map[string]v1alpha1.OperatorCommandStatus{"status": status}) 104 | if err != nil { 105 | logger.L().Error("patchCommandStatus - failed to marshal status patch", helpers.Error(err)) 106 | return 107 | } 108 | 109 | _, err = s.ParentCommandDetails.Client.GetDynamicClient().Resource(v1alpha1.SchemaGroupVersionResource).Namespace(s.ParentCommandDetails.Command.Namespace).Patch( 110 | ctx, 111 | s.ParentCommandDetails.Command.Name, 112 | types.MergePatchType, 113 | patchBytes, 114 | metav1.PatchOptions{}, 115 | "status", 116 | ) 117 | if err != nil { 118 | logger.L().Error("patchCommandStatus - failed to patch command status", helpers.Error(err)) 119 | return 120 | } 121 | logger.L().Info("patchCommandStatus: command status patched successfully") 122 | } 123 | 124 | type ContainerData struct { 125 | ImageTag string // imageTag (from container.Image) 126 | ImageID string // imageID (from containerStatus.ImageID) 127 | InstanceID string // instanceID.GetStringFormatted() 128 | ContainerName string // containerName 129 | ContainerType string // containerType (init or regular) 130 | Slug string // represent the unique identifier of the container 131 | Wlid string // workloadID 132 | } 133 | 134 | // CredStruct holds the various credentials needed to do login into CA BE 135 | type CredStruct struct { 136 | User string `json:"user"` 137 | Password string `json:"password"` 138 | Customer string `json:"customer"` 139 | } 140 | 141 | type Job struct { 142 | ctx context.Context 143 | sessionObj SessionObj 144 | } 145 | 146 | func (j *Job) Context() context.Context { 147 | return j.ctx 148 | } 149 | 150 | func (j *Job) Obj() SessionObj { 151 | return j.sessionObj 152 | } 153 | 154 | func (j *Job) SetContext(ctx context.Context) { 155 | j.ctx = ctx 156 | } 157 | 158 | func (j *Job) SetObj(sessionObj SessionObj) { 159 | j.sessionObj = sessionObj 160 | } 161 | -------------------------------------------------------------------------------- /mainhandler/vulnscanhandler.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "time" 9 | 10 | "github.com/armosec/armoapi-go/armotypes" 11 | "github.com/kubescape/go-logger" 12 | "go.opentelemetry.io/otel" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | ) 15 | 16 | const VulnScanCronjobTemplateName = "kubevuln-cronjob-template" 17 | 18 | func (actionHandler *ActionHandler) setVulnScanCronJob(ctx context.Context) error { 19 | _, span := otel.Tracer("").Start(ctx, "actionHandler.setVulnScanCronJob") 20 | defer span.End() 21 | 22 | if !actionHandler.config.Components().KubevulnScheduler.Enabled { 23 | return errors.New("KubevulnScheduler is not enabled") 24 | } 25 | 26 | req := getVulnScanRequest(actionHandler.sessionObj.Command) 27 | 28 | name := fixK8sCronJobNameLimit(fmt.Sprintf("%s-%d", "kubevuln-schedule", rand.NewSource(time.Now().UnixNano()).Int63())) 29 | 30 | if err := createConfigMapForTriggerRequest(actionHandler.k8sAPI, actionHandler.config.Namespace(), name, req); err != nil { 31 | return err 32 | } 33 | 34 | jobTemplateObj, err := getCronJobTemplate(actionHandler.k8sAPI, VulnScanCronjobTemplateName, actionHandler.config.Namespace()) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | scanJobParams := getJobParams(actionHandler.sessionObj.Command) 40 | if scanJobParams == nil || scanJobParams.CronTabSchedule == "" { 41 | return fmt.Errorf("setVulnScanCronJob: CronTabSchedule not found") 42 | } 43 | setCronJobForTriggerRequest(jobTemplateObj, name, scanJobParams.CronTabSchedule, actionHandler.sessionObj.Command.JobTracking.JobID) 44 | 45 | // add namespace annotation 46 | namespace := getNamespaceFromVulnScanCommand(actionHandler.sessionObj.Command) 47 | logger.L().Info(fmt.Sprintf("setVulnScanCronJob: command namespace - '%s'", namespace)) 48 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations[armotypes.CronJobTemplateAnnotationNamespaceKeyDeprecated] = namespace // deprecated 49 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations[armotypes.CronJobTemplateAnnotationNamespaceKey] = namespace 50 | 51 | if _, err := actionHandler.k8sAPI.KubernetesClient.BatchV1().CronJobs(actionHandler.config.Namespace()).Create(context.Background(), jobTemplateObj, metav1.CreateOptions{}); err != nil { 52 | return err 53 | } 54 | 55 | return nil 56 | } 57 | 58 | func (actionHandler *ActionHandler) updateVulnScanCronJob(ctx context.Context) error { 59 | _, span := otel.Tracer("").Start(ctx, "actionHandler.updateVulnScanCronJob") 60 | defer span.End() 61 | 62 | if !actionHandler.config.Components().KubevulnScheduler.Enabled { 63 | return errors.New("KubevulnScheduler is not enabled") 64 | } 65 | 66 | scanJobParams := getJobParams(actionHandler.sessionObj.Command) 67 | if scanJobParams == nil || scanJobParams.CronTabSchedule == "" { 68 | return fmt.Errorf("updateVulnScanCronJob: CronTabSchedule not found") 69 | } 70 | if scanJobParams.JobName == "" { 71 | return fmt.Errorf("updateVulnScanCronJob: jobName not found") 72 | } 73 | 74 | jobTemplateObj, err := actionHandler.k8sAPI.KubernetesClient.BatchV1().CronJobs(actionHandler.config.Namespace()).Get(context.Background(), scanJobParams.JobName, metav1.GetOptions{}) 75 | if err != nil { 76 | return err 77 | } 78 | 79 | jobTemplateObj.Spec.Schedule = scanJobParams.CronTabSchedule 80 | if jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations == nil { 81 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string) 82 | } 83 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations[armotypes.CronJobTemplateAnnotationUpdateJobIDDeprecated] = actionHandler.sessionObj.Command.JobTracking.JobID // deprecated 84 | jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations[armotypes.CronJobTemplateAnnotationUpdateJobID] = actionHandler.sessionObj.Command.JobTracking.JobID 85 | 86 | _, err = actionHandler.k8sAPI.KubernetesClient.BatchV1().CronJobs(actionHandler.config.Namespace()).Update(context.Background(), jobTemplateObj, metav1.UpdateOptions{}) 87 | if err != nil { 88 | return err 89 | } 90 | return nil 91 | } 92 | 93 | func (actionHandler *ActionHandler) deleteVulnScanCronJob(ctx context.Context) error { 94 | _, span := otel.Tracer("").Start(ctx, "actionHandler.deleteVulnScanCronJob") 95 | defer span.End() 96 | 97 | if !actionHandler.config.Components().KubevulnScheduler.Enabled { 98 | return errors.New("KubevulnScheduler is not enabled") 99 | } 100 | 101 | scanJobParams := getJobParams(actionHandler.sessionObj.Command) 102 | if scanJobParams == nil || scanJobParams.JobName == "" { 103 | return fmt.Errorf("deleteVulnScanCronJob: CronTabSchedule not found") 104 | } 105 | 106 | return actionHandler.deleteCronjob(scanJobParams.JobName, actionHandler.config.Namespace()) 107 | 108 | } 109 | 110 | func (actionHandler *ActionHandler) deleteCronjob(name, namespace string) error { 111 | if err := actionHandler.k8sAPI.KubernetesClient.BatchV1().CronJobs(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil { 112 | return err 113 | } 114 | 115 | if err := actionHandler.k8sAPI.KubernetesClient.CoreV1().ConfigMaps(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil { 116 | return err 117 | } 118 | return nil 119 | 120 | } 121 | -------------------------------------------------------------------------------- /continuousscanning/loader_test.go: -------------------------------------------------------------------------------- 1 | package continuousscanning 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "k8s.io/apimachinery/pkg/runtime/schema" 11 | ) 12 | 13 | type stubReader struct { 14 | data []byte 15 | e error 16 | } 17 | 18 | func (r stubReader) Read(p []byte) (int, error) { 19 | if r.e != nil { 20 | return 0, r.e 21 | } 22 | n := copy(p, r.data) 23 | return n, io.EOF 24 | } 25 | 26 | func TestFileFetcher(t *testing.T) { 27 | validData := `{ 28 | "match": [ 29 | { 30 | "apiGroups": [], 31 | "apiVersions": ["v1"], 32 | "resources": ["Deployment"] 33 | }, 34 | { 35 | "apiGroups": ["rbac.authorization.k8s.io"], 36 | "apiVersions": ["v1"], 37 | "resources": ["ClusterRoleBinding"] 38 | } 39 | ], 40 | "namespaces": ["kube-system", "default"] 41 | }` 42 | tt := []struct { 43 | name string 44 | inputData []byte 45 | inputDataReader io.Reader 46 | wantRules *MatchingRules 47 | wantError bool 48 | }{ 49 | { 50 | name: "valid data parses correctly", 51 | inputDataReader: &stubReader{data: []byte(validData), e: nil}, 52 | wantRules: &MatchingRules{ 53 | APIResources: []APIResourceMatch{ 54 | { 55 | Groups: []string{}, 56 | Versions: []string{"v1"}, 57 | Resources: []string{"Deployment"}, 58 | }, 59 | { 60 | Groups: []string{"rbac.authorization.k8s.io"}, 61 | Versions: []string{"v1"}, 62 | Resources: []string{"ClusterRoleBinding"}, 63 | }, 64 | }, 65 | Namespaces: []string{"kube-system", "default"}, 66 | }, 67 | }, 68 | { 69 | name: "malformed JSON as input returns error", 70 | inputDataReader: &stubReader{data: []byte{}, e: nil}, 71 | wantRules: nil, 72 | wantError: true, 73 | }, 74 | { 75 | name: "reader error returns error", 76 | inputDataReader: &stubReader{data: []byte(validData), e: errors.New("some error")}, 77 | wantRules: nil, 78 | wantError: true, 79 | }, 80 | } 81 | 82 | for _, tc := range tt { 83 | t.Run(tc.name, func(t *testing.T) { 84 | ctx := context.Background() 85 | var f MatchingRuleFetcher 86 | f = NewFileFetcher(tc.inputDataReader) 87 | 88 | gotRules, gotError := f.Fetch(ctx) 89 | 90 | assert.Equal(t, tc.wantRules, gotRules) 91 | if tc.wantError { 92 | assert.Error(t, gotError) 93 | } 94 | }) 95 | } 96 | } 97 | 98 | type stubFetcher struct { 99 | data *MatchingRules 100 | } 101 | 102 | func (f *stubFetcher) Fetch(ctx context.Context) (*MatchingRules, error) { 103 | return f.data, nil 104 | } 105 | 106 | func TestTargetLoader(t *testing.T) { 107 | tt := []struct { 108 | name string 109 | inputMatchingRules *MatchingRules 110 | wantGVRs []schema.GroupVersionResource 111 | wantErr bool 112 | }{ 113 | { 114 | name: "single valid GVRs should return appropriate values", 115 | inputMatchingRules: &MatchingRules{ 116 | APIResources: []APIResourceMatch{ 117 | { 118 | Groups: []string{""}, 119 | Versions: []string{"v1"}, 120 | Resources: []string{"Pod", "ReplicaSet"}, 121 | }, 122 | }, 123 | }, 124 | wantGVRs: []schema.GroupVersionResource{ 125 | {Group: "", Version: "v1", Resource: "Pod"}, 126 | {Group: "", Version: "v1", Resource: "ReplicaSet"}, 127 | }, 128 | }, 129 | { 130 | name: "single valid GVRs should return appropriate values", 131 | inputMatchingRules: &MatchingRules{ 132 | APIResources: []APIResourceMatch{ 133 | { 134 | Groups: []string{""}, 135 | Versions: []string{"v1"}, 136 | Resources: []string{"Pod", "ReplicaSet"}, 137 | }, 138 | { 139 | Groups: []string{"rbac.authorization.k8s.io"}, 140 | Versions: []string{"v1"}, 141 | Resources: []string{"ClusterRoleBinding"}, 142 | }, 143 | }, 144 | }, 145 | wantGVRs: []schema.GroupVersionResource{ 146 | {Group: "", Version: "v1", Resource: "Pod"}, 147 | {Group: "", Version: "v1", Resource: "ReplicaSet"}, 148 | {Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "ClusterRoleBinding"}, 149 | }, 150 | }, 151 | { 152 | name: "multiple valid GVRs should return appropriate values", 153 | inputMatchingRules: &MatchingRules{ 154 | APIResources: []APIResourceMatch{ 155 | { 156 | Groups: []string{""}, 157 | Versions: []string{"v1", "v2"}, 158 | Resources: []string{"Pod", "ReplicaSet"}, 159 | }, 160 | { 161 | Groups: []string{"rbac.authorization.k8s.io"}, 162 | Versions: []string{"v1"}, 163 | Resources: []string{"ClusterRoleBinding"}, 164 | }, 165 | }, 166 | }, 167 | wantGVRs: []schema.GroupVersionResource{ 168 | {Group: "", Version: "v1", Resource: "Pod"}, 169 | {Group: "", Version: "v1", Resource: "ReplicaSet"}, 170 | {Group: "", Version: "v2", Resource: "Pod"}, 171 | {Group: "", Version: "v2", Resource: "ReplicaSet"}, 172 | {Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "ClusterRoleBinding"}, 173 | }, 174 | }, 175 | } 176 | 177 | for _, tc := range tt { 178 | t.Run(tc.name, func(t *testing.T) { 179 | ctx := context.Background() 180 | var fetcher MatchingRuleFetcher 181 | fetcher = &stubFetcher{tc.inputMatchingRules} 182 | var l TargetLoader 183 | l = NewTargetLoader(fetcher) 184 | 185 | gotData := l.LoadGVRs(ctx) 186 | 187 | assert.Equal(t, tc.wantGVRs, gotData) 188 | }) 189 | } 190 | 191 | } 192 | -------------------------------------------------------------------------------- /watcher/commandswatcher.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/cenkalti/backoff" 10 | mapset "github.com/deckarep/golang-set/v2" 11 | "github.com/kubescape/backend/pkg/command" 12 | "github.com/kubescape/backend/pkg/command/types/v1alpha1" 13 | "github.com/kubescape/go-logger" 14 | "github.com/kubescape/go-logger/helpers" 15 | "github.com/kubescape/k8s-interface/k8sinterface" 16 | "github.com/kubescape/operator/config" 17 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 18 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 19 | "k8s.io/apimachinery/pkg/runtime" 20 | "k8s.io/apimachinery/pkg/watch" 21 | "k8s.io/client-go/tools/pager" 22 | ) 23 | 24 | const minOperatorCommandAge = 30 * time.Minute 25 | 26 | type CommandWatchHandler struct { 27 | k8sAPI *k8sinterface.KubernetesApi 28 | eventQueue *CooldownQueue 29 | commandReceivers mapset.Set[chan v1alpha1.OperatorCommand] 30 | config config.IConfig 31 | } 32 | 33 | func NewCommandWatchHandler(k8sAPI *k8sinterface.KubernetesApi, config config.IConfig) *CommandWatchHandler { 34 | return &CommandWatchHandler{ 35 | k8sAPI: k8sAPI, 36 | eventQueue: NewCooldownQueue(), 37 | commandReceivers: mapset.NewSet[chan v1alpha1.OperatorCommand](), 38 | config: config, 39 | } 40 | } 41 | 42 | func (cwh *CommandWatchHandler) RegisterForCommands(receiver chan v1alpha1.OperatorCommand) { 43 | cwh.commandReceivers.Add(receiver) 44 | } 45 | 46 | func (cwh *CommandWatchHandler) CommandWatch(ctx context.Context) { 47 | logger.L().Info("start watching CommandWatchHandler") 48 | // list commands and add them to the queue, this is for the commands that were created before the watch started 49 | cwh.listCommands(ctx) 50 | // start watching 51 | go cwh.watchRetry(ctx) 52 | 53 | // process events 54 | for event := range cwh.eventQueue.ResultChan { 55 | switch event.Type { 56 | case watch.Added: 57 | cwh.AddHandler(event.Object) 58 | } 59 | } 60 | } 61 | 62 | func (cwh *CommandWatchHandler) listCommands(ctx context.Context) { 63 | if err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { 64 | return cwh.k8sAPI.GetDynamicClient().Resource(v1alpha1.SchemaGroupVersionResource).Namespace(cwh.config.Namespace()).List(context.Background(), opts) 65 | }).EachListItem(ctx, metav1.ListOptions{ 66 | LabelSelector: fmt.Sprintf("%s=%s", command.OperatorCommandAppNameLabelKey, "operator"), 67 | }, func(obj runtime.Object) error { 68 | cwh.eventQueue.Enqueue(watch.Event{ 69 | Type: watch.Added, 70 | Object: obj, 71 | }) 72 | return nil 73 | }); err != nil { 74 | logger.L().Ctx(ctx).Error("failed add list of commands", helpers.Error(err)) 75 | } 76 | } 77 | 78 | func (cwh *CommandWatchHandler) watchRetry(ctx context.Context) { 79 | watchOpts := metav1.ListOptions{ 80 | Watch: true, 81 | LabelSelector: fmt.Sprintf("%s=%s", command.OperatorCommandAppNameLabelKey, "operator"), 82 | } 83 | if err := backoff.RetryNotify(func() error { 84 | watcher, err := cwh.k8sAPI.GetDynamicClient().Resource(v1alpha1.SchemaGroupVersionResource).Namespace(cwh.config.Namespace()).Watch(context.Background(), watchOpts) 85 | if err != nil { 86 | return fmt.Errorf("failed to get commands watcher: %w", err) 87 | } 88 | for { 89 | event, chanActive := <-watcher.ResultChan() 90 | if metaObject, ok := event.Object.(resourceVersionGetter); ok { 91 | watchOpts.ResourceVersion = metaObject.GetResourceVersion() 92 | } 93 | if cwh.eventQueue.Closed() { 94 | watcher.Stop() 95 | return backoff.Permanent(errors.New("event queue closed")) 96 | } 97 | if !chanActive { 98 | // channel closed, retry 99 | return errWatchClosed 100 | } 101 | if event.Type == watch.Error { 102 | return fmt.Errorf("watch error: %s", event.Object) 103 | } 104 | cwh.eventQueue.Enqueue(event) 105 | } 106 | }, newBackOff(), func(err error, d time.Duration) { 107 | if !errors.Is(err, errWatchClosed) { 108 | logger.L().Ctx(ctx).Warning("watch", helpers.Error(err), 109 | helpers.String("resource", "commands"), 110 | helpers.String("retry in", d.String())) 111 | } 112 | }); err != nil { 113 | logger.L().Ctx(ctx).Fatal("giving up watch", helpers.Error(err), 114 | helpers.String("resource", "commands")) 115 | } 116 | } 117 | 118 | func (cwh *CommandWatchHandler) AddHandler(obj runtime.Object) { 119 | if un, ok := obj.(*unstructured.Unstructured); ok { 120 | // Convert the unstructured object to a typed object. 121 | cmd, err := ConvertUnstructuredToOperatorCommand(un) 122 | if err != nil { 123 | logger.L().Error("Failed to convert unstructured object to OperatorCommand", helpers.Error(err)) 124 | return 125 | } 126 | 127 | // Skip the command if it is older than the creation threshold 128 | if cmd.CreationTimestamp.Time.Before(time.Now().Add(-minOperatorCommandAge)) { 129 | logger.L().Info("Skipping old OperatorCommand", helpers.String("command", cmd.Name), helpers.String("GUID", cmd.Spec.GUID), helpers.String("CreationTimestamp", cmd.CreationTimestamp.String())) 130 | return 131 | } 132 | 133 | // Skip the command if it has already been processed. 134 | if cmd.Status.Completed { 135 | logger.L().Info("Command has already been processed, skipping.", helpers.String("command", cmd.Name)) 136 | return 137 | } 138 | 139 | for receiver := range cwh.commandReceivers.Iter() { 140 | receiver <- *cmd 141 | } 142 | } 143 | } 144 | 145 | func ConvertUnstructuredToOperatorCommand(un *unstructured.Unstructured) (*v1alpha1.OperatorCommand, error) { 146 | cmd := &v1alpha1.OperatorCommand{} 147 | if err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, cmd); err != nil { 148 | return nil, err 149 | } 150 | 151 | return cmd, nil 152 | } 153 | -------------------------------------------------------------------------------- /config/config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/armosec/utils-k8s-go/armometadata" 8 | "github.com/kubescape/backend/pkg/utils" 9 | "github.com/kubescape/operator/admission/rulesupdate" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestLoadCapabilities(t *testing.T) { 14 | type args struct { 15 | path string 16 | } 17 | tests := []struct { 18 | name string 19 | args args 20 | want CapabilitiesConfig 21 | wantErr bool 22 | }{ 23 | { 24 | name: "TestLoadCapabilities", 25 | args: args{ 26 | path: "../configuration", 27 | }, 28 | want: CapabilitiesConfig{ 29 | Capabilities: Capabilities{ 30 | ConfigurationScan: "enable", 31 | ContinuousScan: "disable", 32 | NodeScan: "enable", 33 | Relevancy: "enable", 34 | VulnerabilityScan: "enable", 35 | AdmissionController: "enable", 36 | }, 37 | Components: Components{ 38 | HostScanner: Component{Enabled: true}, 39 | Kubescape: Component{Enabled: true}, 40 | KubescapeScheduler: Component{Enabled: true}, 41 | Kubevuln: Component{Enabled: true}, 42 | KubevulnScheduler: Component{Enabled: true}, 43 | NodeAgent: Component{Enabled: true}, 44 | Operator: Component{Enabled: true}, 45 | OtelCollector: Component{Enabled: true}, 46 | ServiceDiscovery: Component{Enabled: true}, 47 | Storage: Component{Enabled: true}, 48 | }, 49 | Configurations: Configurations{ 50 | Persistence: "enable", 51 | Server: Server{ 52 | DiscoveryURL: "foo.com", 53 | }, 54 | }, 55 | ServiceScanConfig: ServiceScanConfig{ 56 | Interval: 60 * time.Second, 57 | Enabled: true, 58 | }, 59 | }, 60 | }, 61 | } 62 | for _, tt := range tests { 63 | t.Run(tt.name, func(t *testing.T) { 64 | got, err := LoadCapabilitiesConfig(tt.args.path) 65 | if (err != nil) != tt.wantErr { 66 | t.Errorf("LoadCapabilitiesConfig() error = %v, wantErr %v", err, tt.wantErr) 67 | return 68 | } 69 | assert.Equal(t, tt.want, got) 70 | }) 71 | } 72 | } 73 | 74 | func TestLoadConfig(t *testing.T) { 75 | type args struct { 76 | path string 77 | } 78 | tests := []struct { 79 | name string 80 | args args 81 | want Config 82 | wantErr bool 83 | }{ 84 | { 85 | name: "TestLoadConfig", 86 | args: args{ 87 | path: "../configuration", 88 | }, 89 | want: Config{ 90 | Namespace: "kubescape", 91 | RestAPIPort: "4002", 92 | CleanUpRoutineInterval: 10 * time.Minute, 93 | ConcurrencyWorkers: 3, 94 | TriggerSecurityFramework: false, 95 | MatchingRulesFilename: "/etc/config/matchingRules.json", 96 | EventDeduplicationInterval: 2 * time.Minute, 97 | ExcludeNamespaces: []string{"kube-system", "kubescape"}, 98 | IncludeNamespaces: []string{}, 99 | PodScanGuardTime: time.Hour, 100 | RulesUpdateConfig: rulesupdate.RulesUpdaterConfig{ 101 | Enabled: false, 102 | Interval: 5 * time.Minute, 103 | Namespace: "default", 104 | }, 105 | }, 106 | }, 107 | } 108 | for _, tt := range tests { 109 | t.Run(tt.name, func(t *testing.T) { 110 | got, err := LoadConfig(tt.args.path) 111 | if (err != nil) != tt.wantErr { 112 | t.Errorf("LoadConfig() error = %v, wantErr %v", err, tt.wantErr) 113 | return 114 | } 115 | assert.Equal(t, tt.want, got) 116 | }) 117 | } 118 | } 119 | 120 | func TestValidateConfig(t *testing.T) { 121 | type args struct { 122 | clusterConfig armometadata.ClusterConfig 123 | components CapabilitiesConfig 124 | credentials *utils.Credentials 125 | } 126 | tests := []struct { 127 | name string 128 | args args 129 | wantErr bool 130 | }{ 131 | { 132 | name: "no clusterName: error", 133 | args: args{ 134 | clusterConfig: armometadata.ClusterConfig{}, 135 | components: CapabilitiesConfig{}, 136 | credentials: &utils.Credentials{}, 137 | }, 138 | wantErr: true, 139 | }, 140 | { 141 | name: "no discovery, no account: error", 142 | args: args{ 143 | clusterConfig: armometadata.ClusterConfig{ 144 | ClusterName: "foo", 145 | }, 146 | components: CapabilitiesConfig{}, 147 | credentials: &utils.Credentials{}, 148 | }, 149 | }, 150 | { 151 | name: "discovery, no account: error", 152 | args: args{ 153 | clusterConfig: armometadata.ClusterConfig{ 154 | ClusterName: "foo", 155 | }, 156 | components: CapabilitiesConfig{ 157 | Components: Components{ServiceDiscovery: Component{Enabled: true}}, 158 | }, 159 | credentials: &utils.Credentials{}, 160 | }, 161 | wantErr: true, 162 | }, 163 | { 164 | name: "no discovery, account: no error", 165 | args: args{ 166 | clusterConfig: armometadata.ClusterConfig{ 167 | ClusterName: "foo", 168 | }, 169 | credentials: &utils.Credentials{ 170 | Account: "123", 171 | AccessKey: "abc", 172 | }, 173 | components: CapabilitiesConfig{}, 174 | }, 175 | }, 176 | { 177 | name: "discovery, account: no error", 178 | args: args{ 179 | clusterConfig: armometadata.ClusterConfig{ 180 | ClusterName: "foo", 181 | }, 182 | credentials: &utils.Credentials{ 183 | Account: "123", 184 | AccessKey: "abc", 185 | }, 186 | components: CapabilitiesConfig{ 187 | Components: Components{ServiceDiscovery: Component{Enabled: true}}, 188 | }, 189 | }, 190 | }, 191 | } 192 | for _, tt := range tests { 193 | t.Run(tt.name, func(t *testing.T) { 194 | operatorConfig := NewOperatorConfig(tt.args.components, tt.args.clusterConfig, tt.args.credentials, Config{}) 195 | err := ValidateConfig(operatorConfig) 196 | if (err != nil) != tt.wantErr { 197 | t.Errorf("ValidateConfig() error = %v, wantErr %v", err, tt.wantErr) 198 | return 199 | } 200 | }) 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /admission/rulebinding/cache/helpers_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "testing" 5 | 6 | typesv1 "github.com/kubescape/node-agent/pkg/rulebindingmanager/types/v1" 7 | "github.com/stretchr/testify/assert" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 11 | ) 12 | 13 | func TestResourcesToWatch(t *testing.T) { 14 | tests := []struct { 15 | name string 16 | }{ 17 | { 18 | name: "Test with valid resources", 19 | }, 20 | } 21 | 22 | for _, tt := range tests { 23 | t.Run(tt.name, func(t *testing.T) { 24 | result := resourcesToWatch() 25 | 26 | assert.Equal(t, 1, len(result)) 27 | 28 | rbResource := result[0] 29 | assert.Equal(t, typesv1.RuleBindingAlertGvr, rbResource.GroupVersionResource()) 30 | assert.Equal(t, metav1.ListOptions{}, rbResource.ListOptions()) 31 | }) 32 | } 33 | } 34 | 35 | func TestUnstructuredToRuleBinding(t *testing.T) { 36 | tests := []struct { 37 | obj *unstructured.Unstructured 38 | name string 39 | wantErr bool 40 | }{ 41 | { 42 | name: "Test with valid rule binding", 43 | obj: &unstructured.Unstructured{ 44 | Object: map[string]interface{}{ 45 | "apiVersion": "v1", 46 | "kind": "RuntimeAlertRuleBinding", 47 | "metadata": map[string]interface{}{ 48 | "name": "rule-1", 49 | "namespace": "default", 50 | }, 51 | "spec": map[string]interface{}{ 52 | "ruleName": "rule-1", 53 | }, 54 | }, 55 | }, 56 | wantErr: false, 57 | }, 58 | { 59 | name: "Test with invalid rule binding", 60 | obj: &unstructured.Unstructured{ 61 | Object: map[string]interface{}{ 62 | "apiVersion": "v1", 63 | "kind": "RuntimeAlertRuleBinding", 64 | "metadata": map[string]interface{}{ 65 | "name": "rule-1", 66 | "namespace": "default", 67 | }, 68 | "spec": "invalid", 69 | }, 70 | }, 71 | wantErr: true, 72 | }, 73 | } 74 | 75 | for _, tt := range tests { 76 | t.Run(tt.name, func(t *testing.T) { 77 | _, err := unstructuredToRuleBinding(tt.obj) 78 | if (err != nil) != tt.wantErr { 79 | t.Errorf("unstructuredToRuleBinding() error = %v, wantErr %v", err, tt.wantErr) 80 | return 81 | } 82 | }) 83 | } 84 | } 85 | 86 | func TestUniqueName(t *testing.T) { 87 | tests := []struct { 88 | name string 89 | obj metav1.Object 90 | expected string 91 | }{ 92 | { 93 | name: "Pod with valid namespace and name", 94 | obj: &corev1.Pod{ 95 | ObjectMeta: metav1.ObjectMeta{ 96 | Name: "pod-1", 97 | Namespace: "default", 98 | }, 99 | }, 100 | expected: "default/pod-1", 101 | }, 102 | { 103 | name: "Pod with empty namespace", 104 | obj: &corev1.Pod{ 105 | ObjectMeta: metav1.ObjectMeta{ 106 | Name: "pod-1", 107 | Namespace: "", 108 | }, 109 | }, 110 | expected: "/pod-1", 111 | }, 112 | { 113 | name: "Pod with empty name", 114 | obj: &corev1.Pod{ 115 | ObjectMeta: metav1.ObjectMeta{ 116 | Name: "", 117 | Namespace: "default", 118 | }, 119 | }, 120 | expected: "default/", 121 | }, 122 | { 123 | name: "Pod with empty namespace and name", 124 | obj: &corev1.Pod{ 125 | ObjectMeta: metav1.ObjectMeta{ 126 | Name: "", 127 | Namespace: "", 128 | }, 129 | }, 130 | expected: "/", 131 | }, 132 | { 133 | name: "RuntimeAlertRuleBinding with valid namespace and name", 134 | obj: &typesv1.RuntimeAlertRuleBinding{ 135 | ObjectMeta: metav1.ObjectMeta{ 136 | Name: "name-1", 137 | Namespace: "default", 138 | }, 139 | }, 140 | expected: "default/name-1", 141 | }, 142 | { 143 | name: "RuntimeAlertRuleBinding with empty namespace", 144 | obj: &typesv1.RuntimeAlertRuleBinding{ 145 | ObjectMeta: metav1.ObjectMeta{ 146 | Name: "name-1", 147 | Namespace: "", 148 | }, 149 | }, 150 | expected: "/name-1", 151 | }, 152 | { 153 | name: "RuntimeAlertRuleBinding with empty name", 154 | obj: &typesv1.RuntimeAlertRuleBinding{ 155 | ObjectMeta: metav1.ObjectMeta{ 156 | Name: "", 157 | Namespace: "default", 158 | }, 159 | }, 160 | expected: "default/", 161 | }, 162 | { 163 | name: "RuntimeAlertRuleBinding with empty namespace and name", 164 | obj: &typesv1.RuntimeAlertRuleBinding{ 165 | ObjectMeta: metav1.ObjectMeta{ 166 | Name: "", 167 | Namespace: "", 168 | }, 169 | }, 170 | expected: "/", 171 | }, 172 | { 173 | name: "Unstructured with valid namespace and name", 174 | obj: &unstructured.Unstructured{ 175 | Object: map[string]interface{}{ 176 | "metadata": map[string]interface{}{ 177 | "name": "name-1", 178 | "namespace": "default", 179 | }, 180 | }, 181 | }, 182 | expected: "default/name-1", 183 | }, 184 | { 185 | name: "Unstructured with empty namespace", 186 | obj: &unstructured.Unstructured{ 187 | Object: map[string]interface{}{ 188 | "metadata": map[string]interface{}{ 189 | "name": "name-1", 190 | "namespace": "", 191 | }, 192 | }, 193 | }, 194 | expected: "/name-1", 195 | }, 196 | { 197 | name: "Unstructured with empty name", 198 | obj: &unstructured.Unstructured{ 199 | Object: map[string]interface{}{ 200 | "metadata": map[string]interface{}{ 201 | "name": "", 202 | "namespace": "default", 203 | }, 204 | }, 205 | }, 206 | expected: "default/", 207 | }, 208 | { 209 | name: "Unstructured with empty namespace and name", 210 | obj: &unstructured.Unstructured{ 211 | Object: map[string]interface{}{ 212 | "metadata": map[string]interface{}{ 213 | "name": "", 214 | "namespace": "", 215 | }, 216 | }, 217 | }, 218 | expected: "/", 219 | }, 220 | } 221 | 222 | for _, tt := range tests { 223 | t.Run(tt.name, func(t *testing.T) { 224 | result := uniqueName(tt.obj) 225 | assert.Equal(t, tt.expected, result) 226 | }) 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /admission/rules/v1/r2000_exec_to_pod.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | apitypes "github.com/armosec/armoapi-go/armotypes" 9 | "github.com/armosec/armoapi-go/armotypes/common" 10 | "github.com/kubescape/go-logger" 11 | "github.com/kubescape/go-logger/helpers" 12 | "github.com/kubescape/operator/admission/rules" 13 | "github.com/kubescape/operator/objectcache" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apiserver/pkg/admission" 16 | "k8s.io/apiserver/pkg/authentication/user" 17 | ) 18 | 19 | const ( 20 | R2000ID = "R2000" 21 | R2000Name = "Exec to pod" 22 | ) 23 | 24 | var R2000ExecToPodRuleDescriptor = RuleDescriptor{ 25 | ID: R2000ID, 26 | Name: R2000Name, 27 | Description: "Detecting exec to pod", 28 | Tags: []string{"exec"}, 29 | Priority: RulePriorityLow, 30 | RuleCreationFunc: func() rules.RuleEvaluator { 31 | return CreateRuleR2000ExecToPod() 32 | }, 33 | } 34 | 35 | type R2000ExecToPod struct { 36 | BaseRule 37 | } 38 | 39 | func CreateRuleR2000ExecToPod() *R2000ExecToPod { 40 | return &R2000ExecToPod{} 41 | } 42 | func (rule *R2000ExecToPod) Name() string { 43 | return R2000Name 44 | } 45 | 46 | func (rule *R2000ExecToPod) ID() string { 47 | return R2000ID 48 | } 49 | 50 | func (rule *R2000ExecToPod) DeleteRule() { 51 | } 52 | 53 | func (rule *R2000ExecToPod) ProcessEvent(event admission.Attributes, access objectcache.KubernetesCache) rules.RuleFailure { 54 | if event == nil { 55 | return nil 56 | } 57 | 58 | if event.GetKind().Kind != "PodExecOptions" { 59 | return nil 60 | } 61 | 62 | var oldObject *unstructured.Unstructured 63 | if event.GetOldObject() != nil { 64 | oldObject = event.GetOldObject().(*unstructured.Unstructured) 65 | } 66 | 67 | var options *unstructured.Unstructured 68 | if event.GetOperationOptions() != nil { 69 | options = event.GetOperationOptions().(*unstructured.Unstructured) 70 | } 71 | 72 | client := access.GetClientset() 73 | 74 | workloadKind, workloadName, workloadNamespace, nodeName, err := GetControllerDetails(event, client) 75 | if err != nil { 76 | logger.L().Error("Failed to get parent workload details", helpers.Error(err)) 77 | return nil 78 | } 79 | 80 | containerName, err := GetContainerNameFromExecToPodEvent(event) 81 | if err != nil { 82 | logger.L().Error("Failed to get container name from exec to pod event", helpers.Error(err)) 83 | containerName = "" 84 | } 85 | 86 | cmdline, err := getCommandLine(event.GetObject().(*unstructured.Unstructured)) 87 | if err != nil { 88 | logger.L().Error("Failed to get command line from exec to pod event", helpers.Error(err)) 89 | cmdline = "" 90 | } 91 | 92 | ruleFailure := GenericRuleFailure{ 93 | BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ 94 | AlertName: rule.Name(), 95 | FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule", 96 | Severity: R2000ExecToPodRuleDescriptor.Priority, 97 | Timestamp: time.Unix(0, time.Now().UnixNano()), 98 | Identifiers: &common.Identifiers{ 99 | Process: &common.ProcessEntity{ 100 | Name: extractComm(cmdline), 101 | CommandLine: cmdline, 102 | }, 103 | }, 104 | UniqueID: fmt.Sprintf("%s%s%s", event.GetNamespace(), event.GetName(), containerName), 105 | }, 106 | AdmissionAlert: apitypes.AdmissionAlert{ 107 | Kind: event.GetKind(), 108 | ObjectName: event.GetName(), 109 | RequestNamespace: event.GetNamespace(), 110 | Resource: event.GetResource(), 111 | Operation: event.GetOperation(), 112 | Object: event.GetObject().(*unstructured.Unstructured), 113 | Subresource: event.GetSubresource(), 114 | UserInfo: &user.DefaultInfo{ 115 | Name: event.GetUserInfo().GetName(), 116 | UID: event.GetUserInfo().GetUID(), 117 | Groups: event.GetUserInfo().GetGroups(), 118 | Extra: event.GetUserInfo().GetExtra(), 119 | }, 120 | 121 | DryRun: event.IsDryRun(), 122 | Options: options, 123 | OldObject: oldObject, 124 | }, 125 | RuleAlert: apitypes.RuleAlert{ 126 | RuleDescription: fmt.Sprintf("Exec to pod detected on pod %s", event.GetName()), 127 | }, 128 | RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ 129 | PodName: event.GetName(), 130 | PodNamespace: event.GetNamespace(), 131 | Namespace: event.GetNamespace(), 132 | WorkloadName: workloadName, 133 | WorkloadNamespace: workloadNamespace, 134 | WorkloadKind: workloadKind, 135 | NodeName: nodeName, 136 | ContainerName: containerName, 137 | }, 138 | RuleID: R2000ID, 139 | RuntimeProcessDetails: apitypes.ProcessTree{ 140 | ProcessTree: apitypes.Process{ 141 | Cmdline: cmdline, 142 | Comm: extractComm(cmdline), 143 | }, 144 | }, 145 | } 146 | 147 | return &ruleFailure 148 | } 149 | 150 | func getCommandLine(object *unstructured.Unstructured) (string, error) { 151 | commandField, ok := object.Object["command"] 152 | if !ok { 153 | return "", fmt.Errorf("alert is missing admission alert object command") 154 | } 155 | command, ok := interfaceToStringSlice(commandField) 156 | if !ok { 157 | return "", fmt.Errorf("alert cannot convert alert object command to string list") 158 | } 159 | 160 | return strings.Join(command, " "), nil 161 | } 162 | 163 | func extractComm(cmdline string) string { 164 | comm := strings.Split(cmdline, " ") 165 | if len(comm) == 0 { 166 | return cmdline 167 | } 168 | 169 | return comm[0] 170 | } 171 | 172 | func interfaceToStringSlice(data interface{}) ([]string, bool) { 173 | switch v := data.(type) { 174 | case []string: 175 | return v, true 176 | case []interface{}: 177 | result := make([]string, len(v)) 178 | for i, item := range v { 179 | str, ok := item.(string) 180 | if !ok { 181 | return nil, false 182 | } 183 | result[i] = str 184 | } 185 | return result, true 186 | case string: 187 | return []string{v}, true 188 | default: 189 | return nil, false 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /mainhandler/kubescapehandlerhelper_test.go: -------------------------------------------------------------------------------- 1 | package mainhandler 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/armosec/armoapi-go/apis" 7 | "github.com/armosec/utils-go/boolutils" 8 | utilsmetadata "github.com/armosec/utils-k8s-go/armometadata" 9 | beUtils "github.com/kubescape/backend/pkg/utils" 10 | utilsapisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1" 11 | utilsmetav1 "github.com/kubescape/opa-utils/httpserver/meta/v1" 12 | "github.com/kubescape/operator/config" 13 | "github.com/kubescape/operator/utils" 14 | "github.com/stretchr/testify/assert" 15 | v1 "k8s.io/api/batch/v1" 16 | ) 17 | 18 | func TestGetKubescapeV1ScanRequest(t *testing.T) { 19 | { 20 | actionHandler := ActionHandler{ 21 | sessionObj: &utils.SessionObj{ 22 | Command: &apis.Command{ 23 | Args: map[string]interface{}{ 24 | utils.KubescapeScanV1: nil, 25 | }, 26 | }, 27 | }, 28 | } 29 | req, err := getKubescapeV1ScanRequest(actionHandler.sessionObj.Command.Args) 30 | assert.NoError(t, err) 31 | assert.NotNil(t, 0, req) 32 | } 33 | { 34 | actionHandler := ActionHandler{ 35 | sessionObj: &utils.SessionObj{ 36 | Command: &apis.Command{Args: map[string]interface{}{utils.KubescapeScanV1: map[string]interface{}{"format": "json"}}}, 37 | }, 38 | } 39 | req, err := getKubescapeV1ScanRequest(actionHandler.sessionObj.Command.Args) 40 | assert.NoError(t, err) 41 | assert.Equal(t, "json", req.Format) 42 | } 43 | { 44 | actionHandler := ActionHandler{ 45 | sessionObj: &utils.SessionObj{ 46 | Command: &apis.Command{Args: map[string]interface{}{utils.KubescapeScanV1: map[string]interface{}{}}}, 47 | }, 48 | } 49 | req, err := getKubescapeV1ScanRequest(actionHandler.sessionObj.Command.Args) 50 | assert.NoError(t, err) 51 | assert.Equal(t, "all", req.TargetNames[0]) 52 | assert.Equal(t, utilsapisv1.KindFramework, req.TargetType) 53 | } 54 | { 55 | actionHandler := ActionHandler{ 56 | sessionObj: &utils.SessionObj{ 57 | Command: &apis.Command{Args: map[string]interface{}{utils.KubescapeScanV1: map[string]interface{}{"targetType": utilsapisv1.KindFramework, "targetNames": []string{""}}}}, 58 | }, 59 | } 60 | req, err := getKubescapeV1ScanRequest(actionHandler.sessionObj.Command.Args) 61 | assert.NoError(t, err) 62 | assert.Equal(t, "all", req.TargetNames[0]) 63 | assert.Equal(t, utilsapisv1.KindFramework, req.TargetType) 64 | } 65 | } 66 | 67 | func TestUpdateCronJobTemplate(t *testing.T) { 68 | { 69 | jobTemplateObj := &v1.CronJob{} 70 | name := "1234" 71 | schedule := "* * * * *" 72 | jobID := "5678" 73 | setCronJobTemplate(jobTemplateObj, name, schedule, jobID, "nsa", utilsapisv1.KindFramework, boolutils.BoolPointer(true)) 74 | assert.Equal(t, name, jobTemplateObj.ObjectMeta.Name) 75 | assert.Equal(t, schedule, jobTemplateObj.Spec.Schedule) 76 | assert.Equal(t, jobID, jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations["armo.jobid"]) 77 | assert.Equal(t, "nsa", jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations["armo.framework"]) 78 | assert.Equal(t, "true", jobTemplateObj.Spec.JobTemplate.Spec.Template.Annotations["armo.host-scanner"]) 79 | } 80 | } 81 | 82 | func TestFixK8sNameLimit(t *testing.T) { 83 | if res := fixK8sNameLimit("AA-bb-", 63); res != "aa-bb" { 84 | t.Errorf("invalid k8s:%s", res) 85 | } 86 | if res := fixK8sNameLimit("aa-bb-fddddddddddddDDDDDdfdsfsdfdsfdsere122347985-046mntwensd8yf98", 63); res != "aa-bb-fddddddddddddddddddfdsfsdfdsfdsere122347985-046mntwensd8y" { 87 | t.Errorf("invalid k8s:%s", res) 88 | } 89 | if res := fixK8sNameLimit("aa-bb-fddddddddddddDDDDDdfdsfsdfdsfdsere122347985_046mntwensd--f98", 63); res != "aa-bb-fddddddddddddddddddfdsfsdfdsfdsere122347985-046mntwensd" { 90 | t.Errorf("invalid k8s:%s", res) 91 | } 92 | 93 | } 94 | 95 | func TestGetKubescapeV1ScanURL(t *testing.T) { 96 | cfg := config.NewOperatorConfig(config.CapabilitiesConfig{}, utilsmetadata.ClusterConfig{ 97 | KubescapeURL: "kubescape", 98 | }, &beUtils.Credentials{}, config.Config{}) 99 | u := getKubescapeV1ScanURL(cfg) 100 | assert.Equal(t, "http://kubescape/v1/scan?keep=false", u.String()) 101 | } 102 | 103 | func TestGetKubescapeV1ScanStatusURL(t *testing.T) { 104 | cfg := config.NewOperatorConfig(config.CapabilitiesConfig{}, utilsmetadata.ClusterConfig{ 105 | KubescapeURL: "armo-kubescape:8080", 106 | }, &beUtils.Credentials{}, config.Config{}) 107 | 108 | url := getKubescapeV1ScanStatusURL(cfg, "123").String() 109 | assert.Equal(t, url, "http://armo-kubescape:8080/v1/status?ID=123", "getKubescapeV1ScanStatusURL failed") 110 | } 111 | 112 | func TestAppendSecurityFramework(t *testing.T) { 113 | tests := []struct { 114 | postScanRequest *utilsmetav1.PostScanRequest 115 | expected *utilsmetav1.PostScanRequest 116 | name string 117 | }{ 118 | { 119 | name: "framework scan with one framework ", 120 | postScanRequest: &utilsmetav1.PostScanRequest{TargetType: utilsapisv1.KindFramework, TargetNames: []string{"nsa"}}, 121 | expected: &utilsmetav1.PostScanRequest{TargetType: utilsapisv1.KindFramework, TargetNames: []string{"nsa", "security"}}, 122 | }, 123 | { 124 | name: "framework scan with all", 125 | postScanRequest: &utilsmetav1.PostScanRequest{TargetType: utilsapisv1.KindFramework, TargetNames: []string{"all"}}, 126 | expected: &utilsmetav1.PostScanRequest{TargetType: utilsapisv1.KindFramework, TargetNames: []string{"all", "security"}}, 127 | }, 128 | { 129 | name: "framework scan with security", 130 | postScanRequest: &utilsmetav1.PostScanRequest{TargetType: utilsapisv1.KindFramework, TargetNames: []string{"security"}}, 131 | expected: &utilsmetav1.PostScanRequest{TargetType: utilsapisv1.KindFramework, TargetNames: []string{"security"}}, 132 | }, 133 | { 134 | name: "not framework scan", 135 | postScanRequest: &utilsmetav1.PostScanRequest{TargetType: utilsapisv1.KindControl, TargetNames: []string{"c-0001"}}, 136 | expected: &utilsmetav1.PostScanRequest{TargetType: utilsapisv1.KindControl, TargetNames: []string{"c-0001"}}, 137 | }, 138 | } 139 | 140 | for _, test := range tests { 141 | t.Run(test.name, func(t *testing.T) { 142 | appendSecurityFramework(test.postScanRequest) 143 | assert.Equal(t, test.expected, test.postScanRequest) 144 | }) 145 | } 146 | 147 | } 148 | -------------------------------------------------------------------------------- /utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "net/http" 8 | "slices" 9 | "strings" 10 | 11 | "github.com/armosec/armoapi-go/apis" 12 | "github.com/armosec/utils-go/httputils" 13 | pkgwlid "github.com/armosec/utils-k8s-go/wlid" 14 | "github.com/kubescape/k8s-interface/instanceidhandler" 15 | instanceidhandlerv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1" 16 | "github.com/kubescape/k8s-interface/k8sinterface" 17 | "github.com/kubescape/k8s-interface/workloadinterface" 18 | "github.com/kubescape/operator/config" 19 | "github.com/panjf2000/ants/v2" 20 | corev1 "k8s.io/api/core/v1" 21 | ) 22 | 23 | const KubescapeScanV1 = "scanV1" 24 | const KubescapeRequestPathV1 = "v1/scan" 25 | const KubescapeRequestStatusV1 = "v1/status" 26 | const ArgdContainerToImageIds = "containerToImageIDs" 27 | const ArgsPod = "pod" 28 | const ArgsContainerData = "containerData" 29 | const ArgsName = "name" 30 | const ArgsNamespace = "namespace" 31 | const dockerPullableURN = "docker-pullable://" 32 | 33 | const CommandScanContainerProfile = "scanContainerProfile" 34 | 35 | func MapToString(m map[string]interface{}) []string { 36 | s := []string{} 37 | for i := range m { 38 | s = append(s, i) 39 | } 40 | return s 41 | } 42 | 43 | type ClientMock struct { 44 | } 45 | 46 | func (c *ClientMock) Do(req *http.Request) (*http.Response, error) { 47 | return &http.Response{ 48 | Status: "200 OK", 49 | StatusCode: 200, 50 | Body: http.NoBody}, nil 51 | } 52 | 53 | func InitHttpClient(url string) httputils.IHttpClient { 54 | // If the url is not configured, then the HttpClient defined as a mock 55 | if url == "" { 56 | return &ClientMock{} 57 | } 58 | return &http.Client{} 59 | } 60 | 61 | func ExtractImageID(imageID string) string { 62 | return strings.TrimPrefix(imageID, dockerPullableURN) 63 | } 64 | 65 | func AddCommandToChannel(ctx context.Context, config config.IConfig, cmd *apis.Command, workerPool *ants.PoolWithFunc) error { 66 | newSessionObj := NewSessionObj(ctx, config, cmd, "", "") 67 | return workerPool.Invoke(Job{ctx: ctx, sessionObj: *newSessionObj}) 68 | } 69 | 70 | func ExtractContainersToImageIDsFromPod(pod *corev1.Pod) map[string]string { 71 | containersToImageIDs := make(map[string]string) 72 | for _, containerStatus := range pod.Status.ContainerStatuses { 73 | if containerStatus.State.Running != nil { 74 | imageID := ExtractImageID(containerStatus.ImageID) 75 | containersToImageIDs[containerStatus.Name] = imageID 76 | } 77 | } 78 | 79 | for _, containerStatus := range pod.Status.InitContainerStatuses { 80 | if containerStatus.State.Running != nil { 81 | imageID := ExtractImageID(containerStatus.ImageID) 82 | containersToImageIDs[containerStatus.Name] = imageID 83 | } 84 | } 85 | 86 | return containersToImageIDs 87 | } 88 | 89 | func PodToContainerData(k8sAPI *k8sinterface.KubernetesApi, pod *corev1.Pod, instanceID instanceidhandler.IInstanceID, clusterName string) (*ContainerData, error) { 90 | 91 | wlid, err := GetParentIDForPod(k8sAPI, pod, clusterName) 92 | if err != nil { 93 | return nil, err 94 | } 95 | slug, _ := instanceID.GetSlug(false) 96 | 97 | imageTag, imageID, ok := getImage(pod, instanceID) 98 | if !ok { 99 | // this should never happen 100 | return nil, errors.New("failed to get image ID") 101 | } 102 | 103 | return &ContainerData{ 104 | ContainerName: instanceID.GetContainerName(), 105 | ImageID: imageID, 106 | Slug: slug, 107 | Wlid: wlid, 108 | ContainerType: string(instanceID.GetInstanceType()), 109 | ImageTag: imageTag, 110 | InstanceID: instanceID.GetStringFormatted(), 111 | }, nil 112 | } 113 | 114 | func GetParentIDForPod(k8sAPI *k8sinterface.KubernetesApi, pod *corev1.Pod, clusterName string) (string, error) { 115 | pod.TypeMeta.Kind = "Pod" 116 | podMarshalled, err := json.Marshal(pod) 117 | if err != nil { 118 | return "", err 119 | } 120 | wl, err := workloadinterface.NewWorkload(podMarshalled) 121 | if err != nil { 122 | return "", err 123 | } 124 | kind, name, err := k8sAPI.CalculateWorkloadParentRecursive(wl) 125 | if kind == "Node" { 126 | return pkgwlid.GetWLID(clusterName, wl.GetNamespace(), wl.GetNamespace(), wl.GetName()), nil 127 | } 128 | if err != nil { 129 | return "", err 130 | } 131 | return pkgwlid.GetWLID(clusterName, wl.GetNamespace(), kind, name), nil 132 | } 133 | 134 | func getImage(pod *corev1.Pod, instanceID instanceidhandler.IInstanceID) (string, string, bool) { 135 | var imageTag, imageID string 136 | switch instanceID.GetInstanceType() { 137 | case instanceidhandlerv1.Container: 138 | imageTag = getImageFromSpec(instanceID, pod.Spec.Containers) 139 | // consider getting imageTag from status 140 | _, imageID = getImageFromStatus(instanceID, pod.Status.ContainerStatuses) 141 | case instanceidhandlerv1.InitContainer: 142 | imageTag = getImageFromSpec(instanceID, pod.Spec.InitContainers) 143 | // consider getting imageTag from status 144 | _, imageID = getImageFromStatus(instanceID, pod.Status.InitContainerStatuses) 145 | // FIXME add ephemeralContainer 146 | } 147 | 148 | if imageTag == "" || imageID == "" { 149 | return "", "", false 150 | } 151 | return imageTag, imageID, true 152 | } 153 | 154 | // returns the image and imageID of the container 155 | func getImageFromStatus(instanceID instanceidhandler.IInstanceID, containerStatuses []corev1.ContainerStatus) (string, string) { 156 | for _, containerStatus := range containerStatuses { 157 | if instanceID.GetContainerName() == containerStatus.Name { 158 | return containerStatus.Image, ExtractImageID(containerStatus.ImageID) 159 | } 160 | } 161 | return "", "" 162 | } 163 | 164 | func getImageFromSpec(instanceID instanceidhandler.IInstanceID, containers []corev1.Container) string { 165 | for _, container := range containers { 166 | if instanceID.GetContainerName() == container.Name { 167 | return container.Image 168 | } 169 | } 170 | return "" 171 | } 172 | 173 | func PodHasParent(pod *corev1.Pod) bool { 174 | if pod == nil { 175 | return false 176 | } 177 | if len(pod.OwnerReferences) > 0 { 178 | return slices.Contains([]string{"apps/v1", "batch/v1", "batch/v1beta1"}, pod.OwnerReferences[0].APIVersion) 179 | } 180 | if podHash, ok := pod.Labels["pod-template-hash"]; ok && podHash != "" { 181 | return true 182 | } 183 | return false 184 | } 185 | --------------------------------------------------------------------------------