├── .github
│   └── workflows
│       ├── ci.yaml
│       ├── namespaced-system-tests.yaml
│       ├── release-helm-chart.yaml
│       ├── release.yaml
│       └── system-tests.yaml
├── .gitignore
├── Containerfile
├── LICENSE
├── Makefile
├── README.md
├── SECURITY.md
├── artifacts
│   └── .gitkeep
├── chart
│   ├── demo
│   │   ├── .helmignore
│   │   ├── Chart.yaml
│   │   ├── templates
│   │   │   ├── _helpers.tpl
│   │   │   ├── alertmanager-configmap.yaml
│   │   │   ├── alertmanager-service.yaml
│   │   │   └── alertmanager.yaml
│   │   └── values.yaml
│   └── kubecop
│       ├── .helmignore
│       ├── Chart.yaml
│       ├── charts
│       │   ├── clustered-crds
│       │   │   ├── Chart.yaml
│       │   │   ├── crds
│       │   │   │   └── runtime-rule-binding.crd.yaml
│       │   │   └── values.yaml
│       │   └── namespaced-crds
│       │       ├── Chart.yaml
│       │       ├── crds
│       │       │   └── runtime-rule-binding.crd.yaml
│       │       └── values.yaml
│       ├── crds
│       │   └── app-profile.crd.yaml
│       ├── files
│       │   ├── clamd.conf
│       │   └── freshclam.conf
│       ├── templates
│       │   ├── _helpers.tpl
│       │   ├── clusterrole.yaml
│       │   ├── clusterrolebinding.yaml
│       │   ├── configmap.yaml
│       │   ├── controller-statefulset.yaml
│       │   ├── deamonset.yaml
│       │   ├── default-rule-binding.yaml
│       │   ├── podmonitor.yaml
│       │   └── serviceaccount.yaml
│       └── values.yaml
├── cmd
│   ├── main.go
│   ├── malicious-exe
│   │   ├── Containerfile
│   │   └── malicious.go
│   ├── rule_md_gen
│   │   └── generate_md.go
│   └── single-tracer-test
│       ├── build_n_run.sh
│       └── single-trace.go
├── demo
│   ├── README.md
│   ├── assets
│   │   ├── alertmanager.png
│   │   ├── fileless-malware.png
│   │   ├── kubectl.png
│   │   ├── ls.png
│   │   ├── malwares.png
│   │   ├── ping.png
│   │   ├── pods.png
│   │   ├── service-account-token.png
│   │   └── webapp.png
│   ├── fileless_exec
│   │   ├── kubernetes-manifest.yaml
│   │   └── kubernetes-manifests.yaml
│   ├── general_attack
│   │   ├── commands.md
│   │   └── webapp
│   │       ├── Containerfile
│   │       ├── index.html
│   │       ├── ping-app.yaml
│   │       ├── ping.php
│   │       └── setup.sh
│   ├── malwares_image
│   │   └── Containerfile
│   └── miner
│       ├── Containerfile
│       └── miner-pod.yaml
├── dev
│   ├── devpod-clamav.yaml
│   ├── devpod.yaml
│   └── nginx
│       ├── nginx-app-profile.yaml
│       └── nginx-deployment.yaml
├── docs
│   ├── development.md
│   └── images
│       └── kubecop-software-design.png
├── go.mod
├── go.sum
├── pkg
│   ├── approfilecache
│   │   ├── cache.go
│   │   ├── cache_test.go
│   │   ├── stats_prometheus.go
│   │   └── types.go
│   ├── engine
│   │   ├── apiserver.go
│   │   ├── container.go
│   │   ├── container_test.go
│   │   ├── containeridcache.go
│   │   ├── engine.go
│   │   ├── engine_test.go
│   │   ├── poll.go
│   │   ├── processing.go
│   │   ├── router.go
│   │   ├── rule
│   │   │   ├── README.md
│   │   │   ├── factory.go
│   │   │   ├── factory_test.go
│   │   │   ├── gen_rule_files.py
│   │   │   ├── mock.go
│   │   │   ├── r0001_unexpected_process_launched.go
│   │   │   ├── r0001_unexpected_process_launched_test.go
│   │   │   ├── r0002_unexpected_file_access.go
│   │   │   ├── r0002_unexpected_file_access_test.go
│   │   │   ├── r0003_unexpected_system_call.go
│   │   │   ├── r0003_unexpected_system_call_test.go
│   │   │   ├── r0004_unexpected_capability_used.go
│   │   │   ├── r0004_unexpected_capability_used_test.go
│   │   │   ├── r0005_unexpected_domain_request.go
│   │   │   ├── r0005_unexpected_domain_request_test.go
│   │   │   ├── r0006_unexpected_service_account_token_access.go
│   │   │   ├── r0006_unexpected_service_account_token_access_test.go
│   │   │   ├── r0007_kubernetes_client_executed.go
│   │   │   ├── r0007_kubernetes_client_executed_test.go
│   │   │   ├── r1000_exec_from_malicious_source.go
│   │   │   ├── r1000_exec_from_malicious_source_test.go
│   │   │   ├── r1001_exec_binary_not_in_base_image.go
│   │   │   ├── r1001_exec_binary_not_in_base_image_test.go
│   │   │   ├── r1002_load_kernel_module.go
│   │   │   ├── r1002_load_kernel_module_test.go
│   │   │   ├── r1003_malicious_ssh_connection.go
│   │   │   ├── r1003_malicious_ssh_connection_test.go
│   │   │   ├── r1004_exec_from_mount.go
│   │   │   ├── r1004_exec_from_mount_test.go
│   │   │   ├── r1006_unshare_system_call.go
│   │   │   ├── r1006_unshare_system_call_test.go
│   │   │   ├── r1007_crypto_miners.go
│   │   │   ├── r1007_crypto_miners_test.go
│   │   │   ├── rule.go
│   │   │   └── types.go
│   │   ├── stats.go
│   │   ├── stats_prometheus.go
│   │   └── stats_test.go
│   ├── exporters
│   │   ├── README.md
│   │   ├── alert_manager.go
│   │   ├── alert_manager_test.go
│   │   ├── csv_exporter.go
│   │   ├── csv_exporter_test.go
│   │   ├── exporter.go
│   │   ├── exporters_bus.go
│   │   ├── http_exporter.go
│   │   ├── http_exporter_test.go
│   │   ├── stdout_exporter.go
│   │   ├── stdout_exporter_test.go
│   │   ├── syslog_exporter.go
│   │   ├── syslog_exporter_test.go
│   │   ├── utils.go
│   │   └── utils_test.go
│   ├── rulebindingstore
│   │   ├── README.md
│   │   ├── store.go
│   │   ├── store_test.go
│   │   ├── structures.go
│   │   └── testdata
│   │       ├── rulebindingsfiles
│   │       │   ├── all-rules-all-pods.yaml
│   │       │   ├── all-rules-for-app-nginx-default-ns.yaml
│   │       │   ├── all-rules-for-app-nginx.yaml
│   │       │   ├── all-rules-for-default-ns.yaml
│   │       │   ├── no-rules-for-app-nginx-default-ns.yaml
│   │       │   └── single-rule-for-app-nginx-default-ns.yaml
│   │       └── ruleparamsfiles
│   │           ├── rule-file-params.yaml
│   │           └── rule-ssh-params.yaml
│   └── scan
│       ├── clamav
│       │   ├── clamav.go
│       │   └── config.go
│       ├── fileutil.go
│       ├── k8sutil.go
│       ├── overlay.go
│       └── types.go
├── resources
│   ├── clamav
│   │   ├── Dockerfile
│   │   ├── create-filtered-clam-db.sh
│   │   └── init.sh
│   ├── latest
│   │   └── kubecop-values.yaml
│   └── system-tests
│       └── kubecop-values.yaml
├── scripts
│   ├── install-in-pod.sh
│   ├── open-shell-in-pod.sh
│   ├── resolve-pools-addresses.sh
│   ├── run-system-tests.sh
│   ├── setup-system-test-cluster.sh
│   └── validate-crd.sh
└── system-tests
    ├── all_alerts_from_malicious_app.py
    ├── basic_alert_tests.py
    ├── basic_load_activities.py
    ├── change_rule_binding_in_the_middle.py
    ├── crashlooping-container
    │   └── crashlooping-deploy.yaml
    ├── creation_app_profile_memory_leak.py
    ├── finalization_alert_test.py
    ├── kill_in_the_middle.py
    ├── kubernetes_wrappers.py
    ├── load_10k_alerts_no_memory.py
    ├── load_app_by_app_no_memory_cpu_load.py
    ├── locustimage
    │   ├── Containerfile
    │   └── locustfile.py
    ├── pprof.py
    ├── promtopic.py
    ├── requirements.txt
    ├── resources
    │   ├── locust-deployment.yaml
    │   ├── malicious-job-app-profile-namespaced.yaml
    │   ├── malicious-job-app-profile.yaml
    │   ├── malicious-job.yaml
    │   ├── nginx-app-profile-namespaced.yaml
    │   ├── nginx-app-profile.yaml
    │   ├── nginx-deployment.yaml
    │   └── nginx-service.yaml
    ├── rule_binding_apply_test.py
    ├── rule_binding_crds_files
    │   ├── all-valid.yaml
    │   ├── dup-fields-id-tag.yaml
    │   ├── dup-fields-name-id.yaml
    │   ├── dup-fields-name-tag.yaml
    │   ├── invalid-id.yaml
    │   ├── invalid-name.yaml
    │   └── invalid-tag.yaml
    └── run.py
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: KubeCop CI
2 |
3 | on:
4 | pull_request:
5 | paths-ignore:
6 | - '**/*.md'
7 | branches:
8 | - main
9 |
10 |
11 | jobs:
12 | build:
13 | name: Build and Test
14 | runs-on: ubuntu-latest
15 |
16 | steps:
17 | - name: Checkout code
18 | uses: actions/checkout@v4
19 |
20 | - name: Set up Go
21 | uses: actions/setup-go@v4
22 | with:
23 | go-version: '^1.21'
24 |
25 | - name: Validate binding CRD is up to date
26 | run: make validate-crd
27 | env:
28 | GO111MODULE: on
29 |
30 | - name: Set up Helm
31 | uses: azure/setup-helm@v3
32 |
33 | - name: Validate Helm template
34 | run: |
35 | helm template kubecop chart/kubecop --set kubecop.recording.finalizationDuration=120s -f resources/system-tests/kubecop-values.yaml -n kubescape --debug
36 |
37 | - name: Validate Namespaced Helm template
38 | run: |
39 | helm template kubecop chart/kubecop --set isNamespaced=true --set isClusterScoped=false --set kubecop.recording.finalizationDuration=120s -f resources/system-tests/kubecop-values.yaml -n kubescape --debug
40 |
41 | - name: Run Unit Tests
42 | run: make test
43 | env:
44 | GO111MODULE: on
45 |
46 | - name: Print Coverage Report
47 | run: go tool cover -func=coverage.out
48 |
49 | - name: Build Project
50 | run: make build
51 | env:
52 | GO111MODULE: on
53 |
54 | - name: Build Container Image
55 | run: make build-image
56 | env:
57 | GO111MODULE: on
58 |
--------------------------------------------------------------------------------
/.github/workflows/release-helm-chart.yaml:
--------------------------------------------------------------------------------
1 | name: Helm chart release
2 |
3 | on:
4 | workflow_dispatch:
5 |
6 | workflow_call:
7 | inputs:
8 | COMMIT_REF:
9 | required: true
10 | type: string
11 |
12 | jobs:
13 | helm-chart-release:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - name: Checkout
17 | uses: actions/checkout@v3
18 | with:
19 | fetch-depth: 0
20 | # will change to: ref: release
21 |
22 | - name: git status
23 | run: git status
24 |
25 | - name: Configure Git
26 | run: |
27 | git config user.name "$GITHUB_ACTOR"
28 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
29 |
30 | - name: Install Helm
31 | uses: azure/setup-helm@v3.5
32 |
33 | - name: Run chart-releaser
34 | uses: helm/chart-releaser-action@v1.6.0
35 | with:
36 | charts_dir: "chart"
37 | env:
38 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
39 |
40 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: Release Workflow
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*'
7 |
8 | jobs:
9 | build-and-push:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout code
13 | uses: actions/checkout@v4
14 |
15 | - name: Set up Docker Buildx
16 | uses: docker/setup-buildx-action@v3
17 |
18 | - name: Login to Quay.io
19 | uses: docker/login-action@v3
20 | with:
21 | registry: quay.io/armosec
22 | username: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
23 | password: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
24 |
25 | - name: Extract tag name
26 | run: echo "TAG_NAME=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
27 |
28 | - name: Build and push
29 | uses: docker/build-push-action@v2
30 | with:
31 | context: .
32 | file: ./Containerfile
33 | push: true
34 | tags: quay.io/armosec/kubecop:${{ env.TAG_NAME }}
35 | platforms: linux/amd64,linux/arm64
36 |
37 | create-release:
38 | needs: build-and-push
39 | runs-on: ubuntu-latest
40 | steps:
41 | - name: Create Release
42 | uses: actions/create-release@v1
43 | env:
44 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
45 | with:
46 | tag_name: ${{ github.ref }}
47 | release_name: Release ${{ github.ref }}
48 | draft: true
49 | prerelease: true
50 | body: "Autogenerated release for ${{ github.ref }}"
51 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ### Go ###
2 | # If you prefer the allow list template instead of the deny list, see community template:
3 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
4 | #
5 | # Binaries for programs and plugins
6 | *.exe
7 | *.exe~
8 | *.dll
9 | *.so
10 | *.dylib
11 |
12 | # Test binary, built with `go test -c`
13 | *.test
14 |
15 | # Output of the go coverage tool, specifically when used with LiteIDE
16 | *.out
17 |
18 | # Dependency directories (remove the comment below to include it)
19 | # vendor/
20 |
21 | # Go workspace file
22 | go.work
23 |
24 | ### Linux ###
25 | *~
26 |
27 | # temporary files which can be created if a process still has a handle open of a deleted file
28 | .fuse_hidden*
29 |
30 | # KDE directory preferences
31 | .directory
32 |
33 | # Linux trash folder which might appear on any partition or disk
34 | .Trash-*
35 |
36 | # .nfs files are created when an open file is removed but is still being accessed
37 | .nfs*
38 |
39 | ### VisualStudioCode ###
40 | .vscode/*
41 | !.vscode/settings.json
42 | !.vscode/tasks.json
43 | !.vscode/launch.json
44 | !.vscode/extensions.json
45 | !.vscode/*.code-snippets
46 |
47 | # Local History for Visual Studio Code
48 | .history/
49 |
50 | # Built Visual Studio Code Extensions
51 | *.vsix
52 |
53 | ### VisualStudioCode Patch ###
54 | # Ignore all local history of files
55 | .history
56 | .ionide
57 |
58 | /kubecop
59 | cop_pids.txt
60 | am-values.yaml
61 |
62 | *.pyc
63 | /*.png
64 | /tmp
65 | *.gz
66 | /single-trace
67 | *.pprof
68 | /subprojects
--------------------------------------------------------------------------------
/Containerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.21.1-alpine3.18 as builder
2 | RUN apk add --no-cache git
3 | WORKDIR /go/src/app
4 | COPY . .
5 | RUN CGO_ENABLED=0 GOOS=linux go build -o kubecop ./cmd/main.go
6 |
7 | FROM alpine:3.18
8 | COPY --from=builder /go/src/app/kubecop /kubecop
9 | ENTRYPOINT ["/kubecop"]
10 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Variables
2 | GOCMD = go
3 | GOBUILD_ENVS = CGO_ENABLED=0 GOOS=linux GOARCH=amd64
4 | GOBUILD = $(GOCMD) build
5 | GOCLEAN = $(GOCMD) clean
6 | GOTEST = $(GOCMD) test
7 | GOTEST_SUDO_PREFIX = sudo --preserve-env=HOME --preserve-env=GOPATH
8 | GOGET = $(GOCMD) get
9 | BINARY_NAME = kubecop
10 | GOFILES = $(shell find . -type f -name '*.go')
11 |
12 | # Take image name from env variable or use default
13 | IMAGE_NAME ?= kubecop:latest
14 |
15 |
16 | $(BINARY_NAME): $(GOFILES) go.mod go.sum Makefile
17 | CGO_ENABLED=0 go build -o $(BINARY_NAME) cmd/main.go
18 |
19 | test:
20 | $(GOTEST) -v ./... -coverprofile=coverage.out
21 |
22 | install: $(BINARY_NAME)
23 | ./scripts/install-in-pod.sh $(BINARY_NAME)
24 |
25 | open-shell:
26 | ./scripts/open-shell-in-pod.sh
27 |
28 | close-shell:
29 | cat cop_pids.txt | xargs kill -15
30 |
31 | deploy-dev-pod:
32 | kubectl apply -f chart/kubecop/crds/app-profile.crd.yaml -f chart/kubecop/charts/clustered-crds/crds/runtime-rule-binding.crd.yaml
33 | kubectl apply -f dev/devpod.yaml
34 |
35 | build: $(BINARY_NAME)
36 |
37 | build-image: $(GOFILES) go.mod go.sum Makefile
38 | docker build -t $(IMAGE_NAME) -f ./Containerfile .
39 |
40 | build-image-and-push: build-image
41 | docker push $(IMAGE_NAME)
42 |
43 | clean:
44 | rm -f $(BINARY_NAME)
45 |
46 | validate-crd:
47 | ./scripts/validate-crd.sh
48 |
49 | all: $(BINARY_NAME)
50 |
51 | .PHONY: clean all install deploy-dev-pod test open-shell build validate-crd
--------------------------------------------------------------------------------
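
The `IMAGE_NAME ?=` assignment above means the image name can be overridden from the environment. A quick usage sketch (the registry path here is illustrative, not the project default):

```bash
# Build and push the KubeCop image under a custom name; when IMAGE_NAME is
# unset, the Makefile falls back to kubecop:latest.
IMAGE_NAME=quay.io/example/kubecop:dev make build-image-and-push
```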
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Reporting Security Issues
4 |
5 | To report a security issue or vulnerability, submit a [private vulnerability report via GitHub](https://github.com/armosec/kubecop/security/advisories/new) to the repository maintainers with a description of the issue, the steps to reproduce it, the affected versions, and, if known, mitigations.
6 |
7 | The maintainers will respond within 7 working days of your report. If the issue is confirmed as a vulnerability, we will open a Security Advisory and acknowledge your contribution as part of it. This project follows a 90-day disclosure timeline.
8 |
9 | Other contacts: cncf-kubescape-maintainers@lists.cncf.io
10 |
--------------------------------------------------------------------------------
/artifacts/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/artifacts/.gitkeep
--------------------------------------------------------------------------------
/chart/demo/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/chart/demo/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: kubecop-alertmanager
3 | description: A Helm chart for kubecop-alertmanager
4 |
5 | type: application
6 |
7 | version: 0.0.1
8 |
9 | appVersion: "0.0.1"
10 |
--------------------------------------------------------------------------------
/chart/demo/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "..name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "..fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "..chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "..labels" -}}
37 | helm.sh/chart: {{ include "..chart" . }}
38 | {{ include "..selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "..selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "..name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Create the name of the service account to use
55 | */}}
56 | {{- define "..serviceAccountName" -}}
57 | {{- if .Values.serviceAccount.create }}
58 | {{- default (include "..fullname" .) .Values.serviceAccount.name }}
59 | {{- else }}
60 | {{- default "default" .Values.serviceAccount.name }}
61 | {{- end }}
62 | {{- end }}
63 |
--------------------------------------------------------------------------------
/chart/demo/templates/alertmanager-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: alertmanager-config
5 | namespace: kubescape
6 | data:
7 | alertmanager.yml: |
8 | global:
9 | smtp_smarthost: {{ .Values.kubecop.alertmanager.smtp_endpoint }}
10 | smtp_from: '{{ .Values.kubecop.alertmanager.smtp_from }}'
11 | smtp_auth_username: '{{ .Values.kubecop.alertmanager.smtp_auth_username }}'
12 | smtp_auth_password: '{{ .Values.kubecop.alertmanager.smtp_auth_password }}'
13 | smtp_auth_secret: '{{ .Values.kubecop.alertmanager.smtp_auth_secret }}'
14 | smtp_auth_identity: '{{ .Values.kubecop.alertmanager.smtp_auth_identity }}'
15 | smtp_require_tls: {{ .Values.kubecop.alertmanager.smtp_require_tls }}
16 |
17 | route:
18 | group_by: ['alertname']
19 | receiver: 'email-notifications'
20 |
21 | receivers:
22 | - name: 'email-notifications'
23 | email_configs:
24 | - to: '{{ .Values.kubecop.alertmanager.email_to }}'
25 |
--------------------------------------------------------------------------------
/chart/demo/templates/alertmanager-service.yaml:
--------------------------------------------------------------------------------
1 | # alertmanager-service.yaml
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: alertmanager-kubecop-service
6 | namespace: kubescape
7 | spec:
8 | selector:
9 | app: alertmanager-kubecop # This should match the labels of your Alertmanager pods
10 | ports:
11 | - protocol: TCP
12 | port: 9093 # The port on which Alertmanager is running
13 | targetPort: 9093 # The port to forward traffic to on the selected pods
14 |
--------------------------------------------------------------------------------
/chart/demo/templates/alertmanager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: alertmanager-kubecop
5 | namespace: kubescape
6 | labels:
7 | app: alertmanager-kubecop
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: alertmanager-kubecop
13 | template:
14 | metadata:
15 | labels:
16 | app: alertmanager-kubecop
17 | spec:
18 | containers:
19 | - name: alertmanager
20 | image: quay.io/prometheus/alertmanager:latest
21 | imagePullPolicy: Always
22 | ports:
23 | - containerPort: 9093
24 | volumeMounts: # Mount the Alertmanager configuration
25 | - name: alertmanager-config
26 | mountPath: /etc/alertmanager
27 | volumes:
28 | - name: alertmanager-config
29 | configMap:
30 | name: alertmanager-config
31 |
--------------------------------------------------------------------------------
/chart/demo/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for ..
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | kubecop:
6 | alertmanager:
7 | smtp_endpoint: ""
8 | smtp_from: ""
9 | smtp_auth_username: ""
10 | smtp_auth_password: ""
11 | smtp_require_tls: false
12 | email_to: ""
13 |
14 |
--------------------------------------------------------------------------------
/chart/kubecop/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/chart/kubecop/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: kubecop
3 | description: A Helm chart for Kubernetes Runtime Security detection system KubeCop
4 |
5 | type: application
6 |
7 | version: 0.0.40
8 |
9 | appVersion: "0.0.40"
10 |
11 |
12 | dependencies:
13 | - name: kubecop-clustered-crds
14 | version: 0.0.1
15 | repository: "file://./charts/clustered-crds"
16 | condition: isClusterScoped
17 | - name: kubecop-namespaced-crds
18 | version: 0.0.1
19 | repository: "file://./charts/namespaced-crds"
20 | condition: isNamespaced
--------------------------------------------------------------------------------
/chart/kubecop/charts/clustered-crds/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: kubecop-clustered-crds
3 | description: A Helm chart for clustered CRDs required by Kubernetes Runtime Security detection system KubeCop
4 |
5 | type: application
6 |
7 | version: 0.0.1
8 |
9 | appVersion: "0.0.1"
10 |
--------------------------------------------------------------------------------
/chart/kubecop/charts/clustered-crds/values.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/chart/kubecop/charts/clustered-crds/values.yaml
--------------------------------------------------------------------------------
/chart/kubecop/charts/namespaced-crds/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: kubecop-namespaced-crds
3 | description: A Helm chart for namespaced CRDs required by Kubernetes Runtime Security detection system KubeCop
4 |
5 | type: application
6 |
7 | version: 0.0.1
8 |
9 | appVersion: "0.0.1"
10 |
--------------------------------------------------------------------------------
/chart/kubecop/charts/namespaced-crds/values.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/chart/kubecop/charts/namespaced-crds/values.yaml
--------------------------------------------------------------------------------
/chart/kubecop/crds/app-profile.crd.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | name: applicationprofiles.kubescape.io
5 | spec:
6 | group: kubescape.io
7 | versions:
8 | - name: v1
9 | served: true
10 | storage: true
11 | schema:
12 | openAPIV3Schema:
13 | type: object
14 | properties:
15 | spec:
16 | type: object
17 | properties:
18 | containers:
19 | type: array
20 | items:
21 | type: object
22 | properties:
23 | syscalls:
24 | type: array
25 | items:
26 | type: string
27 | dns:
28 | type: array
29 | items:
30 | type: object
31 | properties:
32 | dnsName:
33 | type: string
34 | addresses:
35 | type: array
36 | items:
37 | type: string
38 | capabilities:
39 | type: array
40 | items:
41 | type: object
42 | properties:
43 | caps:
44 | type: array
45 | items:
46 | type: string
47 | syscall:
48 | type: string
49 | execs:
50 | type: array
51 | items:
52 | type: object
53 | properties:
54 | args:
55 | type: array
56 | items:
57 | type: string
58 | envs:
59 | type: array
60 | items:
61 | type: string
62 | path:
63 | type: string
64 | name:
65 | type: string
66 | opens:
67 | type: array
68 | items:
69 | type: object
70 | properties:
71 | path:
72 | type: string
73 | taskName:
74 | type: string
75 | taskId:
76 | type: integer
77 | flags:
78 | type: array
79 | items:
80 | type: string
81 | networkActivity:
82 | type: object
83 | properties:
84 | incoming:
85 | type: array
86 | items:
87 | type: object
88 | properties:
89 | port:
90 | type: integer
91 | protocol:
92 | type: string
93 | dstEndpoint:
94 | type: string
95 | outgoing:
96 | type: array
97 | items:
98 | type: object
99 | properties:
100 | port:
101 | type: integer
102 | protocol:
103 | type: string
104 | dstEndpoint:
105 | type: string
106 | scope: Namespaced
107 | names:
108 | plural: applicationprofiles
109 | singular: applicationprofile
110 | kind: ApplicationProfile
111 |
--------------------------------------------------------------------------------
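
For orientation, a minimal `ApplicationProfile` instance conforming to the schema above might look like the following (a sketch; the profile name and the recorded exec/open/syscall entries are illustrative, not taken from the repo's own profiles):

```bash
# Apply a small hand-written ApplicationProfile matching the CRD schema above.
kubectl apply -f - <<'EOF'
apiVersion: kubescape.io/v1
kind: ApplicationProfile
metadata:
  name: nginx-deployment   # hypothetical profile name
  namespace: default
spec:
  containers:
    - name: nginx
      execs:
        - path: /usr/sbin/nginx
          args: []
      opens:
        - path: /etc/nginx/nginx.conf
          flags: ["O_RDONLY"]
      syscalls: ["open", "read", "write"]
EOF
```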
/chart/kubecop/files/clamd.conf:
--------------------------------------------------------------------------------
1 | Foreground yes
2 | Debug no
3 | LeaveTemporaryFiles no
4 | LogTime yes
5 | LogClean yes
6 | LocalSocket /run/clamav/clamd.sock
7 | LocalSocketGroup clamav
8 | LocalSocketMode 660
9 | TCPSocket 3310
10 | TCPAddr 127.0.0.1
11 | ExcludePath ^/proc/
12 | ExcludePath ^/sys/
13 | ExcludePath ^/host/proc/
14 | ExcludePath ^/host/sys/
15 | ExcludePath ^/host/dev/
16 | ExcludePath ^/host/run/
17 | MaxDirectoryRecursion 20
18 | ExtendedDetectionInfo yes
19 |
--------------------------------------------------------------------------------
/chart/kubecop/files/freshclam.conf:
--------------------------------------------------------------------------------
1 | Foreground yes
2 | DatabaseDirectory /var/lib/clamav
3 | UpdateLogFile /dev/stdout
4 | Debug no
5 | LogFileMaxSize 0
6 | LogTime yes
7 | DatabaseOwner root
8 | DNSDatabaseInfo current.cvd.clamav.net
9 | DatabaseMirror database.clamav.net
10 | MaxAttempts 3
11 | ScriptedUpdates no
12 | CompressLocalDatabase no
13 | TestDatabases yes
14 | Checks 0
15 | ConnectTimeout 30
16 | ReceiveTimeout 60
17 |
--------------------------------------------------------------------------------
/chart/kubecop/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 |
2 | {{/* validate that isClusterScoped and isNamespaced are mutually exclusive */}}
3 | {{- if and .Values.isClusterScoped .Values.isNamespaced }}
4 | {{- fail "isClusterScoped and isNamespaced cannot both be true" }}
5 | {{- end }}
6 |
7 | {{/*
8 | Expand the name of the chart.
9 | */}}
10 | {{- define "..name" -}}
11 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
12 | {{- end }}
13 |
14 | {{/*
15 | Create a default fully qualified app name.
16 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
17 | If release name contains chart name it will be used as a full name.
18 | */}}
19 | {{- define "..fullname" -}}
20 | {{- if .Values.fullnameOverride }}
21 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
22 | {{- else }}
23 | {{- $name := default .Chart.Name .Values.nameOverride }}
24 | {{- if contains $name .Release.Name }}
25 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
26 | {{- else }}
27 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
28 | {{- end }}
29 | {{- end }}
30 | {{- end }}
31 |
32 | {{/*
33 | Create chart name and version as used by the chart label.
34 | */}}
35 | {{- define "..chart" -}}
36 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
37 | {{- end }}
38 |
39 | {{/*
40 | Common labels
41 | */}}
42 | {{- define "..labels" -}}
43 | helm.sh/chart: {{ include "..chart" . }}
44 | {{ include "..selectorLabels" . }}
45 | {{- if .Chart.AppVersion }}
46 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
47 | {{- end }}
48 | app.kubernetes.io/managed-by: {{ .Release.Service }}
49 | {{- end }}
50 |
51 | {{/*
52 | Selector labels
53 | */}}
54 | {{- define "..selectorLabels" -}}
55 | app.kubernetes.io/name: {{ include "..name" . }}
56 | app.kubernetes.io/instance: {{ .Release.Name }}
57 | {{- end }}
58 |
59 | {{/*
60 | Create the name of the service account to use
61 | */}}
62 | {{- define "..serviceAccountName" -}}
63 | {{- if .Values.serviceAccount.create }}
64 | {{- default (include "..fullname" .) .Values.serviceAccount.name }}
65 | {{- else }}
66 | {{- default "default" .Values.serviceAccount.name }}
67 | {{- end }}
68 | {{- end }}
69 |
--------------------------------------------------------------------------------
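
The guard at the top of this helper makes `isClusterScoped` and `isNamespaced` mutually exclusive at render time. A quick way to see it fire (a sketch, using the same chart path as the CI workflow):

```bash
# Rendering with both scope flags set should abort with the fail message
# from _helpers.tpl instead of producing manifests.
helm template kubecop chart/kubecop \
  --set isClusterScoped=true \
  --set isNamespaced=true
# Error: ... isClusterScoped and isNamespaced cannot both be true
```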
/chart/kubecop/templates/clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: {{ include "..fullname" . }}
5 | rules:
6 | - apiGroups: [""]
7 | resources: ["namespaces", "pods", "serviceaccounts", "services"]
8 | verbs: ["list", "get", "watch"]
9 | - apiGroups: ["apps","batch","extensions"]
10 | resources: ["*"]
11 | verbs: ["get"]
12 | - apiGroups: ["kubescape.io"]
13 | resources: ["applicationprofiles", "namespaces/*/*", "namespaces/*/applicationprofiles/*"]
14 | verbs: ["watch", "create", "update", "get", "list", "delete", "patch"]
15 | - apiGroups: ["kubescape.io"]
16 | resources: ["runtimerulealertbindings"]
17 | verbs: ["list", "watch"]
18 |
--------------------------------------------------------------------------------
/chart/kubecop/templates/clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: {{ include "..serviceAccountName" . }}-binding
5 | subjects:
6 | - kind: ServiceAccount
7 | name: {{ include "..serviceAccountName" . }}
8 | namespace: {{ .Release.Namespace }}
9 | roleRef:
10 | kind: ClusterRole
11 | name: {{ include "..fullname" . }}
12 | apiGroup: rbac.authorization.k8s.io
--------------------------------------------------------------------------------
/chart/kubecop/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: {{ .Release.Name }}-clamav
5 | namespace: {{ .Release.Namespace }}
6 | data:
7 | clamd.conf: |-
8 | {{ .Files.Get "files/clamd.conf" | indent 4 }}
9 | freshclam.conf: |-
10 | {{ .Files.Get "files/freshclam.conf" | indent 4 }}
11 |
--------------------------------------------------------------------------------
/chart/kubecop/templates/controller-statefulset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: {{ include "..fullname" . }}-controller
5 | labels:
6 | {{- include "..labels" . | nindent 4 }}
7 | spec:
8 | serviceName: {{ include "..fullname" . }}-controller
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | {{- include "..selectorLabels" . | nindent 6 }}
13 | template:
14 | metadata:
15 | annotations:
16 | {{- with .Values.podAnnotations }}
17 | {{- toYaml . | nindent 8 }}
18 | {{- end }}
19 | labels:
20 | {{- include "..selectorLabels" . | nindent 8 }}
21 | spec:
22 | {{- with .Values.imagePullSecrets }}
23 | imagePullSecrets:
24 | {{- toYaml . | nindent 8 }}
25 | {{- end }}
26 | serviceAccountName: {{ include "..serviceAccountName" . }}
27 | securityContext:
28 | {{- toYaml .Values.podSecurityContext | nindent 8 }}
29 | containers:
30 | - name: kubecop-controller
31 | terminationMessagePolicy: FallbackToLogsOnError
32 | image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
33 | imagePullPolicy: {{ .Values.image.pullPolicy }}
34 | args:
35 | - --mode-controller
36 | securityContext:
37 | {{- toYaml .Values.securityContextNormal | nindent 12 }}
38 | resources:
39 | {{- toYaml .Values.kubecop.resources | nindent 12 }}
40 | env:
41 | {{- if .Values.isNamespaced }}
42 | - name: STORE_NAMESPACE
43 | valueFrom:
44 | fieldRef:
45 | fieldPath: metadata.namespace
46 | {{- end }}
47 | - name: NODE_NAME
48 | valueFrom:
49 | fieldRef:
50 | fieldPath: spec.nodeName
51 | - name: POD_NAME
52 | valueFrom:
53 | fieldRef:
54 | fieldPath: metadata.name
55 | tolerations:
56 | {{- toYaml .Values.tolerations | nindent 8 }}
57 |
--------------------------------------------------------------------------------
/chart/kubecop/templates/default-rule-binding.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.installDefaultAlertRuleBinding }}
2 | apiVersion: kubescape.io/v1
3 | kind: RuntimeRuleAlertBinding
4 | metadata:
5 | {{- if .Values.isNamespaced }}
6 | namespace: {{ .Release.Namespace }}
7 | {{- end }}
8 | name: all-rules-all-pods
9 | spec:
10 | namespaceSelector:
11 | # exclude K8s system namespaces
12 | matchExpressions:
13 | - key: "kubernetes.io/metadata.name"
14 | operator: "NotIn"
15 | values:
16 | - "kube-system"
17 | - "kube-public"
18 | - "kube-node-lease"
19 | - "kubeconfig"
20 | podSelector:
21 | matchExpressions:
22 | - key: "app.kubernetes.io/name"
23 | operator: "NotIn"
24 | values:
25 | - {{ include "..name" . }}
26 | rules:
27 | - ruleName: "Unexpected process launched"
28 | - ruleName: "Unexpected file access"
29 | parameters:
30 | ignoreMounts: true
31 | ignorePrefixes: ["/proc", "/run/secrets/kubernetes.io/serviceaccount", "/var/run/secrets/kubernetes.io/serviceaccount", "/tmp"]
32 | - ruleName: "Unexpected system call"
33 | - ruleName: "Unexpected capability used"
34 | - ruleName: "Unexpected domain request"
35 | - ruleName: "Unexpected Service Account Token Access"
36 | - ruleName: "Kubernetes Client Executed"
37 | - ruleName: "Exec from malicious source"
38 | - ruleName: "Kernel Module Load"
39 | - ruleName: "Exec Binary Not In Base Image"
40 | - ruleName: "Malicious SSH Connection"
41 | - ruleName: "Crypto Miner detected"
42 | - ruleName: "Exec from mount"
43 |
44 | {{- end }}
--------------------------------------------------------------------------------
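
Beyond the catch-all binding above, a `RuntimeRuleAlertBinding` can also be scoped narrowly. A sketch of a single-rule binding for one app, assuming the selectors follow the standard Kubernetes LabelSelector shape used in the default binding (the labels and namespace here are illustrative, modeled on the testdata file names under pkg/rulebindingstore):

```bash
# Bind only the process-launch rule to nginx pods in the default namespace.
kubectl apply -f - <<'EOF'
apiVersion: kubescape.io/v1
kind: RuntimeRuleAlertBinding
metadata:
  name: single-rule-for-app-nginx
spec:
  namespaceSelector:
    matchLabels:
      kubernetes.io/metadata.name: default
  podSelector:
    matchLabels:
      app: nginx
  rules:
    - ruleName: "Unexpected process launched"
EOF
```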
/chart/kubecop/templates/podmonitor.yaml:
--------------------------------------------------------------------------------
1 | # If the Prometheus exporter is enabled, add a PodMonitor for scraping
2 | {{- if .Values.kubecop.prometheusExporter.enabled }}
3 | apiVersion: monitoring.coreos.com/v1
4 | kind: PodMonitor
5 | metadata:
6 | labels:
7 | release: prometheus
8 | name: kubecop-pod-monitor
9 | namespace: kubescape
10 | spec:
11 | podMetricsEndpoints:
12 | - port: http-metrics
13 | selector:
14 | matchLabels:
15 | app.kubernetes.io/name: kubecop
16 | {{ end }}
--------------------------------------------------------------------------------
/chart/kubecop/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "..serviceAccountName" . }}
6 | labels:
7 | {{- include "..labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/chart/kubecop/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for ..
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | image:
6 | repository: quay.io/armosec/kubecop
7 | pullPolicy: Always
8 | tag: "v0.0.40"
9 |
10 | imagePullSecrets: []
11 | nameOverride: ""
12 | fullnameOverride: "kubecop"
13 |
14 | kubecop:
15 | resources:
16 | limits:
17 | cpu: 500m
18 | memory: 512Mi
19 | requests:
20 | cpu: 100m
21 | memory: 256Mi
22 | gomemlimit:
23 | enabled: true
24 | # It is recommended to set this value to 3/4 of the memory limit
25 | limit: 384MiB
26 | recording:
27 | samplingInterval: 60s
28 | finalizationDuration: 900s
29 | finalizationJitter: 120s
30 | alertmanager:
31 | enabled: false
32 | endpoints: "localhost:9093"
33 | httpEndpoint:
34 | enabled: false
35 | url: "http://synchronizer.kubescape.svc.cluster.local/apis/v1/kubescape.io/v1/RuntimeAlerts"
36 | syslog:
37 | enabled: false
38 | endpoint: "localhost:514"
39 | protocol: "udp"
40 | csv:
41 | enabled: false
42 | path: "/tmp/kubecop.csv"
43 | malwarePath: "/tmp/kubecop-malware.csv"
44 | prometheusExporter:
45 | enabled: false
46 | pprofserver:
47 | enabled: false
48 | partialProfiles: # If enabled, application profiles won't be enriched with the following prefixes and mounts.
49 | enabled: true
50 | ignoreMounts: "true"
51 | ignorePrefixes: "/proc,/tmp"
52 |
53 | clamAV:
54 | enabled: false
55 | host: "localhost"
56 | port: "3310"
57 | path: "/host"
58 | scanInterval: 1h
59 | image:
60 | repository: quay.io/armosec/klamav
61 | pullPolicy: Always
62 | tag: "beta5"
63 | resources:
64 | limits:
65 | cpu: 300m
66 | memory: 512Mi
67 | requests:
68 | cpu: 100m
69 | memory: 256Mi
70 |
71 | serviceAccount:
72 | # Specifies whether a service account should be created
73 | create: true
74 | # Annotations to add to the service account
75 | annotations: {}
76 | # The name of the service account to use.
77 | # If not set and create is true, a name is generated using the fullname template
78 | name: "kubecop"
79 |
80 | podAnnotations: {}
81 |
82 | podSecurityContext: {}
83 |
84 | securityContext:
85 | privileged: true
86 | capabilities:
87 | add:
88 | - SYS_ADMIN
89 | - NET_ADMIN
90 |
91 | securityContextNormal: {}
92 |
93 | nodeSelector: {}
94 |
95 | tolerations:
96 | - effect: NoSchedule
97 | operator: Exists
98 | - effect: NoExecute
99 | operator: Exists
100 |
101 | affinity:
102 | nodeAffinity:
103 | requiredDuringSchedulingIgnoredDuringExecution:
104 | nodeSelectorTerms:
105 | - matchExpressions:
106 | - key: kubernetes.io/os
107 | operator: In
108 | values:
109 | - linux
110 | - key: kubernetes.io/arch
111 | operator: In
112 | values:
113 | - arm64
114 | - amd64
115 |
116 | # default rule binding to catch all pods
117 | installDefaultAlertRuleBinding: true
118 |
119 | isNamespaced: false
120 | isClusterScoped: true
121 |
--------------------------------------------------------------------------------
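
These defaults can be overridden at install time; the CI workflow above, for example, overrides `kubecop.recording.finalizationDuration`. A minimal install sketch enabling two of the optional exporters (the value paths come from the values.yaml above; the Alertmanager endpoint and namespace are illustrative):

```bash
# Install KubeCop with the Alertmanager and Prometheus exporters enabled.
helm upgrade --install kubecop chart/kubecop \
  -n kubescape --create-namespace \
  --set kubecop.alertmanager.enabled=true \
  --set kubecop.alertmanager.endpoints="alertmanager-kubecop-service.kubescape.svc:9093" \
  --set kubecop.prometheusExporter.enabled=true
```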
/cmd/malicious-exe/Containerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.21.1-alpine3.18 as builder
2 | WORKDIR /go/src/app
3 | COPY . .
4 | RUN CGO_ENABLED=0 GOOS=linux go build -o malicious cmd/malicious-exe/malicious.go
5 |
6 | FROM alpine:3.18
7 | COPY --from=builder /go/src/app/malicious /malicious
8 | ENTRYPOINT ["/malicious"]
9 |
--------------------------------------------------------------------------------
/cmd/single-tracer-test/build_n_run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | go build -o single-tracer single-trace.go || exit 1
3 | kubectl apply -f ../../dev/dev-daemonset.yaml
4 | POD_NAME=$(kubectl -n tracer-example get pods -l k8s-app=tracer-example -o jsonpath="{.items[0].metadata.name}")
5 | kubectl cp single-tracer tracer-example/$POD_NAME:/bin/single-tracer
6 | kubectl exec -n tracer-example -it $POD_NAME -- /bin/single-tracer
--------------------------------------------------------------------------------
/cmd/single-tracer-test/single-trace.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 | "os/signal"
6 | "syscall"
7 |
8 | "net/http"
9 | _ "net/http/pprof"
10 |
11 | log "github.com/sirupsen/logrus"
12 |
13 | containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection"
14 | "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection/networktracer"
15 | tracernetwork "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/tracer"
16 | tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types"
17 | tracercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/tracer-collection"
18 | eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types"
19 | "github.com/inspektor-gadget/inspektor-gadget/pkg/utils/host"
20 | )
21 |
22 | var callbackCount int64
23 |
24 | func networkEventCallback(event *tracernetworktype.Event) {
25 | if event.Type == eventtypes.NORMAL {
26 | callbackCount++
27 | if callbackCount%100000 == 0 {
28 | log.Printf("callbackCount: %d\n", callbackCount)
29 | }
30 | }
31 | }
32 |
33 | func main() {
34 |
35 | go func() {
36 | log.Println(http.ListenAndServe("localhost:6060", nil))
37 | }()
38 |
39 | host.Init(host.Config{AutoMountFilesystems: true})
40 |
41 | // Use container collection to get notified for new containers
42 | containerCollection := &containercollection.ContainerCollection{}
43 |
44 | // Create a tracer collection instance
45 | tracerCollection, err := tracercollection.NewTracerCollection(containerCollection)
46 | if err != nil {
47 | log.Fatalf("Failed to create tracer collection: %v\n", err)
48 | }
49 |
50 | // Define the different options for the container collection instance
51 | opts := []containercollection.ContainerCollectionOption{
52 | containercollection.WithTracerCollection(tracerCollection),
53 |
54 | // Get containers created with runc
55 | containercollection.WithRuncFanotify(),
56 |
57 | // Get containers created with docker
58 | containercollection.WithCgroupEnrichment(),
59 |
60 | // Enrich events with Linux namespaces information, it is needed for per container filtering
61 | containercollection.WithLinuxNamespaceEnrichment(),
62 | }
63 |
64 | // Initialize the container collection
65 | if err := containerCollection.Initialize(opts...); err != nil {
66 | log.Fatalf("failed to initialize container collection: %s\n", err)
67 | }
68 |
69 | containerSelector := containercollection.ContainerSelector{}
70 | if err := tracerCollection.AddTracer("networkTraceName", containerSelector); err != nil {
71 | log.Fatalf("error adding tracer: %v\n", err)
72 | }
73 |
74 | tracerNetwork, err := tracernetwork.NewTracer()
75 | if err != nil {
76 | log.Printf("error creating tracer: %s\n", err)
77 | }
78 | tracerNetwork.SetEventHandler(networkEventCallback)
79 |
80 | config := &networktracer.ConnectToContainerCollectionConfig[tracernetworktype.Event]{
81 | Tracer: tracerNetwork,
82 | Resolver: containerCollection,
83 | Selector: containerSelector,
84 | Base: tracernetworktype.Base,
85 | }
86 |
87 | _, err = networktracer.ConnectToContainerCollection(config)
88 |
89 | if err != nil {
90 | log.Fatalf("error creating tracer: %s\n", err)
91 | }
92 |
93 | // Wait for shutdown signal
94 | shutdown := make(chan os.Signal, 1)
95 | signal.Notify(shutdown, os.Interrupt, syscall.SIGTERM)
96 | <-shutdown
97 | log.Println("Shutting down...")
98 |
99 | }
100 |
--------------------------------------------------------------------------------
/demo/assets/alertmanager.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/alertmanager.png
--------------------------------------------------------------------------------
/demo/assets/fileless-malware.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/fileless-malware.png
--------------------------------------------------------------------------------
/demo/assets/kubectl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/kubectl.png
--------------------------------------------------------------------------------
/demo/assets/ls.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/ls.png
--------------------------------------------------------------------------------
/demo/assets/malwares.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/malwares.png
--------------------------------------------------------------------------------
/demo/assets/ping.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/ping.png
--------------------------------------------------------------------------------
/demo/assets/pods.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/pods.png
--------------------------------------------------------------------------------
/demo/assets/service-account-token.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/service-account-token.png
--------------------------------------------------------------------------------
/demo/assets/webapp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/demo/assets/webapp.png
--------------------------------------------------------------------------------
/demo/general_attack/commands.md:
--------------------------------------------------------------------------------
1 | # Service Account Token
2 | ```
3 | cat /run/secrets/kubernetes.io/serviceaccount/token
4 | ```
5 |
6 | # K8s client - From inside a pod
7 | ```
8 | arch=$(uname -m | sed 's/x86_64/amd64/g' | sed 's/aarch64/arm64/g')
9 | curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$arch/kubectl"
10 | ls -l kubectl
11 | mv kubectl /var/tmp/kubectl
12 | chmod +x /var/tmp/kubectl
13 | cat /var/run/secrets/kubernetes.io/serviceaccount/token > /var/tmp/token
14 | /var/tmp/kubectl --server https://kubernetes.default --insecure-skip-tls-verify --token $(cat /var/tmp/token) get pods
15 | ```
--------------------------------------------------------------------------------
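
Both actions above are covered by the default rule binding: reading the token maps to the "Unexpected Service Account Token Access" rule, and running kubectl maps to "Kubernetes Client Executed". A sketch of replaying the first one inside the demo pod (pod name from ping-app.yaml below):

```bash
# Read the service account token from inside the ping-app pod and watch
# the KubeCop exporter output for the corresponding alert.
kubectl exec ping-app -- sh -c \
  'cat /run/secrets/kubernetes.io/serviceaccount/token >/dev/null'
```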
/demo/general_attack/webapp/Containerfile:
--------------------------------------------------------------------------------
1 | # Use the official PHP image
2 | FROM php:7.4-apache
3 |
4 | # Install ping
5 | RUN apt-get update && \
6 | apt-get install -y iputils-ping wget curl && \
7 | rm -rf /var/lib/apt/lists/*
8 |
9 | # Copy the PHP script and index.html files into the container
10 | COPY ping.php /var/www/html/
11 | COPY index.html /var/www/html/
12 |
13 | # Expose port 80 for Apache
14 | EXPOSE 80
15 |
16 | # Start Apache in the foreground
17 | CMD ["apache2-foreground"]
18 |
--------------------------------------------------------------------------------
/demo/general_attack/webapp/index.html:
--------------------------------------------------------------------------------
1 | <!-- (page markup lost in extraction) An "IP Ping Tool" page: a form that takes an IP address and submits it to ping.php, plus inline styling. -->
--------------------------------------------------------------------------------
/demo/general_attack/webapp/ping-app.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: ping-app
5 | labels:
6 | app: ping-app
7 | spec:
8 |
9 | containers:
10 | - name: ping-app
11 | image: docker.io/amitschendel/ping-app:latest
12 | imagePullPolicy: Always
13 | ports:
14 | - containerPort: 80
15 | ---
16 | apiVersion: v1
17 | kind: Service
18 | metadata:
19 | name: ping-app
20 | labels:
21 | app: ping-app
22 | spec:
23 | selector:
24 | app: ping-app
25 | ports:
26 | - protocol: TCP
27 | port: 80
28 | targetPort: 80
29 | ---
30 | apiVersion: rbac.authorization.k8s.io/v1
31 | kind: Role
32 | metadata:
33 | namespace: default
34 | name: ping-app-role
35 | rules:
36 | - apiGroups: [""]
37 | resources: ["*"]
38 | verbs: ["get", "list", "watch", "create", "update", "delete"]
39 | ---
40 | apiVersion: rbac.authorization.k8s.io/v1
41 | kind: RoleBinding
42 | metadata:
43 | namespace: default
44 | name: ping-app-role-binding
45 | subjects:
46 | - kind: ServiceAccount
47 | name: "default"
48 | namespace: default
49 | roleRef:
50 | kind: Role
51 | name: ping-app-role
52 | apiGroup: rbac.authorization.k8s.io
53 |
--------------------------------------------------------------------------------
/demo/general_attack/webapp/ping.php:
--------------------------------------------------------------------------------
1 | ";
10 | echo "Ping results for $ip:
";
11 |
12 | // Iterate through each line of the output
13 | foreach ($output as $line) {
14 | // Highlight successful pings in green
15 | if (strpos($line, "icmp_seq") !== false && strpos($line, "time=") !== false) {
16 | echo "" . htmlspecialchars($line) . "
";
17 | } else {
18 | echo htmlspecialchars($line) . "
";
19 | }
20 | }
21 |
22 | echo "";
23 |
24 | // Display the return status
25 | echo "Return status: $return_var";
26 | ?>
--------------------------------------------------------------------------------
/demo/general_attack/webapp/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Kill any existing port forwards
4 | echo "[+] Killing any existing port forwards"
5 | killall kubectl 2>/dev/null
6 |
7 | # Apply the YAML file for the web app
8 | echo "[+] Applying YAML file for the web app"
9 | kubectl apply -f demo/general_attack/webapp/ping-app.yaml
10 |
11 | # Wait for the web app to be ready
12 | echo "[+] Waiting for the web app to be ready"
13 | kubectl wait --for=condition=ready pod -l app=ping-app
14 |
15 | # Port forward pod port 80 to localhost:8080
16 | echo "[+] Port forwarding from port 80 to localhost:8080"
17 | kubectl port-forward pod/ping-app 8080:80 >/dev/null 2>&1 &
18 |
19 | # Wait for the port forward to be ready
20 | echo "[+] Waiting for the port forward to be ready"
21 | sleep 1
22 | echo "[+] The web app is ready"
23 |
--------------------------------------------------------------------------------
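
Once the port forward is up, the ping endpoint can be exercised over HTTP; because the page passes the address to `ping` via the shell (see ping.php above), appending a command demonstrates the injection this demo exercises (a sketch; the `ip` query parameter name is an assumption):

```bash
# Normal use: ping a benign address through the web app.
curl 'http://localhost:8080/ping.php?ip=127.0.0.1'
# Command injection: the shell runs `id` after the ping completes.
curl 'http://localhost:8080/ping.php?ip=127.0.0.1;id'
```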
/demo/miner/Containerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | WORKDIR /usr/app/src
4 |
5 | COPY config.json ./
6 | COPY xmrig ./
7 |
8 | CMD [ "./xmrig", "--config=config.json"]
--------------------------------------------------------------------------------
/demo/miner/miner-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: k8s-miner-deployment
5 | namespace: kubescape
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: k8s-miner
11 | template:
12 | metadata:
13 | labels:
14 | app: k8s-miner
15 | annotations:
16 | karpenter.sh/do-not-disrupt: "true"
17 | spec:
18 | containers:
19 | - name: k8s-miner
20 | image: docker.io/amitschendel/crypto-miner-1
21 | imagePullPolicy: IfNotPresent
22 | resources:
23 | requests:
24 | memory: "3Gi"
25 | cpu: "3"
--------------------------------------------------------------------------------
/dev/devpod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: kapprofiler-dev-env
5 | ---
6 | apiVersion: v1
7 | kind: ServiceAccount
8 | metadata:
9 | name: kapprofiler-dev-env
10 | namespace: kapprofiler-dev-env
11 | ---
12 | kind: ClusterRoleBinding
13 | apiVersion: rbac.authorization.k8s.io/v1
14 | metadata:
15 | name: kapprofiler-dev-env-cluster-role-binding
16 | subjects:
17 | - kind: ServiceAccount
18 | name: kapprofiler-dev-env
19 | namespace: kapprofiler-dev-env
20 | roleRef:
21 | kind: ClusterRole
22 | name: cluster-admin
23 | apiGroup: rbac.authorization.k8s.io
24 | ---
25 | apiVersion: apps/v1
26 | kind: DaemonSet
27 | metadata:
28 | name: kapprofiler-dev-env
29 | namespace: kapprofiler-dev-env
30 | labels:
31 | k8s-app: kapprofiler-dev-env
32 | spec:
33 | selector:
34 | matchLabels:
35 | k8s-app: kapprofiler-dev-env
36 | template:
37 | metadata:
38 | labels:
39 | k8s-app: kapprofiler-dev-env
40 | spec:
41 | serviceAccount: kapprofiler-dev-env
42 | hostPID: true
43 | hostNetwork: false
44 | containers:
45 | - name: dev-env
46 | terminationMessagePolicy: FallbackToLogsOnError
47 | image: ubuntu:latest
48 | command: ["/bin/sleep"]
49 | args: ["infinity"]
50 | imagePullPolicy: Always
51 | env:
52 | - name: STDOUT_ENABLED
53 | value: "true"
54 | # - name: ALERTMANAGER_URL
55 | # value: "alertmanager.default.svc.cluster.local:9093"
56 | - name: NODE_NAME
57 | valueFrom:
58 | fieldRef:
59 | fieldPath: spec.nodeName
60 | - name: POD_NAME
61 | valueFrom:
62 | fieldRef:
63 | fieldPath: metadata.name
64 | - name: HOST_ROOT
65 | value: "/host"
66 | securityContext:
67 | privileged: true
68 | capabilities:
69 | add:
70 | # fanotify requires CAP_SYS_ADMIN
71 | - SYS_ADMIN
72 | volumeMounts:
73 | - name: host
74 | mountPath: /host
75 | - name: run
76 | mountPath: /run
77 | - name: modules
78 | mountPath: /lib/modules
79 | - name: debugfs
80 | mountPath: /sys/kernel/debug
81 | - name: cgroup
82 | mountPath: /sys/fs/cgroup
83 | - name: bpffs
84 | mountPath: /sys/fs/bpf
85 | tolerations:
86 | - effect: NoSchedule
87 | operator: Exists
88 | - effect: NoExecute
89 | operator: Exists
90 | volumes:
91 | - name: host
92 | hostPath:
93 | path: /
94 | - name: run
95 | hostPath:
96 | path: /run
97 | - name: cgroup
98 | hostPath:
99 | path: /sys/fs/cgroup
100 | - name: modules
101 | hostPath:
102 | path: /lib/modules
103 | - name: bpffs
104 | hostPath:
105 | path: /sys/fs/bpf
106 | - name: debugfs
107 | hostPath:
108 | path: /sys/kernel/debug
109 |
--------------------------------------------------------------------------------
/dev/nginx/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: nginx
9 | replicas: 1
10 | template:
11 | metadata:
12 | labels:
13 | app: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx:1.14.2
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | name: nginx-service
25 | spec:
26 | selector:
27 | app: nginx
28 | ports:
29 | - protocol: TCP
30 | port: 80
31 | targetPort: 80
--------------------------------------------------------------------------------
/docs/development.md:
--------------------------------------------------------------------------------
1 | # Development environment for KubeCop
2 |
3 | ## Simple development environment
4 |
5 | The simple development environment lets you run KubeCop in a single-node cluster (minikube, kind) from within a development Pod.
6 |
7 | Clone this repository, then run the following:
8 | ```bash
9 | make deploy-dev-pod # Deploying dev pod on your cluster
10 | make install # Build and deploy the binaries (installing them in the dev Pod)
11 | make open-shell # Open a shell on the development Pods
12 | ```
13 |
14 | > **Note:** make sure to configure the [exporters](pkg/exporters/README.md) before running KubeCop.
15 |
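For example, to keep alerts on stdout and also ship them to the demo Alertmanager, you can export the variables the dev Pod already defines (the Alertmanager URL mirrors the commented-out value in `dev/devpod.yaml`; adjust it to your cluster):

```bash
export STDOUT_ENABLED=true
export ALERTMANAGER_URL=alertmanager.default.svc.cluster.local:9093
```
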
16 | In the shell, you can run the `kubecop` binary. This gives you an agile environment for developing and testing.
17 |
18 | ```bash
19 | $ kubecop
20 | ```
21 |
22 |
23 | To test KubeCop, install the application profile for Nginx and deploy Nginx from a different shell:
24 | ```bash
25 | kubectl apply -f dev/nginx/nginx-app-profile.yaml -f dev/nginx/nginx-deployment.yaml
26 | ```
27 |
28 | Now open a shell on the Nginx Pod, which will trigger a non-whitelisted exec alert in KubeCop:
29 | ```bash
30 | kubectl exec -it $(kubectl get pods -l app=nginx -o=jsonpath='{.items[0].metadata.name}') -- sh
31 | ```
32 |
33 | You should see this on the KubeCop console:
34 | ```
35 | &{nginx ad5d83bb20617b086ec8ec384ac76976d2ac4aa39d8380f2ae3b0080d205edc5 nginx-deployment-cbdccf466-jhvb7 default 1699770928201031673 0} - Alert exec call "/bin/sh" is not whitelisted by application profile
36 | ```

37 | ## Tear down
38 | ```bash
39 | make close-shell # Close the shell on the development Pods
40 | ```
41 |
42 | ## Running KubeCop at system level
43 |
44 | To test KubeCop as part of the full system pipeline locally, set up a cluster connection (minikube or kind), then build the KubeCop image locally using:
45 | ```bash
46 | make build-image
47 | ```
48 |
49 | Once this is done, you can bring up the Prometheus stack and the KubeCop Helm deployment (with the image built above) using the script:
50 | ```bash
51 | ./scripts/setup-system-test-cluster.sh
52 | ```
53 |
54 | ## Getting pprof samples from KubeCop
55 |
56 | Run KubeCop with the `_PPROF_SERVER=enable` environment variable, or enable it through Helm with `kubecop.pprofserver.enabled=true`.
57 |
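For example, a local Helm install with the pprof server enabled might look like this (the release name is illustrative; the chart lives in `chart/kubecop`):

```bash
helm upgrade --install kubecop chart/kubecop \
  --set kubecop.pprofserver.enabled=true
```
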
58 | Then pull the sample file and see the results with these commands:
59 | ```bash
60 | curl http://:6060/debug/pprof/profile?seconds=120 -o cpu.pd.gz
61 | curl http://:6060/debug/pprof/heap > /tmp/heap.out
62 | go tool pprof -http=:8082 cpu.pd.gz
63 | ```
64 |
65 |
--------------------------------------------------------------------------------
/docs/images/kubecop-software-design.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/docs/images/kubecop-software-design.png
--------------------------------------------------------------------------------
/pkg/approfilecache/stats_prometheus.go:
--------------------------------------------------------------------------------
1 | package approfilecache
2 |
3 | import (
4 | "github.com/prometheus/client_golang/prometheus"
5 | )
6 |
7 | type prometheusMetric struct {
8 | createCounter prometheus.Counter
9 | updateCounter prometheus.Counter
10 | deleteCounter prometheus.Counter
11 | }
12 |
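// createPrometheusMetric builds the application-profile create/update/delete counters and registers them with the default Prometheus registry; destroy unregisters them again.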
13 | func createPrometheusMetric() *prometheusMetric {
14 | createCounter := prometheus.NewCounter(prometheus.CounterOpts{
15 | Name: "kubecop_application_profile_create_counter",
16 | Help: "The total number of application profile creations",
17 | })
18 | prometheus.MustRegister(createCounter)
19 |
20 | updateCounter := prometheus.NewCounter(prometheus.CounterOpts{
21 | Name: "kubecop_application_profile_update_counter",
22 | Help: "The total number of application profile updates",
23 | })
24 | prometheus.MustRegister(updateCounter)
25 |
26 | deleteCounter := prometheus.NewCounter(prometheus.CounterOpts{
27 | Name: "kubecop_application_profile_delete_counter",
28 | Help: "The total number of application profile deletions",
29 | })
30 | prometheus.MustRegister(deleteCounter)
31 |
32 | return &prometheusMetric{
33 | createCounter: createCounter,
34 | updateCounter: updateCounter,
35 | deleteCounter: deleteCounter,
36 | }
37 | }
38 |
39 | func (p *prometheusMetric) destroy() {
40 | prometheus.Unregister(p.createCounter)
41 | prometheus.Unregister(p.updateCounter)
42 | prometheus.Unregister(p.deleteCounter)
43 | }
44 |
45 | func (p *prometheusMetric) reportApplicationProfileCreated() {
46 | p.createCounter.Inc()
47 | }
48 |
49 | func (p *prometheusMetric) reportApplicationProfileUpdated() {
50 | p.updateCounter.Inc()
51 | }
52 |
53 | func (p *prometheusMetric) reportApplicationProfileDeleted() {
54 | p.deleteCounter.Inc()
55 | }
56 |
--------------------------------------------------------------------------------
/pkg/approfilecache/types.go:
--------------------------------------------------------------------------------
1 | package approfilecache
2 |
3 | import (
4 | "github.com/kubescape/kapprofiler/pkg/collector"
5 | )
6 |
7 | type SingleApplicationProfileAccess interface {
8 | // Get application profile name
9 | GetName() string
10 | // Get application profile namespace
11 | GetNamespace() string
12 | // Get exec list
13 | GetExecList() (*[]collector.ExecCalls, error)
14 | // Get open list
15 | GetOpenList() (*[]collector.OpenCalls, error)
16 | // Get network activity
17 | GetNetworkActivity() (*collector.NetworkActivity, error)
18 | // Get system calls
19 | GetSystemCalls() ([]string, error)
20 | // Get capabilities
21 | GetCapabilities() (*[]collector.CapabilitiesCalls, error)
22 | // Get DNS activity
23 | GetDNS() (*[]collector.DnsCalls, error)
24 | }
25 |
26 | type ApplicationProfileCache interface {
27 | // Load an application profile to the cache
28 | LoadApplicationProfile(namespace, kind, workloadName, ownerKind, ownerName, containerName, containerID string, acceptPartial bool) error
29 |
30 | // Anticipate an application profile to be loaded to the cache
31 | AnticipateApplicationProfile(namespace, kind, workloadName, ownerKind, ownerName, containerName, containerID string, acceptPartial bool) error
32 |
33 | // Delete an application profile from the cache
34 | DeleteApplicationProfile(containerID string) error
35 |
36 | // Has application profile for the given container in Kubernetes workload (identified by namespace, kind, workload name and container name)
37 | HasApplicationProfile(namespace, kind, workloadName, containerName string) bool
38 |
39 | // Get application profile access for the given container in Kubernetes workload (identified by container name and ID in the cache)
40 | GetApplicationProfileAccess(containerName, containerID string) (SingleApplicationProfileAccess, error)
41 | }
42 |
--------------------------------------------------------------------------------
/pkg/engine/apiserver.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | corev1 "k8s.io/api/core/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | )
10 |
11 | func (engine *Engine) fetchPodSpec(podName, namespace string) (*corev1.PodSpec, error) {
12 | pod, err := engine.k8sClientset.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
13 | if err != nil {
14 | return nil, err
15 | }
16 |
17 | return &pod.Spec, nil
18 | }
19 |
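// GetApiServerIpAddress returns the first ClusterIP of the "kubernetes" Service in the default namespace, i.e. the in-cluster address of the Kubernetes API server.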
20 | func (engine *Engine) GetApiServerIpAddress() (string, error) {
21 | service, err := engine.k8sClientset.CoreV1().Services("default").Get(context.Background(), "kubernetes", metav1.GetOptions{})
22 | if err != nil {
23 | return "", err
24 | }
25 |
26 | for _, ip := range service.Spec.ClusterIPs {
27 | return ip, nil
28 | }
29 |
30 | return "", fmt.Errorf("failed to get api server ip")
31 | }
32 |
--------------------------------------------------------------------------------
/pkg/engine/container_test.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/armosec/kubecop/pkg/engine/rule"
7 | "github.com/armosec/kubecop/pkg/rulebindingstore"
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | func TestAssociateRulesWithContainerInCache(t *testing.T) {
12 | engine := &Engine{} // Create an instance of the Engine struct
13 |
14 | // Define the test input
15 | contEntry := containerEntry{
16 | PodName: "test-pod",
17 | Namespace: "test-namespace",
18 | ContainerID: "test-container",
19 | }
20 |
21 | // Mock the getRulesForPodFunc function
22 | engine.getRulesForPodFunc = func(podName, namespace string) ([]rulebindingstore.RuntimeAlertRuleBindingRule, error) {
23 | // Return some mock rule parameters
24 | return []rulebindingstore.RuntimeAlertRuleBindingRule{
25 | {
26 | RuleName: rule.R0001UnexpectedProcessLaunchedRuleDescriptor.Name,
27 | },
28 | {
29 | RuleID: rule.R0002UnexpectedFileAccessRuleDescriptor.ID,
30 | },
31 | {
32 | RuleTags: rule.R0003UnexpectedSystemCallRuleDescriptor.Tags,
33 | },
34 | }, nil
35 | }
36 |
37 | // Call the method first with exists=true, then with exists=false
38 | exists := true
39 | err := engine.associateRulesWithContainerInCache(contEntry, exists)
40 |
41 | // Check the result
42 | if err != nil {
43 | t.Errorf("Unexpected error: %v", err)
44 | }
45 | // get from cache
46 | _, ok := getContainerDetails(contEntry.ContainerID)
47 | if ok {
48 | t.Errorf("Container details should not found in cache in this case")
49 | }
50 | // Call the method again with exists=false
51 | exists = false
52 | err = engine.associateRulesWithContainerInCache(contEntry, exists)
53 | if err != nil {
54 | t.Errorf("Unexpected error: %v", err)
55 | }
56 | // get from cache
57 | contDetFromCache, ok := getContainerDetails(contEntry.ContainerID)
58 | if !ok {
59 | t.Errorf("Container details not found in cache")
60 | }
61 |
62 | // check the container details fields
63 | assert.Equal(t, contEntry.ContainerID, contDetFromCache.ContainerID)
64 | assert.Equal(t, contEntry.PodName, contDetFromCache.PodName)
65 | assert.Equal(t, contEntry.Namespace, contDetFromCache.Namespace)
66 |
67 | // Check the bound rules
68 | expectedRuleDescs := []rule.Rule{
69 | rule.CreateRuleByName(rule.R0001UnexpectedProcessLaunchedRuleDescriptor.Name),
70 | rule.CreateRuleByID(rule.R0002UnexpectedFileAccessRuleDescriptor.ID),
71 | }
72 | expectedRuleDescs = append(expectedRuleDescs, rule.CreateRulesByTags(rule.R0003UnexpectedSystemCallRuleDescriptor.Tags)...)
73 | assert.Equal(t, expectedRuleDescs, contDetFromCache.BoundRules)
74 |
75 | // delete from cache
76 | deleteContainerDetails(contEntry.ContainerID)
77 | _, ok = getContainerDetails(contEntry.ContainerID)
78 | if ok {
79 | t.Errorf("Container details should not found in cache after deletion")
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/pkg/engine/containeridcache.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | "sync"
5 |
6 | "github.com/armosec/kubecop/pkg/engine/rule"
7 | corev1 "k8s.io/api/core/v1"
8 | )
9 |
10 | type containerEntry struct {
11 | ContainerID string
12 | ContainerName string
13 | PodName string
14 | Namespace string
15 | OwnerKind string
16 | OwnerName string
17 | // Low level container information
18 | NsMntId uint64
19 |
20 | // Attached late (after container already started)
21 | AttachedLate bool
22 |
23 | // Pod spec
24 | PodSpec *corev1.PodSpec
25 |
26 | // Add rules here
27 | BoundRules []rule.Rule
28 | }
29 |
30 | // Container ID to details cache
31 | var containerIdToDetailsCache = make(map[string]containerEntry)
32 | var containerIdToDetailsCacheLock = sync.RWMutex{}
33 |
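// setContainerDetails stores containerDetails under containerId. When exists is true (the container was already running when the engine attached), the entry is only refreshed if it is still present in the cache, so a container that has already been removed is not re-added.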
34 | func setContainerDetails(containerId string, containerDetails containerEntry, exists bool) {
35 | containerIdToDetailsCacheLock.Lock()
36 | defer containerIdToDetailsCacheLock.Unlock()
37 | if exists {
38 | // If the container used to exist and is no longer in the cache, don't add it again
39 | if _, ok := containerIdToDetailsCache[containerId]; !ok {
40 | return
41 | }
42 | }
43 | containerIdToDetailsCache[containerId] = containerDetails
44 | }
45 |
46 | func getContainerDetails(containerId string) (containerEntry, bool) {
47 | containerIdToDetailsCacheLock.RLock()
48 | defer containerIdToDetailsCacheLock.RUnlock()
49 | containerDetails, ok := containerIdToDetailsCache[containerId]
50 | return containerDetails, ok
51 | }
52 |
53 | func deleteContainerDetails(containerId string) {
54 | containerIdToDetailsCacheLock.Lock()
55 | defer containerIdToDetailsCacheLock.Unlock()
56 | delete(containerIdToDetailsCache, containerId)
57 | }
58 |
59 | func getcontainerIdToDetailsCacheCopy() map[string]containerEntry {
60 | containerIdToDetailsCacheLock.RLock()
61 | defer containerIdToDetailsCacheLock.RUnlock()
62 | cacheCopy := make(map[string]containerEntry)
63 | for k, v := range containerIdToDetailsCache {
64 | cacheCopy[k] = v
65 | }
66 | return cacheCopy
67 | }
68 |
--------------------------------------------------------------------------------
/pkg/engine/engine.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | log "github.com/sirupsen/logrus"
5 |
6 | "github.com/armosec/kubecop/pkg/approfilecache"
7 | "github.com/armosec/kubecop/pkg/exporters"
8 | "github.com/armosec/kubecop/pkg/rulebindingstore"
9 | "github.com/gammazero/workerpool"
10 | "github.com/kubescape/kapprofiler/pkg/tracing"
11 | discovery "k8s.io/client-go/discovery"
12 | appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
13 | batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
14 | corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
15 | )
16 |
17 | type ClientSetInterface interface {
18 | CoreV1() corev1.CoreV1Interface
19 | Discovery() discovery.DiscoveryInterface
20 | AppsV1() appsv1.AppsV1Interface
21 | BatchV1() batchv1.BatchV1Interface
22 | }
23 |
24 | type Engine struct {
25 | applicationProfileCache approfilecache.ApplicationProfileCache
26 | tracer *tracing.Tracer
27 | exporter exporters.Exporter
28 | // Event processing worker pool
29 | eventProcessingPool *workerpool.WorkerPool
30 | k8sClientset ClientSetInterface
31 | pollLoopRunning bool
32 | pollLoopCancelChannel chan struct{}
33 | promCollector *prometheusMetric
34 | getRulesForPodFunc func(podName, namespace string) ([]rulebindingstore.RuntimeAlertRuleBindingRule, error)
35 | nodeName string
36 | }
37 |
38 | func NewEngine(k8sClientset ClientSetInterface,
39 | appProfileCache approfilecache.ApplicationProfileCache,
40 | tracer *tracing.Tracer,
41 | exporter exporters.Exporter,
42 | workerPoolWidth int, nodeName string) *Engine {
43 | workerPool := workerpool.New(workerPoolWidth)
44 | engine := Engine{
45 | applicationProfileCache: appProfileCache,
46 | k8sClientset: k8sClientset,
47 | eventProcessingPool: workerPool,
48 | tracer: tracer,
49 | exporter: exporter,
50 | promCollector: createPrometheusMetric(),
51 | nodeName: nodeName,
52 | }
53 | log.Print("Engine created")
54 | engine.StartPullComponent()
55 | return &engine
56 | }
57 |
58 | func (e *Engine) SetGetRulesForPodFunc(getRulesForPodFunc func(podName, namespace string) ([]rulebindingstore.RuntimeAlertRuleBindingRule, error)) {
59 | e.getRulesForPodFunc = getRulesForPodFunc
60 | }
61 |
62 | func (e *Engine) Delete() {
63 | e.StopPullComponent()
64 | e.eventProcessingPool.StopWait()
65 | e.promCollector.destroy()
66 | }
67 |
--------------------------------------------------------------------------------
/pkg/engine/poll.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | log "github.com/sirupsen/logrus"
8 |
9 | "github.com/kubescape/kapprofiler/pkg/tracing"
10 | )
11 |
12 | // Function to start the pull component from the tracer
13 | func (engine *Engine) StartPullComponent() {
14 | if !engine.pollLoopRunning {
15 | if engine.tracer != nil {
16 | engine.pollLoopCancelChannel = make(chan struct{})
17 | go engine.Poll()
18 | } else {
19 | log.Printf("Tracer not initialized, ignoring request to start poll loop\n")
20 | }
21 | } else {
22 | log.Printf("Poll loop already running, ignoring request to start it again\n")
23 | }
24 | }
25 |
26 | // Function to stop the pull component from the tracer
27 | func (engine *Engine) StopPullComponent() {
28 | if engine.pollLoopRunning {
29 | close(engine.pollLoopCancelChannel)
30 | engine.pollLoopRunning = false
31 | } else {
32 | log.Printf("Poll loop not running, ignoring request to stop it\n")
33 | }
34 | }
35 |
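// cancelableSleep sleeps for d and returns nil when the duration elapses, or an error if the cancel channel is closed first.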
36 | func cancelableSleep(d time.Duration, cancel <-chan struct{}) error {
37 | select {
38 | case <-time.After(d):
39 | return nil
40 | case <-cancel:
41 | return fmt.Errorf("sleep canceled")
42 | }
43 | }
44 |
45 | // Poll is the engine's main poll loop
46 | func (engine *Engine) Poll() {
47 | engine.pollLoopRunning = true
48 | for {
49 | if cancelableSleep(1*time.Second, engine.pollLoopCancelChannel) != nil {
50 | // Sleep was canceled, exit the poll loop (otherwise the goroutine would busy-spin on the closed channel forever)
51 | return
52 | }
53 | // Time elapsed without cancellation, do the work
54 | if engine.tracer != nil {
55 | // Loop over the containerIdToDetailsCache map
56 | for containerId, containerDetails := range getcontainerIdToDetailsCacheCopy() {
57 | syscalls, err := engine.tracer.PeekSyscallInContainer(containerDetails.NsMntId)
58 | if err != nil {
59 | continue
60 | }
61 | // Generate events for the syscalls and process them in the engine
62 | e := tracing.SyscallEvent{
63 | GeneralEvent: tracing.GeneralEvent{
64 | ContainerID: containerId,
65 | ContainerName: containerDetails.ContainerName,
66 | PodName: containerDetails.PodName,
67 | Namespace: containerDetails.Namespace,
68 | Timestamp: time.Now().UnixNano(),
69 | },
70 | Syscalls: syscalls,
71 | }
72 | engine.submitEventForProcessing(containerId, tracing.SyscallEventType, &e)
73 | }
74 | }
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/pkg/engine/processing.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | log "github.com/sirupsen/logrus"
5 |
6 | "github.com/armosec/kubecop/pkg/approfilecache"
7 | "github.com/armosec/kubecop/pkg/engine/rule"
8 | "github.com/kubescape/kapprofiler/pkg/tracing"
9 | )
10 |
11 | func (engine *Engine) ProcessEvent(eventType tracing.EventType, event interface{}, appProfile approfilecache.SingleApplicationProfileAccess, boundRules []rule.Rule) {
12 | // Convert the event to a generic event
13 | e, err := convertEventInterfaceToGenericEvent(eventType, event)
14 | if err != nil {
15 | log.Printf("Failed to convert event to a generic event: %v\n", event)
16 | }
17 |
18 | // Loop over the boundRules
19 | for _, rule := range boundRules {
20 | // TODO if no app profile and one of the rules must have it then fire alert!
21 | if appProfile == nil && rule.Requirements().NeedApplicationProfile {
22 | log.Debugf("%v - warning missing app profile", e)
23 | continue // TODO - check with the RuleBinding if alert should be fired or not
24 | }
25 |
26 | ruleFailure := rule.ProcessEvent(eventType, event, appProfile, engine)
27 | if ruleFailure != nil {
28 | engine.exporter.SendRuleAlert(ruleFailure)
29 | engine.promCollector.reportRuleAlereted(rule.Name())
30 | }
31 | engine.promCollector.reportRuleProcessed(rule.Name())
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/pkg/engine/rule/README.md:
--------------------------------------------------------------------------------
1 | | ID | Rule | Description | Tags | Priority | Application profile | Parameters |
2 | |----|------|-------------|------|----------|---------------------| ---------- |
3 | | R0001 | Unexpected process launched | Detecting exec calls that are not whitelisted by application profile | [exec whitelisted] | 10 | true | false |
4 | | R0002 | Unexpected file access | Detecting file access that is not whitelisted by application profile. File access is defined by the combination of path and flags | [open whitelisted] | 5 | true | [ignoreMounts: bool, ignorePrefixes: string[]] |
5 | | R0003 | Unexpected system call | Detecting unexpected system calls that are not whitelisted by application profile. Every unexpected system call will be alerted only once. | [syscall whitelisted] | 5 | true | false |
6 | | R0004 | Unexpected capability used | Detecting unexpected capabilities that are not whitelisted by application profile. Every unexpected capability is identified in context of a syscall and will be alerted only once per container. | [capabilities whitelisted] | 8 | true | false |
7 | | R0005 | Unexpected domain request | Detecting unexpected domain requests that are not whitelisted by application profile. | [dns whitelisted] | 5 | true | false |
8 | | R0006 | Unexpected service account token access | Detecting unexpected service account token access that is not whitelisted by application profile. | [token malicious whitelisted] | 8 | true | false |
9 | | R0007 | Kubernetes Client Executed | Detecting execution of a Kubernetes client | [exec malicious whitelisted] | 10 | false | false |
10 | | R1000 | Exec from malicious source | Detecting exec calls that are from malicious source like: /dev/shm, /run, /var/run, /proc/self | [exec signature] | 10 | false | false |
11 | | R1001 | Exec Binary Not In Base Image | Detecting exec calls of binaries that are not included in the base image | [exec malicious binary base image] | 10 | false | false |
12 | | R1002 | Kernel Module Load | Detecting Kernel Module Load. | [syscall kernel module load] | 10 | false | false |
13 | | R1003 | Malicious SSH Connection | Detecting ssh connection to disallowed port | [ssh connection port malicious] | 8 | false | false |
14 | | R1004 | Exec from mount | Detecting exec calls from mounted paths. | [exec mount] | 5 | false | false |
15 | | R1006 | Unshare System Call usage | Detecting Unshare System Call usage. | [syscall escape unshare] | 8 | false | false |
16 | | R1007 | Crypto Miners | Detecting Crypto Miners. | [network crypto miners malicious dns] | 8 | false | false |
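
The `Parameters` column lists per-rule parameters that can be set in a runtime rule binding. A minimal sketch of binding R0002 with its parameters (the kind, apiVersion, and selector fields are illustrative; see the runtime-rule-binding CRD in the chart for the authoritative schema):

```yaml
apiVersion: kubescape.io/v1
kind: RuntimeRuleAlertBinding
metadata:
  name: ignore-mounts-for-default-ns
spec:
  namespaceSelector:
    matchLabels:
      kubernetes.io/metadata.name: default
  rules:
    - ruleName: "Unexpected file access"
      parameters:
        ignoreMounts: true
        ignorePrefixes: ["/var/log"]
```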
--------------------------------------------------------------------------------
/pkg/engine/rule/factory.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | // List of all rules descriptions.
4 | var ruleDescriptions []RuleDesciptor = []RuleDesciptor{
5 | R0001UnexpectedProcessLaunchedRuleDescriptor,
6 | R0002UnexpectedFileAccessRuleDescriptor,
7 | R0003UnexpectedSystemCallRuleDescriptor,
8 | R0004UnexpectedCapabilityUsedRuleDescriptor,
9 | R0005UnexpectedDomainRequestRuleDescriptor,
10 | R0006UnexpectedServiceAccountTokenAccessRuleDescriptor,
11 | R0007KubernetesClientExecutedDescriptor,
12 | R1000ExecFromMaliciousSourceDescriptor,
13 | R1001ExecBinaryNotInBaseImageRuleDescriptor,
14 | R1002LoadKernelModuleRuleDescriptor,
15 | R1003MaliciousSSHConnectionRuleDescriptor,
16 | R1004ExecFromMountRuleDescriptor,
17 | R1006UnshareSyscallRuleDescriptor,
18 | R1007CryptoMinersRuleDescriptor,
19 | }
20 |
21 | func GetAllRuleDescriptors() []RuleDesciptor {
22 | return ruleDescriptions
23 | }
24 |
25 | func CreateRulesByTags(tags []string) []Rule {
26 | var rules []Rule
27 | for _, rule := range ruleDescriptions {
28 | if rule.HasTags(tags) {
29 | rules = append(rules, rule.RuleCreationFunc())
30 | }
31 | }
32 | return rules
33 | }
34 |
35 | func CreateRuleByID(id string) Rule {
36 | for _, rule := range ruleDescriptions {
37 | if rule.ID == id {
38 | return rule.RuleCreationFunc()
39 | }
40 | }
41 | return nil
42 | }
43 |
44 | func CreateRuleByName(name string) Rule {
45 | for _, rule := range ruleDescriptions {
46 | if rule.Name == name {
47 | return rule.RuleCreationFunc()
48 | }
49 | }
50 | return nil
51 | }
52 |
53 | func CreateRulesByNames(names []string) []Rule {
54 | var rules []Rule
55 | for _, rule := range ruleDescriptions {
56 | for _, name := range names {
57 | if rule.Name == name {
58 | rules = append(rules, rule.RuleCreationFunc())
59 | }
60 | }
61 | }
62 | return rules
63 | }
64 |
--------------------------------------------------------------------------------
/pkg/engine/rule/factory_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | // Test CreateRulesByTags
8 | func TestCreateRulesByTags(t *testing.T) {
9 | // Create a new rule
10 | rules := CreateRulesByTags([]string{"exec"})
11 | // Assert r is not nil
12 | if rules == nil {
13 | t.Errorf("Expected rules to not be nil")
14 | }
15 | }
16 |
17 | // Test CreateRulesByNames
18 | func TestCreateRulesByNames(t *testing.T) {
19 | // Create a new rule
20 | rules := CreateRulesByNames([]string{R0001UnexpectedProcessLaunchedRuleName})
21 | // Assert r is not nil
22 | if rules == nil || len(rules) != 1 {
23 | t.Errorf("Expected rules to not be nil")
24 | }
25 | }
26 |
27 | // Test CreateRuleByName
28 | func TestCreateRuleByName(t *testing.T) {
29 | // Create a new rule
30 | rule := CreateRuleByName(R0001UnexpectedProcessLaunchedRuleName)
31 | // Assert r is not nil
32 | if rule == nil {
33 | t.Errorf("Expected rule to not be nil")
34 | }
35 | // not exist
36 | rule = CreateRuleByName("not exist")
37 | // Assert r is not nil
38 | if rule != nil {
39 | t.Errorf("Expected rule to be nil")
40 | }
41 | }
42 |
43 | // Test CreateRuleByID
44 | func TestCreateRuleByID(t *testing.T) {
45 | rule := CreateRuleByID(R0001ID)
46 | // Assert r is not nil
47 | if rule == nil {
48 | t.Errorf("Expected rule to not be nil")
49 | }
50 | // not exist
51 | rule = CreateRuleByID("not exist")
52 | // Assert r is not nil
53 | if rule != nil {
54 | t.Errorf("Expected rule to be nil")
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/pkg/engine/rule/gen_rule_files.py:
--------------------------------------------------------------------------------
1 |
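# Generates a skeleton rule source file and a matching test file from the
# templates below.
# Usage: python3 gen_rule_files.py <rule_id> <rule name words...>
# Example (hypothetical rule): python3 gen_rule_files.py R1008 My New Rule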
2 | import sys
3 | import re
4 |
5 | rule_id = sys.argv[1]
6 |
7 | # Verify the rule id looks like RXXXX (an R followed by four digits)
8 | if not re.match(r'R[0-9]{4}', rule_id):
9 | print('Rule id must be in the format RXXXX, e.g. R0001')
10 | sys.exit(1)
11 |
12 | rule_name = ' '.join(sys.argv[2:])
13 | rule_abbrev = ''.join([s.capitalize() for s in rule_name.split(' ')])
14 |
15 | rule_file_name = f'{rule_id}_{rule_name.replace(" ", "_")}.go'.lower()
16 | rule_test_file_name = f'{rule_id}_{rule_name.replace(" ", "_")}_test.go'.lower()
17 |
18 |
19 | rule_template = '''package rule
20 |
21 | import (
22 | "github.com/armosec/kubecop/pkg/approfilecache"
23 | "github.com/kubescape/kapprofiler/pkg/tracing"
24 | )
25 |
26 | const (
27 | {rule_id}{rule_abbrev}RuleName = "{rule_id} {rule_name}"
28 | )
29 |
30 | var {rule_id}{rule_abbrev}RuleDescriptor = RuleDesciptor{
31 | Name: {rule_id}{rule_abbrev}RuleName,
32 | Tags: []string{},
33 | Priority: replaceme,
34 | Requirements: RuleRequirements{
35 | EventTypes: []tracing.EventType{replaceme},
36 | NeedApplicationProfile: replaceme,
37 | },
38 | RuleCreationFunc: func() Rule {
39 | return CreateRule{rule_id}{rule_abbrev}()
40 | },
41 | }
42 |
43 | type {rule_id}{rule_abbrev} struct {
44 | }
45 |
46 | type {rule_id}{rule_abbrev}Failure struct {
47 | RuleName string
48 | RulePriority int
49 | Err string
50 | FailureEvent *replaceme
51 | }
52 |
53 | func (rule *{rule_id}{rule_abbrev}) Name() string {
54 | return {rule_id}{rule_abbrev}RuleName
55 | }
56 |
57 | func CreateRule{rule_id}{rule_abbrev}() *{rule_id}{rule_abbrev} {
58 | return &{rule_id}{rule_abbrev}{}
59 | }
60 |
61 | func (rule *{rule_id}{rule_abbrev}) DeleteRule() {
62 | }
63 |
64 | func (rule *{rule_id}{rule_abbrev}) ProcessEvent(eventType tracing.EventType, event interface{}, appProfileAccess approfilecache.SingleApplicationProfileAccess, engineAccess EngineAccess) RuleFailure {
65 | if eventType != replaceme {
66 | return nil
67 | }
68 |
69 | execEvent, ok := event.(*replaceme)
70 | if !ok {
71 | return nil
72 | }
73 |
74 | if appProfileAccess == nil {
75 | return &{rule_id}{rule_abbrev}Failure{
76 | RuleName: rule.Name(),
77 | Err: "Application profile is missing",
78 | FailureEvent: execEvent,
79 | RulePriority: RulePrioritySystemIssue,
80 | }
81 | }
82 |
83 | appProfileExecList, err := appProfileAccess.GetExecList()
84 | if err != nil || appProfileExecList == nil {
85 | return &{rule_id}{rule_abbrev}Failure{
86 | RuleName: rule.Name(),
87 | Err: "Application profile is missing",
88 | FailureEvent: execEvent,
89 | RulePriority: RulePrioritySystemIssue,
90 | }
91 | }
92 |
93 | }
94 |
95 | func (rule *{rule_id}{rule_abbrev}) Requirements() RuleRequirements {
96 | return RuleRequirements{
97 | EventTypes: []tracing.EventType{replaceme},
98 | NeedApplicationProfile: true,
99 | }
100 | }
101 |
102 | func (rule *{rule_id}{rule_abbrev}Failure) Name() string {
103 | return rule.RuleName
104 | }
105 |
106 | func (rule *{rule_id}{rule_abbrev}Failure) Error() string {
107 | return rule.Err
108 | }
109 |
110 | func (rule *{rule_id}{rule_abbrev}Failure) Event() tracing.GeneralEvent {
111 | return rule.FailureEvent.GeneralEvent
112 | }
113 |
114 | func (rule *{rule_id}{rule_abbrev}Failure) Priority() int {
115 | return rule.RulePriority
116 | }
117 | '''
118 |
119 | rule_test_template = '''package rule
120 |
121 | import (
122 | "testing"
123 | )
124 |
125 | func Test{rule_id}{rule_abbrev}(t *testing.T) {
126 | // Create a new rule
127 | r := CreateRule{rule_id}{rule_abbrev}()
128 | // Assert r is not nil
129 | if r == nil {
130 | t.Errorf("Expected r to not be nil")
131 | }
132 | }
133 | '''
134 |
135 | with open(rule_file_name, 'w') as f:
136 | rule = rule_template.replace('{rule_id}', rule_id).replace('{rule_name}', rule_name).replace('{rule_abbrev}', rule_abbrev)
137 | f.write(rule)
138 |
139 | with open(rule_test_file_name, 'w') as f:
140 | rule_test = rule_test_template.replace('{rule_id}', rule_id).replace('{rule_name}', rule_name).replace('{rule_abbrev}', rule_abbrev)
141 | f.write(rule_test)
142 |
--------------------------------------------------------------------------------
/pkg/engine/rule/mock.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "github.com/kubescape/kapprofiler/pkg/collector"
5 | corev1 "k8s.io/api/core/v1"
6 | )
7 |
8 | type EngineAccessMock struct {
9 | }
10 |
11 | func (e *EngineAccessMock) GetPodSpec(podName, namespace, containerID string) (*corev1.PodSpec, error) {
12 | podSpec := corev1.PodSpec{}
13 | podSpec.Containers = []corev1.Container{
14 | {
15 | Name: "test",
16 | Image: "test",
17 | VolumeMounts: []corev1.VolumeMount{
18 | {
19 | Name: "test",
20 | MountPath: "/var/test1",
21 | },
22 | {
23 | Name: "test2",
24 | MountPath: "/var/test2",
25 | SubPath: "test2",
26 | },
27 | },
28 | },
29 | }
30 |
31 | return &podSpec, nil
32 | }
33 |
34 | func (e *EngineAccessMock) GetApiServerIpAddress() (string, error) {
35 | return "1.1.1.1", nil
36 | }
37 |
38 | type MockAppProfileAccess struct {
39 | Execs []collector.ExecCalls
40 | OpenCalls []collector.OpenCalls
41 | Syscalls []string
42 | Capabilities []collector.CapabilitiesCalls
43 | NetworkActivity collector.NetworkActivity
44 | Dns []collector.DnsCalls
45 | }
46 |
47 | func (m *MockAppProfileAccess) GetName() string {
48 | return "testProfile"
49 | }
50 |
51 | func (m *MockAppProfileAccess) GetNamespace() string {
52 | return "testNamespace"
53 | }
54 |
55 | func (m *MockAppProfileAccess) GetExecList() (*[]collector.ExecCalls, error) {
56 | return &m.Execs, nil
57 | }
58 |
59 | func (m *MockAppProfileAccess) GetOpenList() (*[]collector.OpenCalls, error) {
60 | return &m.OpenCalls, nil
61 | }
62 |
63 | func (m *MockAppProfileAccess) GetNetworkActivity() (*collector.NetworkActivity, error) {
64 | return &m.NetworkActivity, nil
65 | }
66 |
67 | func (m *MockAppProfileAccess) GetSystemCalls() ([]string, error) {
68 | return m.Syscalls, nil
69 | }
70 |
71 | func (m *MockAppProfileAccess) GetCapabilities() (*[]collector.CapabilitiesCalls, error) {
72 | return &m.Capabilities, nil
73 | }
74 |
75 | func (m *MockAppProfileAccess) GetDNS() (*[]collector.DnsCalls, error) {
76 | return &m.Dns, nil
77 | }
78 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r0001_unexpected_process_launched_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/collector"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR0001UnexpectedProcessLaunched(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR0001UnexpectedProcessLaunched()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 | // Create a exec event
18 | e := &tracing.ExecveEvent{
19 | GeneralEvent: tracing.GeneralEvent{
20 | ContainerID: "test",
21 | PodName: "test",
22 | Namespace: "test",
23 | Timestamp: 0,
24 | },
25 | PathName: "/test",
26 | Args: []string{"test"},
27 | }
28 |
29 | // Test with nil appProfileAccess
30 | ruleResult := r.ProcessEvent(tracing.ExecveEventType, e, nil, nil)
31 | if ruleResult == nil {
32 | t.Errorf("Expected ruleResult to not be nil must have an appProfile")
33 | }
34 |
35 | // Test with empty appProfileAccess
36 | ruleResult = r.ProcessEvent(tracing.ExecveEventType, e, &MockAppProfileAccess{}, nil)
37 | if ruleResult == nil {
38 | t.Errorf("Expected ruleResult since exec is not whitelisted")
39 | }
40 |
41 | if ruleResult.FixSuggestion() == "" {
42 | t.Errorf("Expected fix suggestion to not be empty")
43 | }
44 |
45 | // Test with whitelisted exec
46 | ruleResult = r.ProcessEvent(tracing.ExecveEventType, e, &MockAppProfileAccess{
47 | Execs: []collector.ExecCalls{
48 | {
49 | Path: "/test",
50 | Args: []string{"test"},
51 | },
52 | },
53 | }, nil)
54 | if ruleResult != nil {
55 | t.Errorf("Expected ruleResult to be nil since exec is whitelisted")
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r0002_unexpected_file_access_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/collector"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR0002UnexpectedFileAccess(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR0002UnexpectedFileAccess()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 |
18 | // Create a file access event
19 | e := &tracing.OpenEvent{
20 | GeneralEvent: tracing.GeneralEvent{
21 | ContainerID: "test",
22 | PodName: "test",
23 | Namespace: "test",
24 | ContainerName: "test",
25 | Timestamp: 0,
26 | },
27 | PathName: "/test",
28 | Flags: []string{"O_RDONLY"},
29 | }
30 |
31 | // Test with nil appProfileAccess
32 | ruleResult := r.ProcessEvent(tracing.OpenEventType, e, nil, &EngineAccessMock{})
33 | if ruleResult == nil {
34 | t.Errorf("Expected ruleResult to not be nil since no appProfile")
35 | }
36 |
37 | // Test with empty appProfileAccess
38 | ruleResult = r.ProcessEvent(tracing.OpenEventType, e, &MockAppProfileAccess{}, &EngineAccessMock{})
39 | if ruleResult == nil {
40 | t.Errorf("Expected ruleResult to not be nil since file is not whitelisted")
41 | }
42 |
43 | // Test with whitelisted file
44 | ruleResult = r.ProcessEvent(tracing.OpenEventType, e, &MockAppProfileAccess{
45 | OpenCalls: []collector.OpenCalls{
46 | {
47 | Path: "/test",
48 | Flags: []string{"O_RDONLY"},
49 | },
50 | },
51 | }, &EngineAccessMock{})
52 | if ruleResult != nil {
53 | t.Errorf("Expected ruleResult to be nil since file is whitelisted")
54 | }
55 |
56 | // Test with whitelisted file, but different flags
57 | e.Flags = []string{"O_WRONLY"}
58 | ruleResult = r.ProcessEvent(tracing.OpenEventType, e, &MockAppProfileAccess{
59 | OpenCalls: []collector.OpenCalls{
60 | {
61 | Path: "/test",
62 | Flags: []string{"O_RDONLY"},
63 | },
64 | },
65 | }, &EngineAccessMock{})
66 | if ruleResult == nil {
67 | t.Errorf("Expected ruleResult to not be nil since flag is not whitelisted")
68 | }
69 |
70 | // Test with mounted file
71 | e.PathName = "/var/test1"
72 | r.SetParameters(map[string]interface{}{"ignoreMounts": true})
73 | ruleResult = r.ProcessEvent(tracing.OpenEventType, e, &MockAppProfileAccess{
74 | OpenCalls: []collector.OpenCalls{
75 | {
76 | Path: "/test",
77 | Flags: []string{"O_RDONLY"},
78 | },
79 | },
80 | }, &EngineAccessMock{})
81 |
82 | if ruleResult != nil {
83 | t.Errorf("Expected ruleResult to be nil since file is mounted")
84 | }
85 |
86 | // Test with ignored prefix
87 | e.PathName = "/var/test1"
88 | ignorePrefixes := []interface{}{"/var"}
89 | r.SetParameters(map[string]interface{}{"ignoreMounts": false, "ignorePrefixes": ignorePrefixes})
90 | ruleResult = r.ProcessEvent(tracing.OpenEventType, e, &MockAppProfileAccess{
91 | OpenCalls: []collector.OpenCalls{
92 | {
93 | Path: "/test",
94 | Flags: []string{"O_RDONLY"},
95 | },
96 | },
97 | }, &EngineAccessMock{})
98 | if ruleResult != nil {
99 | t.Errorf("Expected ruleResult to be nil since file is ignored")
100 | }
101 |
102 | }
103 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r0003_unexpected_system_call_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR0003UnexpectedSystemCall(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR0003UnexpectedSystemCall()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 |
18 | // Create a syscall event
19 | e := &tracing.SyscallEvent{
20 | GeneralEvent: tracing.GeneralEvent{
21 | ContainerID: "test",
22 | PodName: "test",
23 | Namespace: "test",
24 | Timestamp: 0,
25 | },
26 | Syscalls: []string{"test"},
27 | }
28 |
29 | // Test with nil appProfileAccess
30 | ruleResult := r.ProcessEvent(tracing.SyscallEventType, e, nil, nil)
31 | if ruleResult == nil {
32 | t.Errorf("Expected ruleResult to be nil since no syscall event")
33 | }
34 |
35 | // Test with mock appProfileAccess
36 | ruleResult = r.ProcessEvent(tracing.SyscallEventType, e, &MockAppProfileAccess{}, nil)
37 | if ruleResult == nil {
38 | t.Errorf("Expected ruleResult to be not nil since no syscall event")
39 | }
40 |
41 | // Test with mock appProfileAccess and syscall
42 | ruleResult = r.ProcessEvent(tracing.SyscallEventType, e, &MockAppProfileAccess{
43 | Syscalls: []string{"test"},
44 | }, nil)
45 | if ruleResult != nil {
46 | fmt.Printf("ruleResult: %v\n", ruleResult)
47 | t.Errorf("Expected ruleResult to be nil since syscall is whitelisted")
48 | }
49 |
50 | }
51 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r0004_unexpected_capability_used_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/collector"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR0004UnexpectedCapabilityUsed(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR0004UnexpectedCapabilityUsed()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 |
18 | // Create a capability event
19 | e := &tracing.CapabilitiesEvent{
20 | GeneralEvent: tracing.GeneralEvent{
21 | ContainerID: "test",
22 | PodName: "test",
23 | Namespace: "test",
24 | Timestamp: 0,
25 | },
26 | CapabilityName: "test_cap",
27 | Syscall: "test_call",
28 | }
29 |
30 | // Test with nil appProfileAccess
31 | ruleResult := r.ProcessEvent(tracing.CapabilitiesEventType, e, nil, nil)
32 | if ruleResult == nil {
33 | t.Errorf("Expected ruleResult to be nil since no capability event")
34 | }
35 |
36 | // Test with mock appProfileAccess
37 | ruleResult = r.ProcessEvent(tracing.CapabilitiesEventType, e, &MockAppProfileAccess{
38 | Capabilities: []collector.CapabilitiesCalls{
39 | {
40 | Capabilities: []string{"test_cap"},
41 | Syscall: "test_call",
42 | },
43 | },
44 | }, nil)
45 | if ruleResult != nil {
46 | t.Errorf("Expected ruleResult to be nil since capability is in the profile")
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r0005_unexpected_domain_request_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/collector"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR0005UnexpectedDomainRequest(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR0005UnexpectedDomainRequest()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 |
18 | // Create a domain request event
19 | e := &tracing.DnsEvent{
20 | GeneralEvent: tracing.GeneralEvent{
21 | ContainerID: "test",
22 | PodName: "test",
23 | Namespace: "test",
24 | Timestamp: 0,
25 | },
26 | DnsName: "test.com",
27 | Addresses: []string{
28 | "test",
29 | },
30 | }
31 |
32 | // Test with nil appProfileAccess
33 | ruleResult := r.ProcessEvent(tracing.DnsEventType, e, nil, nil)
34 | if ruleResult == nil {
35 | t.Errorf("Expected ruleResult to not be nil since no appProfile")
36 | }
37 |
38 | // Test with empty appProfileAccess
39 | ruleResult = r.ProcessEvent(tracing.DnsEventType, e, &MockAppProfileAccess{}, nil)
40 | if ruleResult == nil {
41 | t.Errorf("Expected ruleResult to not be nil since domain is not whitelisted")
42 | }
43 |
44 | // Test with whitelisted domain
45 | ruleResult = r.ProcessEvent(tracing.DnsEventType, e, &MockAppProfileAccess{
46 | Dns: []collector.DnsCalls{
47 | {
48 | DnsName: "test.com",
49 | },
50 | },
51 | }, nil)
52 | if ruleResult != nil {
53 | t.Errorf("Expected ruleResult to be nil since domain is whitelisted")
54 | }
55 |
56 | }
57 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r0006_unexpected_service_account_token_access_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/collector"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR0006UnexpectedServiceAccountTokenAccess(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR0006UnexpectedServiceAccountTokenAccess()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 |
18 | // Create a file access event
19 | e := &tracing.OpenEvent{
20 | GeneralEvent: tracing.GeneralEvent{
21 | ContainerID: "test",
22 | PodName: "test",
23 | Namespace: "test",
24 | ContainerName: "test",
25 | Timestamp: 0,
26 | },
27 | PathName: "/run/secrets/kubernetes.io/serviceaccount",
28 | Flags: []string{"O_RDONLY"},
29 | }
30 |
31 | // Test with nil appProfileAccess
32 | ruleResult := r.ProcessEvent(tracing.OpenEventType, e, nil, &EngineAccessMock{})
33 | if ruleResult == nil {
34 | t.Errorf("Expected ruleResult to not be nil since no appProfile")
35 | return
36 | }
37 |
38 | // Test with empty appProfileAccess
39 | ruleResult = r.ProcessEvent(tracing.OpenEventType, e, &MockAppProfileAccess{}, &EngineAccessMock{})
40 | if ruleResult == nil {
41 | t.Errorf("Expected ruleResult to not be nil since file is not whitelisted")
42 | return
43 | }
44 |
45 | // Test with whitelisted file
46 | e.PathName = "/run/secrets/kubernetes.io/serviceaccount/asdasd"
47 | ruleResult = r.ProcessEvent(tracing.OpenEventType, e, &MockAppProfileAccess{
48 | OpenCalls: []collector.OpenCalls{
49 | {
50 | Path: "/var/run/secrets/kubernetes.io/serviceaccount",
51 | Flags: []string{"O_RDONLY"},
52 | },
53 | },
54 | }, &EngineAccessMock{})
55 | if ruleResult != nil {
56 | t.Errorf("Expected ruleResult to be nil since file is whitelisted")
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r0007_kubernetes_client_executed_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/collector"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR0007KubernetesClientExecuted(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR0007KubernetesClientExecuted()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 | // Create a exec event
18 | e := &tracing.ExecveEvent{
19 | GeneralEvent: tracing.GeneralEvent{
20 | ContainerID: "test",
21 | PodName: "test",
22 | Namespace: "test",
23 | Timestamp: 0,
24 | },
25 | PathName: "/test",
26 | Args: []string{"test"},
27 | }
28 |
29 | appProfileAccess := &MockAppProfileAccess{
30 | Execs: []collector.ExecCalls{
31 | {
32 | Path: "/test",
33 | Args: []string{"test"},
34 | },
35 | },
36 | NetworkActivity: collector.NetworkActivity{
37 | Outgoing: []collector.NetworkCalls{
38 | {
39 | Protocol: "TCP",
40 | Port: 443,
41 | DstEndpoint: "1.1.1.1",
42 | },
43 | },
44 | },
45 | }
46 |
47 | ruleResult := r.ProcessEvent(tracing.ExecveEventType, e, appProfileAccess, &EngineAccessMock{})
48 | if ruleResult != nil {
49 | t.Errorf("Expected ruleResult to be nil since test is not a k8s client")
50 | return
51 | }
52 |
53 | event2 := &tracing.ExecveEvent{
54 | GeneralEvent: tracing.GeneralEvent{
55 | ContainerID: "test",
56 | PodName: "test",
57 | Namespace: "test",
58 | Timestamp: 0,
59 | },
60 | PathName: "kubectl",
61 | Args: []string{"test"},
62 | }
63 |
64 | ruleResult = r.ProcessEvent(tracing.ExecveEventType, event2, appProfileAccess, &EngineAccessMock{})
65 | if ruleResult == nil {
66 | t.Errorf("Expected ruleResult since exec is a k8s client")
67 | return
68 | }
69 |
70 | event3 := &tracing.ExecveEvent{
71 | GeneralEvent: tracing.GeneralEvent{
72 | ContainerID: "test",
73 | PodName: "test",
74 | Namespace: "test",
75 | Timestamp: 0,
76 | },
77 | PathName: "/a/b/c/kubectl",
78 | Args: []string{"test"},
79 | }
80 |
81 | ruleResult = r.ProcessEvent(tracing.ExecveEventType, event3, appProfileAccess, &EngineAccessMock{})
82 | if ruleResult == nil {
83 | t.Errorf("Expected ruleResult since exec is a k8s client")
84 | return
85 | }
86 |
87 | event4 := &tracing.NetworkEvent{
88 | GeneralEvent: tracing.GeneralEvent{
89 | ContainerID: "test",
90 | PodName: "test",
91 | Namespace: "test",
92 | Timestamp: 0,
93 | },
94 | PacketType: "OUTGOING",
95 | Protocol: "TCP",
96 | Port: 443,
97 | DstEndpoint: "1.1.1.1",
98 | }
99 |
100 | ruleResult = r.ProcessEvent(tracing.NetworkEventType, event4, appProfileAccess, &EngineAccessMock{})
101 | if ruleResult != nil {
102 | t.Errorf("Expected ruleResult since network event dst is kube api server and whitelisted")
103 | return
104 | }
105 |
106 | // Test with non whitelisted network event
107 | appProfileAccess = &MockAppProfileAccess{
108 | Execs: []collector.ExecCalls{
109 | {
110 | Path: "/test",
111 | Args: []string{"test"},
112 | },
113 | },
114 | NetworkActivity: collector.NetworkActivity{
115 | Outgoing: []collector.NetworkCalls{
116 | {
117 | Protocol: "TCP",
118 | Port: 443,
119 | DstEndpoint: "1.1.1.2",
120 | },
121 | },
122 | },
123 | }
124 |
125 | ruleResult = r.ProcessEvent(tracing.NetworkEventType, event4, appProfileAccess, &EngineAccessMock{})
126 | if ruleResult == nil {
127 | t.Errorf("Expected ruleResult to be non nil since network event dst is not kube api server in the profile")
128 | return
129 | }
130 |
131 | // Test with whitelisted exec event
132 | appProfileAccess.Execs = []collector.ExecCalls{
133 | {
134 | Path: "kubectl",
135 | Args: []string{"test"},
136 | },
137 | }
138 |
139 | ruleResult = r.ProcessEvent(tracing.ExecveEventType, event2, appProfileAccess, &EngineAccessMock{})
140 | if ruleResult != nil {
141 | t.Errorf("Expected ruleResult to be nil since exec is whitelisted")
142 | return
143 | }
144 |
145 | }
146 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1000_exec_from_malicious_source.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "github.com/armosec/kubecop/pkg/approfilecache"
8 | "github.com/kubescape/kapprofiler/pkg/tracing"
9 | )
10 |
11 | const (
12 | R1000ID = "R1000"
13 | R1000ExecFromMaliciousSourceRuleName = "Exec from malicious source"
14 | )
15 |
16 | var R1000ExecFromMaliciousSourceDescriptor = RuleDesciptor{
17 | ID: R1000ID,
18 | Name: R1000ExecFromMaliciousSourceRuleName,
19 | Description: "Detecting exec calls that are from malicious source like: /dev/shm, /run, /var/run, /proc/self",
20 | Priority: RulePriorityCritical,
21 | Tags: []string{"exec", "signature"},
22 | Requirements: RuleRequirements{
23 | EventTypes: []tracing.EventType{tracing.ExecveEventType},
24 | NeedApplicationProfile: false,
25 | },
26 | RuleCreationFunc: func() Rule {
27 | return CreateRuleR1000ExecFromMaliciousSource()
28 | },
29 | }
30 |
31 | type R1000ExecFromMaliciousSource struct {
32 | BaseRule
33 | }
34 |
35 | type R1000ExecFromMaliciousSourceFailure struct {
36 | RuleName string
37 | RulePriority int
38 | FixSuggestionMsg string
39 | Err string
40 | FailureEvent *tracing.ExecveEvent
41 | }
42 |
43 | func (rule *R1000ExecFromMaliciousSource) Name() string {
44 | return R1000ExecFromMaliciousSourceRuleName
45 | }
46 |
47 | func CreateRuleR1000ExecFromMaliciousSource() *R1000ExecFromMaliciousSource {
48 | return &R1000ExecFromMaliciousSource{}
49 | }
50 |
51 | func (rule *R1000ExecFromMaliciousSource) DeleteRule() {
52 | }
53 |
54 | func (rule *R1000ExecFromMaliciousSource) ProcessEvent(eventType tracing.EventType, event interface{}, appProfileAccess approfilecache.SingleApplicationProfileAccess, engineAccess EngineAccess) RuleFailure {
55 | if eventType != tracing.ExecveEventType {
56 | return nil
57 | }
58 |
59 | execEvent, ok := event.(*tracing.ExecveEvent)
60 | if !ok {
61 | return nil
62 | }
63 |
64 | var maliciousExecPathPrefixes = []string{
65 | "/dev/shm",
66 | "/run",
67 | "/var/run",
68 | "/proc/self",
69 | }
70 |
71 | // /proc/self/fd/ is a classic way to hide malicious execs
72 | // (see ezuri packer for example)
73 | // Here it would be even more interesting to check whether the fd
74 | // is a memory-mapped file
75 |
76 | // The assumption here is that the event path is absolute!
77 |
78 | for _, maliciousExecPathPrefix := range maliciousExecPathPrefixes {
79 | if strings.HasPrefix(execEvent.PathName, maliciousExecPathPrefix) {
80 | return &R1000ExecFromMaliciousSourceFailure{
81 | RuleName: rule.Name(),
82 | Err: fmt.Sprintf("exec call \"%s\" is from a malicious source \"%s\"", execEvent.PathName, maliciousExecPathPrefix),
83 | FixSuggestionMsg: "If this is a legitimate action, please add consider removing this workload from the binding of this rule.",
84 | FailureEvent: execEvent,
85 | RulePriority: R1000ExecFromMaliciousSourceDescriptor.Priority,
86 | }
87 | }
88 | }
89 |
90 | return nil
91 | }
92 |
93 | func (rule *R1000ExecFromMaliciousSource) Requirements() RuleRequirements {
94 | return RuleRequirements{
95 | EventTypes: []tracing.EventType{tracing.ExecveEventType},
96 | NeedApplicationProfile: false,
97 | }
98 | }
99 |
100 | func (rule *R1000ExecFromMaliciousSourceFailure) Name() string {
101 | return rule.RuleName
102 | }
103 |
104 | func (rule *R1000ExecFromMaliciousSourceFailure) Error() string {
105 | return rule.Err
106 | }
107 |
108 | func (rule *R1000ExecFromMaliciousSourceFailure) Event() tracing.GeneralEvent {
109 | return rule.FailureEvent.GeneralEvent
110 | }
111 |
112 | func (rule *R1000ExecFromMaliciousSourceFailure) Priority() int {
113 | return rule.RulePriority
114 | }
115 |
116 | func (rule *R1000ExecFromMaliciousSourceFailure) FixSuggestion() string {
117 | return rule.FixSuggestionMsg
118 | }
119 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1000_exec_from_malicious_source_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/tracing"
7 | )
8 |
9 | func TestR1000ExecFromMaliciousSource(t *testing.T) {
10 | // Create a new rule
11 | r := CreateRuleR1000ExecFromMaliciousSource()
12 | // Assert r is not nil
13 | if r == nil {
14 | t.Errorf("Expected r to not be nil")
15 | }
16 | // Create a exec event
17 | e := &tracing.ExecveEvent{
18 | GeneralEvent: tracing.GeneralEvent{
19 | ContainerID: "test",
20 | PodName: "test",
21 | Namespace: "test",
22 | Timestamp: 0,
23 | },
24 | PathName: "/test",
25 | Args: []string{"test"},
26 | }
27 |
28 | ruleResult := r.ProcessEvent(tracing.ExecveEventType, e, nil, nil)
29 | if ruleResult != nil {
30 | t.Errorf("Expected ruleResult to be nil since test is not a malicious exec")
31 | }
32 |
33 | e.PathName = "/proc/self/fd/3"
34 |
35 | ruleResult = r.ProcessEvent(tracing.ExecveEventType, e, nil, nil)
36 | if ruleResult == nil {
37 | t.Errorf("Expected ruleResult since exec is is malicious")
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1001_exec_binary_not_in_base_image_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/tracing"
7 | )
8 |
9 | func TestR1001ExecBinaryNotInBaseImage(t *testing.T) {
10 | // Create a new rule
11 | r := CreateRuleR1001ExecBinaryNotInBaseImage()
12 | // Assert r is not nil
13 | if r == nil {
14 | t.Errorf("Expected r to not be nil")
15 | }
16 | // Create a exec event
17 | e := &tracing.ExecveEvent{
18 | GeneralEvent: tracing.GeneralEvent{
19 | ContainerID: "test",
20 | PodName: "test",
21 | Namespace: "test",
22 | Timestamp: 0,
23 | },
24 | PathName: "/usr/bin/test",
25 | Args: []string{"test"},
26 | }
27 |
28 | // Test with non existing binary
29 | ruleResult := r.ProcessEvent(tracing.ExecveEventType, e, nil, nil)
30 | if ruleResult != nil {
31 | t.Errorf("Expected ruleResult to be nil since exec is not in the upper layer")
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1002_load_kernel_module.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "slices"
5 |
6 | "github.com/armosec/kubecop/pkg/approfilecache"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | const (
11 | R1002ID = "R1002"
12 | R1002LoadKernelModuleRuleName = "Kernel Module Load"
13 | )
14 |
15 | var R1002LoadKernelModuleRuleDescriptor = RuleDesciptor{
16 | ID: R1002ID,
17 | Name: R1002LoadKernelModuleRuleName,
18 | Description: "Detecting Kernel Module Load.",
19 | Tags: []string{"syscall", "kernel", "module", "load"},
20 | Priority: RulePriorityCritical,
21 | Requirements: RuleRequirements{
22 | EventTypes: []tracing.EventType{
23 | tracing.SyscallEventType,
24 | },
25 | NeedApplicationProfile: false,
26 | },
27 | RuleCreationFunc: func() Rule {
28 | return CreateRuleR1002LoadKernelModule()
29 | },
30 | }
31 |
32 | type R1002LoadKernelModule struct {
33 | BaseRule
34 | }
35 |
36 | type R1002LoadKernelModuleFailure struct {
37 | RuleName string
38 | RulePriority int
39 | Err string
40 | FixSuggestionMsg string
41 | FailureEvent *tracing.SyscallEvent
42 | }
43 |
44 | func (rule *R1002LoadKernelModule) Name() string {
45 | return R1002LoadKernelModuleRuleName
46 | }
47 |
48 | func CreateRuleR1002LoadKernelModule() *R1002LoadKernelModule {
49 | return &R1002LoadKernelModule{}
50 | }
51 |
52 | func (rule *R1002LoadKernelModule) DeleteRule() {
53 | }
54 |
55 | func (rule *R1002LoadKernelModule) ProcessEvent(eventType tracing.EventType, event interface{}, appProfileAccess approfilecache.SingleApplicationProfileAccess, engineAccess EngineAccess) RuleFailure {
56 | if eventType != tracing.SyscallEventType {
57 | return nil
58 | }
59 |
60 | syscallEvent, ok := event.(*tracing.SyscallEvent)
61 | if !ok {
62 | return nil
63 | }
64 | if slices.Contains(syscallEvent.Syscalls, "init_module") {
65 | return &R1002LoadKernelModuleFailure{
66 | RuleName: rule.Name(),
67 | Err: "Kernel Module Load",
68 | FailureEvent: syscallEvent,
69 | FixSuggestionMsg: "If this is a legitimate action, please add consider removing this workload from the binding of this rule",
70 | RulePriority: R1002LoadKernelModuleRuleDescriptor.Priority,
71 | }
72 | }
73 |
74 | return nil
75 | }
76 |
77 | func (rule *R1002LoadKernelModule) Requirements() RuleRequirements {
78 | return RuleRequirements{
79 | EventTypes: []tracing.EventType{tracing.SyscallEventType},
80 | NeedApplicationProfile: false,
81 | }
82 | }
83 |
84 | func (rule *R1002LoadKernelModuleFailure) Name() string {
85 | return rule.RuleName
86 | }
87 |
88 | func (rule *R1002LoadKernelModuleFailure) Error() string {
89 | return rule.Err
90 | }
91 |
92 | func (rule *R1002LoadKernelModuleFailure) Event() tracing.GeneralEvent {
93 | return rule.FailureEvent.GeneralEvent
94 | }
95 |
96 | func (rule *R1002LoadKernelModuleFailure) Priority() int {
97 | return rule.RulePriority
98 | }
99 |
100 | func (rule *R1002LoadKernelModuleFailure) FixSuggestion() string {
101 | return rule.FixSuggestionMsg
102 | }
103 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1002_load_kernel_module_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR1002LoadKernelModule(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR1002LoadKernelModule()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 |
18 | // Create a syscall event
19 | e := &tracing.SyscallEvent{
20 | GeneralEvent: tracing.GeneralEvent{
21 | ContainerID: "test",
22 | PodName: "test",
23 | Namespace: "test",
24 | Timestamp: 0,
25 | },
26 | Syscalls: []string{"test"},
27 | }
28 |
29 | ruleResult := r.ProcessEvent(tracing.SyscallEventType, e, nil, nil)
30 | if ruleResult != nil {
31 | fmt.Printf("ruleResult: %v\n", ruleResult)
32 | t.Errorf("Expected ruleResult to be nil since syscall is not init_module")
33 | }
34 |
35 | // Create a syscall event
36 | e = &tracing.SyscallEvent{
37 | GeneralEvent: tracing.GeneralEvent{
38 | ContainerID: "test",
39 | PodName: "test",
40 | Namespace: "test",
41 | Timestamp: 0,
42 | },
43 | Syscalls: []string{"init_module"},
44 | }
45 |
46 | ruleResult = r.ProcessEvent(tracing.SyscallEventType, e, nil, nil)
47 | if ruleResult == nil {
48 | fmt.Printf("ruleResult: %v\n", ruleResult)
49 | t.Errorf("Expected ruleResult to be Failure because init_module is not allowed")
50 | }
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1003_malicious_ssh_connection_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/tracing"
7 | )
8 |
9 | func TestR1003DisallowedSSHConnectionPort_ProcessEvent(t *testing.T) {
10 | rule := CreateRuleR1003MaliciousSSHConnection()
11 |
12 | // Test case 1: SSH connection to disallowed port
13 | networkEvent := &tracing.NetworkEvent{
14 | GeneralEvent: tracing.GeneralEvent{
15 | ProcessDetails: tracing.ProcessDetails{
16 | Pid: 1,
17 | },
18 | ContainerID: "test",
19 | PodName: "test",
20 | Namespace: "test",
21 | Timestamp: 2,
22 | },
23 | PacketType: "OUTGOING",
24 | Protocol: "TCP",
25 | Port: 2222,
26 | DstEndpoint: "1.1.1.1",
27 | }
28 |
29 | openEvent := &tracing.OpenEvent{
30 | GeneralEvent: tracing.GeneralEvent{
31 | ProcessDetails: tracing.ProcessDetails{
32 | Pid: 1,
33 | },
34 | ContainerID: "test",
35 | PodName: "test",
36 | Namespace: "test",
37 | Timestamp: 1,
38 | },
39 | PathName: "/etc/ssh/sshd_config",
40 | }
41 | rule.ProcessEvent(tracing.OpenEventType, openEvent, nil, &EngineAccessMock{})
42 | failure := rule.ProcessEvent(tracing.NetworkEventType, networkEvent, nil, &EngineAccessMock{})
43 | if failure == nil {
44 | t.Errorf("Expected failure, but got nil")
45 | }
46 |
47 | // Test case 2: SSH connection to allowed port
48 | networkEvent.Port = 22
49 | failure = rule.ProcessEvent(tracing.NetworkEventType, networkEvent, nil, &EngineAccessMock{})
50 | if failure != nil {
51 | t.Errorf("Expected failure to be nil, but got %v", failure)
52 | }
53 |
54 | // Test case 3: SSH connection to disallowed port, but not from SSH initiator
55 | networkEvent.Port = 2222
56 | networkEvent.Pid = 2
57 | failure = rule.ProcessEvent(tracing.NetworkEventType, networkEvent, nil, &EngineAccessMock{})
58 | if failure != nil {
59 | t.Errorf("Expected failure to be nil, but got %v", failure)
60 | }
61 |
62 | // Test case 4: SSH connection to disallowed port, but not from SSH initiator
63 | networkEvent.Port = 2222
64 | networkEvent.Pid = 1
65 | networkEvent.Timestamp = 3
66 | failure = rule.ProcessEvent(tracing.NetworkEventType, networkEvent, nil, &EngineAccessMock{})
67 | if failure != nil {
68 | t.Errorf("Expected failure to be nil, but got %v", failure)
69 | }
70 |
71 | // Test case 5: Time diff is greater than MaxTimeDiffInSeconds
72 | networkEvent.Port = 2222
73 | networkEvent.Pid = 1
74 | networkEvent.Timestamp = 5
75 | failure = rule.ProcessEvent(tracing.NetworkEventType, networkEvent, nil, &EngineAccessMock{})
76 | if failure != nil {
77 | t.Errorf("Expected failure to be nil, but got %v", failure)
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1004_exec_from_mount_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/kubescape/kapprofiler/pkg/tracing"
7 | )
8 |
9 | func TestR1004ExecFromMount(t *testing.T) {
10 | // Create a new rule
11 | r := CreateRuleR1004ExecFromMount()
12 | // Assert r is not nil
13 | if r == nil {
14 | t.Errorf("Expected r to not be nil")
15 | }
16 | // Create an exec event
17 | e := &tracing.ExecveEvent{
18 | GeneralEvent: tracing.GeneralEvent{
19 | ContainerID: "test",
20 | ContainerName: "test",
21 | PodName: "test",
22 | Namespace: "test",
23 | Timestamp: 0,
24 | },
25 | PathName: "/test",
26 | Args: []string{"test"},
27 | }
28 |
29 | // Test case where path is not mounted
30 | ruleResult := r.ProcessEvent(tracing.ExecveEventType, e, nil, &EngineAccessMock{})
31 | if ruleResult != nil {
32 | t.Errorf("Expected ruleResult to be nil since test is not from a mounted path")
33 | }
34 |
35 | // Test case where path is mounted
36 |
37 | e.PathName = "/var/test1/test"
38 |
39 | ruleResult = r.ProcessEvent(tracing.ExecveEventType, e, nil, &EngineAccessMock{})
40 | if ruleResult == nil {
41 | t.Errorf("Expected ruleResult since exec is from a mounted path")
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1006_unshare_system_call.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "slices"
5 |
6 | "github.com/armosec/kubecop/pkg/approfilecache"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | const (
11 | R1006ID = "R1006"
12 | R1006UnshareSyscallRuleName = "Unshare System Call usage"
13 | )
14 |
15 | var R1006UnshareSyscallRuleDescriptor = RuleDesciptor{
16 | ID: R1006ID,
17 | Name: R1006UnshareSyscallRuleName,
18 | Description: "Detecting Unshare System Call usage, which can be used to escape the container.",
19 | Tags: []string{"syscall", "escape", "unshare"},
20 | Priority: RulePriorityHigh,
21 | Requirements: RuleRequirements{
22 | EventTypes: []tracing.EventType{
23 | tracing.SyscallEventType,
24 | },
25 | NeedApplicationProfile: false,
26 | },
27 | RuleCreationFunc: func() Rule {
28 | return CreateRuleR1006UnshareSyscall()
29 | },
30 | }
31 |
32 | type R1006UnshareSyscall struct {
33 | BaseRule
34 | alreadyNotified bool
35 | }
36 |
37 | type R1006UnshareSyscallFailure struct {
38 | RuleName string
39 | RulePriority int
40 | Err string
41 | FixSuggestionMsg string
42 | FailureEvent *tracing.SyscallEvent
43 | }
44 |
45 | func (rule *R1006UnshareSyscall) Name() string {
46 | return R1006UnshareSyscallRuleName
47 | }
48 |
49 | func CreateRuleR1006UnshareSyscall() *R1006UnshareSyscall {
50 | return &R1006UnshareSyscall{alreadyNotified: false}
51 | }
52 |
53 | func (rule *R1006UnshareSyscall) DeleteRule() {
54 | }
55 |
56 | func (rule *R1006UnshareSyscall) ProcessEvent(eventType tracing.EventType, event interface{}, appProfileAccess approfilecache.SingleApplicationProfileAccess, engineAccess EngineAccess) RuleFailure {
57 | if rule.alreadyNotified {
58 | return nil
59 | }
60 |
61 | if eventType != tracing.SyscallEventType {
62 | return nil
63 | }
64 |
65 | syscallEvent, ok := event.(*tracing.SyscallEvent)
66 | if !ok {
67 | return nil
68 | }
69 | if slices.Contains(syscallEvent.Syscalls, "unshare") {
70 | rule.alreadyNotified = true
71 | return &R1006UnshareSyscallFailure{
72 | RuleName: rule.Name(),
73 | Err: "Unshare System Call usage",
74 | FailureEvent: syscallEvent,
75 | FixSuggestionMsg: "If this is a legitimate action, please consider removing this workload from the binding of this rule",
76 | RulePriority: R1006UnshareSyscallRuleDescriptor.Priority,
77 | }
78 | }
79 |
80 | return nil
81 | }
82 |
83 | func (rule *R1006UnshareSyscall) Requirements() RuleRequirements {
84 | return RuleRequirements{
85 | EventTypes: []tracing.EventType{tracing.SyscallEventType},
86 | NeedApplicationProfile: false,
87 | }
88 | }
89 |
90 | func (rule *R1006UnshareSyscallFailure) Name() string {
91 | return rule.RuleName
92 | }
93 |
94 | func (rule *R1006UnshareSyscallFailure) Error() string {
95 | return rule.Err
96 | }
97 |
98 | func (rule *R1006UnshareSyscallFailure) Event() tracing.GeneralEvent {
99 | return rule.FailureEvent.GeneralEvent
100 | }
101 |
102 | func (rule *R1006UnshareSyscallFailure) Priority() int {
103 | return rule.RulePriority
104 | }
105 |
106 | func (rule *R1006UnshareSyscallFailure) FixSuggestion() string {
107 | return rule.FixSuggestionMsg
108 | }
109 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1006_unshare_system_call_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR1006UnshareSyscall(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR1006UnshareSyscall()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 |
18 | // Create a syscall event
19 | e := &tracing.SyscallEvent{
20 | GeneralEvent: tracing.GeneralEvent{
21 | ContainerID: "test",
22 | PodName: "test",
23 | Namespace: "test",
24 | Timestamp: 0,
25 | },
26 | Syscalls: []string{"test"},
27 | }
28 |
29 | ruleResult := r.ProcessEvent(tracing.SyscallEventType, e, nil, nil)
30 | if ruleResult != nil {
31 | fmt.Printf("ruleResult: %v\n", ruleResult)
32 | t.Errorf("Expected ruleResult to be nil since syscall is not unshare")
33 | return
34 | }
35 |
36 | // Add unshare to the existing syscall event
37 | e.Syscalls = append(e.Syscalls, "unshare")
38 |
39 | ruleResult = r.ProcessEvent(tracing.SyscallEventType, e, nil, nil)
40 | if ruleResult == nil {
41 | fmt.Printf("ruleResult: %v\n", ruleResult)
42 | t.Errorf("Expected ruleResult to be Failure because unshare is used")
43 | return
44 | }
45 |
46 | }
47 |
--------------------------------------------------------------------------------
/pkg/engine/rule/r1007_crypto_miners_test.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | func TestR1007CryptoMiners(t *testing.T) {
11 | // Create a new rule
12 | r := CreateRuleR1007CryptoMiners()
13 | // Assert r is not nil
14 | if r == nil {
15 | t.Errorf("Expected r to not be nil")
16 | }
17 |
18 | // Create network event
19 | e := &tracing.NetworkEvent{
20 | GeneralEvent: tracing.GeneralEvent{
21 | ContainerID: "test",
22 | PodName: "test",
23 | Namespace: "test",
24 | Timestamp: 0,
25 | },
26 | PacketType: "OUTGOING",
27 | Protocol: "TCP",
28 | Port: 2222,
29 | DstEndpoint: "1.1.1.1",
30 | }
31 |
32 | ruleResult := r.ProcessEvent(tracing.NetworkEventType, e, nil, nil)
33 | if ruleResult != nil {
34 | fmt.Printf("ruleResult: %v\n", ruleResult)
35 | t.Errorf("Expected ruleResult to be nil since dst port is not one of the commonly used crypto miner ports")
36 | return
37 | }
38 |
39 | // Change the dst port of the network event to 3333
40 | e.Port = 3333
41 |
42 | ruleResult = r.ProcessEvent(tracing.NetworkEventType, e, nil, nil)
43 | if ruleResult == nil {
44 | fmt.Printf("ruleResult: %v\n", ruleResult)
45 | t.Errorf("Expected ruleResult to be Failure because dst port is one of the commonly used crypto miner ports")
46 | return
47 | }
48 |
49 | // Create dns event
50 | e2 := &tracing.DnsEvent{
51 | GeneralEvent: tracing.GeneralEvent{
52 | ContainerID: "test",
53 | PodName: "test",
54 | Namespace: "test",
55 | Timestamp: 0,
56 | },
57 | DnsName: "zergpool.com",
58 | Addresses: []string{},
59 | }
60 |
61 | ruleResult = r.ProcessEvent(tracing.DnsEventType, e2, nil, nil)
62 | if ruleResult == nil {
63 | fmt.Printf("ruleResult: %v\n", ruleResult)
64 | t.Errorf("Expected ruleResult to be Failure because the dns name is one of the commonly used crypto miner domains")
65 | return
66 | }
67 |
68 | // Test RandomX event
69 | e3 := &tracing.RandomXEvent{
70 | GeneralEvent: tracing.GeneralEvent{
71 | ContainerID: "test",
72 | PodName: "test",
73 | Namespace: "test",
74 | Timestamp: 0,
75 | },
76 | }
77 |
78 | ruleResult = r.ProcessEvent(tracing.RandomXEventType, e3, nil, nil)
79 | if ruleResult == nil {
80 | fmt.Printf("ruleResult: %v\n", ruleResult)
81 | t.Errorf("Expected ruleResult to be Failure because a RandomX event was detected")
82 | return
83 | }
84 |
85 | }
86 |
--------------------------------------------------------------------------------
/pkg/engine/rule/rule.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | "sync"
5 |
6 | "github.com/armosec/kubecop/pkg/approfilecache"
7 | "github.com/kubescape/kapprofiler/pkg/tracing"
8 | )
9 |
10 | const (
11 | RulePriorityNone = 0
12 | RulePriorityLow = 1
13 | RulePriorityMed = 5
14 | RulePriorityHigh = 8
15 | RulePriorityCritical = 10
16 | RulePrioritySystemIssue = 1000
17 | )
18 |
19 | type RuleDesciptor struct {
20 | // Rule ID
21 | ID string
22 | // Rule Name.
23 | Name string
24 | // Rule Description.
25 | Description string
26 | // Priority.
27 | Priority int
28 | // Tags
29 | Tags []string
30 | // Rule requirements.
31 | Requirements RuleRequirements
32 | // Create a rule function.
33 | RuleCreationFunc func() Rule
34 | }
35 |
36 | type RuleFailure interface {
37 | // Rule Name.
38 | Name() string
39 | // Priority.
40 | Priority() int
41 | // Error interface.
42 | Error() string
43 | // Fix suggestion.
44 | FixSuggestion() string
45 | // Generic event
46 | Event() tracing.GeneralEvent
47 | }
48 |
49 | type RuleRequirements struct {
50 | // Needed events for the rule.
51 | EventTypes []tracing.EventType
52 |
53 | // Need application profile.
54 | NeedApplicationProfile bool
55 | }
56 |
57 | type Rule interface {
58 | // Delete a rule instance.
59 | DeleteRule()
60 |
61 | // Rule Name.
62 | Name() string
63 |
64 | // Needed events for the rule.
65 | ProcessEvent(eventType tracing.EventType, event interface{}, appProfileAccess approfilecache.SingleApplicationProfileAccess, engineAccess EngineAccess) RuleFailure
66 |
67 | // Rule requirements.
68 | Requirements() RuleRequirements
69 |
70 | // Set rule parameters.
71 | SetParameters(parameters map[string]interface{})
72 |
73 | // Get rule parameters.
74 | GetParameters() map[string]interface{}
75 | }
76 |
77 | type BaseRule struct {
78 | // Mutex for protecting rule parameters.
79 | parametersMutex sync.RWMutex
80 | parameters map[string]interface{}
81 | }
82 |
83 | func (rule *BaseRule) SetParameters(parameters map[string]interface{}) {
84 | rule.parametersMutex.Lock()
85 | defer rule.parametersMutex.Unlock()
86 | rule.parameters = parameters
87 | }
88 |
89 | func (rule *BaseRule) GetParameters() map[string]interface{} {
90 | rule.parametersMutex.RLock()
91 | defer rule.parametersMutex.RUnlock()
92 | if rule.parameters == nil {
93 | rule.parameters = make(map[string]interface{})
94 | return rule.parameters
95 | }
96 |
97 | // Create a copy to avoid returning a reference to the internal map
98 | parametersCopy := make(map[string]interface{})
99 | for key, value := range rule.parameters {
100 | parametersCopy[key] = value
101 | }
102 |
103 | return parametersCopy
104 | }
105 |
106 | func (r *RuleDesciptor) HasTags(tags []string) bool {
107 | for _, tag := range tags {
108 | for _, ruleTag := range r.Tags {
109 | if tag == ruleTag {
110 | return true
111 | }
112 | }
113 | }
114 | return false
115 | }
116 |
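Together, `Rule`, `RuleFailure`, and `BaseRule` are the full contract a detection rule implements. As a hedged illustration (the `R9999` names below are hypothetical and not part of the shipped rule set), a minimal rule that flags every execve event could look like this sketch:

```go
package rule

import (
	"github.com/armosec/kubecop/pkg/approfilecache"
	"github.com/kubescape/kapprofiler/pkg/tracing"
)

// R9999ExampleRule alerts on every execve event (hypothetical, for illustration).
type R9999ExampleRule struct {
	BaseRule // inherits SetParameters/GetParameters
}

type R9999ExampleRuleFailure struct {
	RuleName         string
	RulePriority     int
	Err              string
	FixSuggestionMsg string
	FailureEvent     *tracing.ExecveEvent
}

func CreateRuleR9999Example() *R9999ExampleRule { return &R9999ExampleRule{} }

func (rule *R9999ExampleRule) Name() string { return "Example rule" }

func (rule *R9999ExampleRule) DeleteRule() {}

func (rule *R9999ExampleRule) ProcessEvent(eventType tracing.EventType, event interface{}, appProfileAccess approfilecache.SingleApplicationProfileAccess, engineAccess EngineAccess) RuleFailure {
	if eventType != tracing.ExecveEventType {
		return nil
	}
	execEvent, ok := event.(*tracing.ExecveEvent)
	if !ok {
		return nil
	}
	// Fire on every exec; a real rule would apply its detection logic here.
	return &R9999ExampleRuleFailure{
		RuleName:         rule.Name(),
		RulePriority:     RulePriorityLow,
		Err:              "Process launched: " + execEvent.PathName,
		FixSuggestionMsg: "Example only; unbind this rule if it is too noisy",
		FailureEvent:     execEvent,
	}
}

func (rule *R9999ExampleRule) Requirements() RuleRequirements {
	return RuleRequirements{
		EventTypes:             []tracing.EventType{tracing.ExecveEventType},
		NeedApplicationProfile: false,
	}
}

// RuleFailure implementation.
func (f *R9999ExampleRuleFailure) Name() string                { return f.RuleName }
func (f *R9999ExampleRuleFailure) Priority() int               { return f.RulePriority }
func (f *R9999ExampleRuleFailure) Error() string               { return f.Err }
func (f *R9999ExampleRuleFailure) FixSuggestion() string       { return f.FixSuggestionMsg }
func (f *R9999ExampleRuleFailure) Event() tracing.GeneralEvent { return f.FailureEvent.GeneralEvent }
```

Registering such a rule would additionally require a `RuleDesciptor` entry in the factory, mirroring the `R1002`/`R1006` descriptors above.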
--------------------------------------------------------------------------------
/pkg/engine/rule/types.go:
--------------------------------------------------------------------------------
1 | package rule
2 |
3 | import (
4 | corev1 "k8s.io/api/core/v1"
5 | )
6 |
7 | type EngineAccess interface {
8 | GetPodSpec(podName, namespace, containerID string) (*corev1.PodSpec, error)
9 | GetApiServerIpAddress() (string, error)
10 | }
11 |
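The tests in this package satisfy this interface with the `EngineAccessMock` from `mock.go`. For reference, a minimal stand-in could look like the following sketch (`engineAccessStub` is a hypothetical name, not the shipped mock):

```go
package rule

import (
	"errors"

	corev1 "k8s.io/api/core/v1"
)

// engineAccessStub is a no-op EngineAccess for rules that never consult the engine.
type engineAccessStub struct{}

// GetPodSpec returns an empty pod spec; a real implementation would resolve
// the named pod's spec, e.g. from an informer cache or the API server.
func (s *engineAccessStub) GetPodSpec(podName, namespace, containerID string) (*corev1.PodSpec, error) {
	return &corev1.PodSpec{}, nil
}

// GetApiServerIpAddress is unimplemented in this stub.
func (s *engineAccessStub) GetApiServerIpAddress() (string, error) {
	return "", errors.New("not implemented")
}
```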
--------------------------------------------------------------------------------
/pkg/engine/stats.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | "time"
7 |
8 | "github.com/kubescape/kapprofiler/pkg/tracing"
9 | )
10 |
11 | type EngineStatePrinter interface {
12 | Print(v ...any)
13 | }
14 |
15 | type EngineStats struct {
16 | printer EngineStatePrinter
17 | ebpfLock sync.RWMutex
18 | ebpfEventStats map[tracing.EventType]int
19 | ruleLock sync.RWMutex
20 | ruleStats map[string]int
21 | alertLock sync.RWMutex
22 | alertStats map[string]int
23 | stopChannel chan struct{}
24 | collectionInterval time.Duration
25 | }
26 |
27 | func CreateStatComponent(printer EngineStatePrinter, collectionInterval time.Duration) *EngineStats {
28 | // Start a goroutine that will periodically print the stats
29 | // Create a channel that will be used to stop the goroutine
30 | engine := EngineStats{stopChannel: make(chan struct{}),
31 | printer: printer,
32 | ebpfEventStats: make(map[tracing.EventType]int),
33 | ruleStats: make(map[string]int),
34 | alertStats: make(map[string]int),
35 | collectionInterval: collectionInterval,
36 | }
37 |
38 | go engine.StatProcessing()
39 | return &engine
40 | }
41 |
42 | func (e *EngineStats) DestroyStatComponent() {
43 | close(e.stopChannel)
44 | }
45 |
46 | func (e *EngineStats) StatProcessing() {
47 | for {
48 | select {
49 | case <-e.stopChannel:
50 | return
51 | case <-time.After(e.collectionInterval):
52 | // Copy the maps then clear them
53 | ebpfEventStatsCopy := make(map[tracing.EventType]int)
54 | ruleStatsCopy := make(map[string]int)
55 | alertStatsCopy := make(map[string]int)
56 | e.ebpfLock.RLock()
57 | for k, v := range e.ebpfEventStats {
58 | ebpfEventStatsCopy[k] = v
59 | }
60 | e.ebpfLock.RUnlock()
61 | e.ruleLock.RLock()
62 | for k, v := range e.ruleStats {
63 | ruleStatsCopy[k] = v
64 | }
65 | e.ruleLock.RUnlock()
66 | e.alertLock.RLock()
67 | for k, v := range e.alertStats {
68 | alertStatsCopy[k] = v
69 | }
70 | e.alertLock.RUnlock()
71 | e.ebpfEventStats = make(map[tracing.EventType]int)
72 | e.ruleStats = make(map[string]int)
73 | e.alertStats = make(map[string]int)
74 |
75 | // Convert the interval to a human-readable string
76 | intervalString := e.collectionInterval.String()
77 | text := fmt.Sprintf("Engine processing stats (per %s):\n", intervalString)
78 | text += "EBPF events:\n"
79 | for k, v := range ebpfEventStatsCopy {
80 | eventString := ""
81 | switch k {
82 | case tracing.ExecveEventType:
83 | eventString = "Execve"
84 | case tracing.OpenEventType:
85 | eventString = "Open"
86 | case tracing.NetworkEventType:
87 | eventString = "Network"
88 | case tracing.CapabilitiesEventType:
89 | eventString = "Capabilities"
90 | case tracing.DnsEventType:
91 | eventString = "DNS"
92 | case tracing.SyscallEventType:
93 | eventString = "Syscall"
94 | }
95 | text += fmt.Sprintf("\t%s: %d\n", eventString, v)
96 | }
97 | if len(ruleStatsCopy) > 0 {
98 | text += "Rules processed:\n"
99 | for k, v := range ruleStatsCopy {
100 | text += fmt.Sprintf("\t%s: %d\n", k, v)
101 | }
102 | } else {
103 | text += "No rules processed\n"
104 | }
105 | if len(alertStatsCopy) > 0 {
106 | text += "Alerts fired:\n"
107 | for k, v := range alertStatsCopy {
108 | text += fmt.Sprintf("\t%s: %d\n", k, v)
109 | }
110 | } else {
111 | text += "No alerts fired\n"
112 | }
113 | e.printer.Print(text)
114 | }
115 | }
116 | }
117 |
118 | func (e *EngineStats) ReportEbpfEvent(eventType tracing.EventType) {
119 | // Check if eventType exists in map
120 | e.ebpfLock.Lock()
121 | if _, ok := e.ebpfEventStats[eventType]; !ok {
122 | // Create it
123 | e.ebpfEventStats[eventType] = 0
124 | }
125 | // Increment the event counter (the map lock is already held)
126 | e.ebpfEventStats[eventType]++
127 | e.ebpfLock.Unlock()
128 | }
129 |
130 | func (e *EngineStats) ReportRuleProcessed(ruleID string) {
131 | // Check if ruleID exists in map
132 | e.ruleLock.Lock()
133 | if _, ok := e.ruleStats[ruleID]; !ok {
134 | // Create it
135 | e.ruleStats[ruleID] = 0
136 | }
137 | e.ruleStats[ruleID]++
138 | e.ruleLock.Unlock()
139 | }
140 |
141 | func (e *EngineStats) ReportRuleAlereted(ruleID string) {
142 | // Check if ruleID exists in map
143 | e.alertLock.Lock()
144 | if _, ok := e.alertStats[ruleID]; !ok {
145 | // Create it
146 | e.alertStats[ruleID] = 0
147 | }
148 | e.alertStats[ruleID]++
149 | e.alertLock.Unlock()
150 | }
151 |
--------------------------------------------------------------------------------
/pkg/engine/stats_test.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | "testing"
7 | "time"
8 |
9 | "github.com/kubescape/kapprofiler/pkg/tracing"
10 | )
11 |
12 | type MockStatPrinter struct {
13 | text string
14 | }
15 |
16 | func (m *MockStatPrinter) Print(v ...any) {
17 | m.text = fmt.Sprint(v...)
18 | }
19 |
20 | func (m *MockStatPrinter) GetText() string {
21 | return m.text
22 | }
23 |
24 | func TestEngineBasic(t *testing.T) {
25 | // Create a mock printer
26 | mockPrinter := MockStatPrinter{}
27 | engineStat := CreateStatComponent(&mockPrinter, 2*time.Second)
28 | defer engineStat.DestroyStatComponent()
29 |
30 | // Report some events
31 | engineStat.ReportEbpfEvent(tracing.ExecveEventType)
32 | engineStat.ReportEbpfEvent(tracing.ExecveEventType)
33 | engineStat.ReportEbpfEvent(tracing.ExecveEventType)
34 |
35 | // Report some rules
36 | engineStat.ReportRuleProcessed("rule1")
37 | engineStat.ReportRuleProcessed("rule1")
38 |
39 | // Report some alerts
40 | engineStat.ReportRuleAlereted("rule1")
41 | engineStat.ReportRuleAlereted("rule1")
42 |
43 | // Sleep for a while
44 | time.Sleep(3 * time.Second)
45 |
46 | // Check the output
47 | ebpfExpectation := "Execve: 3"
48 | ruleExpectation := "rule1: 2"
49 | alertExpectation := "rule1: 2"
50 | output := mockPrinter.GetText()
51 | // Check ebpf expectation
52 | if strings.Contains(output, ebpfExpectation) == false {
53 | t.Errorf("Expected to find %s in %s", ebpfExpectation, output)
54 | }
55 | // Check rule expectation
56 | if strings.Contains(output, ruleExpectation) == false {
57 | t.Errorf("Expected to find %s in %s", ruleExpectation, output)
58 | }
59 | // Check alert expectation
60 | if strings.Contains(output, alertExpectation) == false {
61 | t.Errorf("Expected to find %s in %s", alertExpectation, output)
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/pkg/exporters/README.md:
--------------------------------------------------------------------------------
1 | # KubeCop exporters package
2 | This package contains the exporters for the KubeCop project.
3 |
4 | ## Exporters
5 | The following exporters are available:
6 | - [Alertmanager](https://github.com/prometheus/alertmanager)
7 | - STD OUT
8 | - SYSLOG
9 | - CSV
10 | - HTTP endpoint
11 |
12 | ### Alertmanager
13 | The Alertmanager exporter is used to send alerts to the Alertmanager. The Alertmanager will then send the alerts to the configured receivers.
14 | This exporter supports multiple Alertmanagers. The alerts will be sent to all configured Alertmanagers.
15 | To enable the Alertmanager exporter, set the following environment variables:
16 | - `ALERTMANAGER_URLS`: The URLs of the Alertmanagers. Example: `localhost:9093` or `localhost:9093,localhost:9094`
17 |
18 | ### STD OUT
19 | The STD OUT exporter is used to print the alerts to the standard output. This exporter is enabled by default.
20 | To disable the STD OUT exporter, set the following environment variable:
21 | - `STDOUT_ENABLED`: Set to `false` to disable the STD OUT exporter.
22 |
23 | ### SYSLOG
24 | The SYSLOG exporter is used to send the alerts to a syslog server. This exporter is disabled by default.
25 | NOTE: The SYSLOG message format follows RFC 5424.
26 | To enable the SYSLOG exporter, set the following environment variables:
27 | - `SYSLOG_HOST`: The host of the syslog server. Example: `localhost:514`
28 | - `SYSLOG_PROTOCOL`: The protocol of the syslog server. Example: `tcp` or `udp`
29 |
30 | ### CSV
31 | The CSV exporter is used to write the alerts to a CSV file. This exporter is disabled by default.
32 | To enable the CSV exporter, set the following environment variables:
33 | - `EXPORTER_CSV_RULE_PATH`: The path to the CSV file of the failed rules. Example: `/tmp/alerts.csv`
34 | - `EXPORTER_CSV_MALWARE_PATH`: The path to the CSV file of the detected malware. Example: `/tmp/malware.csv`
35 |
36 | ### HTTP endpoint
37 | The HTTP endpoint exporter is used to send the alerts to an HTTP endpoint. This exporter is disabled by default.
38 | To enable the HTTP endpoint exporter, set the following environment variables:
39 | - `HTTP_ENDPOINT_URL`: The URL of the HTTP endpoint. Example: `http://localhost:8080/alerts`
40 | This will send a POST request to the specified URL with the alerts as the body.
41 | The alerts are limited to 10000 per minute. If the limit is reached, the exporter will stop sending alerts for the rest of the minute and will send a system alert to the configured HTTP endpoint.
42 |
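As a minimal sketch of wiring this up from Go (using only the environment variables documented above; `InitExporters` and `ExportersConfig` live in `exporters_bus.go`):

```go
package main

import (
	"os"

	"github.com/armosec/kubecop/pkg/exporters"
)

func main() {
	// Environment-driven configuration, using the variables documented above.
	os.Setenv("STDOUT_ENABLED", "true")
	os.Setenv("ALERTMANAGER_URLS", "localhost:9093,localhost:9094")
	os.Setenv("EXPORTER_CSV_RULE_PATH", "/tmp/alerts.csv")

	// Empty fields in ExportersConfig fall back to the environment.
	bus := exporters.InitExporters(exporters.ExportersConfig{})
	_ = bus // the engine calls bus.SendRuleAlert / bus.SendMalwareAlert on detections
}
```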
--------------------------------------------------------------------------------
/pkg/exporters/csv_exporter.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import (
4 | "encoding/csv"
5 | "fmt"
6 | "os"
7 |
8 | "github.com/armosec/kubecop/pkg/engine/rule"
9 | "github.com/armosec/kubecop/pkg/scan"
10 | "github.com/sirupsen/logrus"
11 | )
12 |
13 | // CsvExporter is an exporter that sends alerts to csv
14 | type CsvExporter struct {
15 | CsvRulePath string
16 | CsvMalwarePath string
17 | }
18 |
19 | // InitCsvExporter initializes a new CsvExporter
20 | func InitCsvExporter(csvRulePath, csvMalwarePath string) *CsvExporter {
21 | if csvRulePath == "" {
22 | csvRulePath = os.Getenv("EXPORTER_CSV_RULE_PATH")
23 | if csvRulePath == "" {
24 | logrus.Debugf("csv rule path not provided, rule alerts will not be exported to csv")
25 | return nil
26 | }
27 | }
28 |
29 | if csvMalwarePath == "" {
30 | csvMalwarePath = os.Getenv("EXPORTER_CSV_MALWARE_PATH")
31 | if csvMalwarePath == "" {
32 | logrus.Debugf("csv malware path not provided, malware alerts will not be exported to csv")
33 | }
34 | }
35 |
36 | if _, err := os.Stat(csvRulePath); os.IsNotExist(err) {
37 | writeRuleHeaders(csvRulePath)
38 | }
39 |
40 | if _, err := os.Stat(csvMalwarePath); os.IsNotExist(err) && csvMalwarePath != "" {
41 | writeMalwareHeaders(csvMalwarePath)
42 | }
43 |
44 | return &CsvExporter{
45 | CsvRulePath: csvRulePath,
46 | CsvMalwarePath: csvMalwarePath,
47 | }
48 | }
49 |
50 | // SendRuleAlert sends an alert to csv
51 | func (ce *CsvExporter) SendRuleAlert(failedRule rule.RuleFailure) {
52 | csvFile, err := os.OpenFile(ce.CsvRulePath, os.O_APPEND|os.O_WRONLY, 0644)
53 | if err != nil {
54 | logrus.Errorf("failed to initialize csv exporter: %v", err)
55 | return
56 | }
57 | defer csvFile.Close()
58 |
59 | csvWriter := csv.NewWriter(csvFile)
60 | defer csvWriter.Flush()
61 | csvWriter.Write([]string{
62 | failedRule.Name(),
63 | failedRule.Error(),
64 | failedRule.FixSuggestion(),
65 | failedRule.Event().PodName,
66 | failedRule.Event().ContainerName,
67 | failedRule.Event().Namespace,
68 | failedRule.Event().ContainerID,
69 | fmt.Sprintf("%d", failedRule.Event().Pid),
70 | failedRule.Event().Comm,
71 | failedRule.Event().Cwd,
72 | fmt.Sprintf("%d", failedRule.Event().Uid),
73 | fmt.Sprintf("%d", failedRule.Event().Gid),
74 | fmt.Sprintf("%d", failedRule.Event().Ppid),
75 | fmt.Sprintf("%d", failedRule.Event().MountNsID),
76 | fmt.Sprintf("%d", failedRule.Event().Timestamp),
77 | })
78 | }
79 |
80 | func writeRuleHeaders(csvPath string) {
81 | csvFile, err := os.OpenFile(csvPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
82 | if err != nil {
83 | logrus.Errorf("failed to initialize csv exporter: %v", err)
84 | return
85 | }
86 | defer csvFile.Close()
87 |
88 | csvWriter := csv.NewWriter(csvFile)
89 | defer csvWriter.Flush()
90 | csvWriter.Write([]string{
91 | "Rule Name",
92 | "Alert Message",
93 | "Fix Suggestion",
94 | "Pod Name",
95 | "Container Name",
96 | "Namespace",
97 | "Container ID",
98 | "PID",
99 | "Comm",
100 | "Cwd",
101 | "UID",
102 | "GID",
103 | "PPID",
104 | "Mount Namespace ID",
105 | "Timestamp",
106 | })
107 | }
108 |
109 | func (ce *CsvExporter) SendMalwareAlert(malwareDescription scan.MalwareDescription) {
110 | csvFile, err := os.OpenFile(ce.CsvMalwarePath, os.O_APPEND|os.O_WRONLY, 0644)
111 | if err != nil {
112 | logrus.Errorf("failed to initialize csv exporter: %v", err)
113 | return
114 | }
115 | defer csvFile.Close()
116 |
117 | csvWriter := csv.NewWriter(csvFile)
118 | defer csvWriter.Flush()
119 | csvWriter.Write([]string{
120 | malwareDescription.Name,
121 | malwareDescription.Description,
122 | malwareDescription.Path,
123 | malwareDescription.Hash,
124 | malwareDescription.Size,
125 | malwareDescription.Resource.String(),
126 | malwareDescription.Namespace,
127 | malwareDescription.PodName,
128 | malwareDescription.ContainerName,
129 | malwareDescription.ContainerID,
130 | fmt.Sprintf("%t", malwareDescription.IsPartOfImage),
131 | malwareDescription.ContainerImage,
132 | })
133 | }
134 |
135 | // Write Malware Headers
136 | func writeMalwareHeaders(csvPath string) {
137 | csvFile, err := os.OpenFile(csvPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
138 | if err != nil {
139 | logrus.Errorf("failed to initialize csv exporter: %v", err)
140 | return
141 | }
142 | defer csvFile.Close()
143 |
144 | csvWriter := csv.NewWriter(csvFile)
145 | defer csvWriter.Flush()
146 | csvWriter.Write([]string{
147 | "Malware Name",
148 | "Description",
149 | "Path",
150 | "Hash",
151 | "Size",
152 | "Resource",
153 | "Namespace",
154 | "Pod Name",
155 | "Container Name",
156 | "Container ID",
157 | "Is Part of Image",
158 | "Container Image",
159 | })
160 | }
161 |
--------------------------------------------------------------------------------
/pkg/exporters/csv_exporter_test.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import (
4 | "encoding/csv"
5 | "os"
6 | "testing"
7 |
8 | "github.com/armosec/kubecop/pkg/engine/rule"
9 | "github.com/armosec/kubecop/pkg/scan"
10 | "github.com/kubescape/kapprofiler/pkg/tracing"
11 | "k8s.io/apimachinery/pkg/runtime/schema"
12 | )
13 |
14 | func TestCsvExporter(t *testing.T) {
15 | csvExporter := InitCsvExporter("/tmp/kubecop.csv", "/tmp/kubecop-malware.csv")
16 | if csvExporter == nil {
17 | t.Fatalf("Expected csvExporter to not be nil")
18 | }
19 |
20 | csvExporter.SendRuleAlert(&rule.R0001UnexpectedProcessLaunchedFailure{
21 | RuleName: "testrule",
22 | Err: "Application profile is missing",
23 | FailureEvent: &tracing.ExecveEvent{GeneralEvent: tracing.GeneralEvent{
24 | ContainerName: "testcontainer", ContainerID: "testcontainerid", Namespace: "testnamespace", PodName: "testpodname"}},
25 | })
26 |
27 | csvExporter.SendMalwareAlert(scan.MalwareDescription{
28 | Name: "testmalware",
29 | Hash: "testhash",
30 | Description: "testdescription",
31 | Path: "testpath",
32 | Size: "2MB",
33 | Resource: schema.GroupVersionResource{
34 | Group: "testgroup",
35 | Version: "testversion",
36 | Resource: "testresource",
37 | },
38 | Namespace: "testnamespace",
39 | PodName: "testpodname",
40 | ContainerName: "testcontainername",
41 | ContainerID: "testcontainerid",
42 | })
43 |
44 | // Check if the csv file exists and contains the expected content (2 rows - header and the alert)
45 | if _, err := os.Stat("/tmp/kubecop.csv"); os.IsNotExist(err) {
46 | t.Fatalf("Expected csv file to exist")
47 | }
48 |
49 | if _, err := os.Stat("/tmp/kubecop-malware.csv"); os.IsNotExist(err) {
50 | t.Fatalf("Expected csv malware file to exist")
51 | }
52 |
53 | csvRuleFile, err := os.Open("/tmp/kubecop.csv")
54 | if err != nil {
55 | t.Fatalf("Expected csv file to open")
56 | }
57 |
58 | csvMalwareFile, err := os.Open("/tmp/kubecop-malware.csv")
59 | if err != nil {
60 | t.Fatalf("Expected csv malware file to open")
61 | }
62 |
63 | csvReader := csv.NewReader(csvRuleFile)
64 | csvMalwareReader := csv.NewReader(csvMalwareFile)
65 | csvMalwareData, err := csvMalwareReader.ReadAll()
66 | if err != nil {
67 | t.Fatalf("Expected csv malware file to be readable")
68 | }
69 |
70 | csvData, err := csvReader.ReadAll()
71 | if err != nil {
72 | t.Fatalf("Expected csv file to be readable")
73 | }
74 |
75 | if len(csvMalwareData) != 2 {
76 | t.Fatalf("Expected csv malware file to contain 2 rows")
77 | }
78 |
79 | if csvMalwareData[0][0] != "Malware Name" {
80 | t.Fatalf("Expected csv malware file to contain the malware name header")
81 | }
82 |
83 | if len(csvData) != 2 {
84 | t.Fatalf("Expected csv file to contain 2 rows")
85 | }
86 |
87 | if csvData[0][0] != "Rule Name" {
88 | t.Fatalf("Expected csv file to contain the rule name header")
89 | }
90 |
91 | csvRuleFile.Close()
92 | csvMalwareFile.Close()
93 |
94 | err = os.Remove("/tmp/kubecop.csv")
95 | if err != nil {
96 | t.Fatalf("Expected csv file to be removed")
97 | }
98 |
99 | err = os.Remove("/tmp/kubecop-malware.csv")
100 | if err != nil {
101 | t.Fatalf("Expected csv malware file to be removed")
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/pkg/exporters/exporter.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import (
4 | "github.com/armosec/kubecop/pkg/engine/rule"
5 | "github.com/armosec/kubecop/pkg/scan"
6 | )
7 |
8 | // generic exporter interface
9 | type Exporter interface {
10 | // SendRuleAlert sends an alert on failed rule to the exporter
11 | SendRuleAlert(failedRule rule.RuleFailure)
12 | // SendMalwareAlert sends an alert on malware detection to the exporter.
13 | SendMalwareAlert(scan.MalwareDescription)
14 | }
15 |
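Any type implementing these two methods can be added to the exporter bus. A hedged sketch of a custom exporter (`logOnlyExporter` is hypothetical, for illustration only):

```go
package exporters

import (
	log "github.com/sirupsen/logrus"

	"github.com/armosec/kubecop/pkg/engine/rule"
	"github.com/armosec/kubecop/pkg/scan"
)

// logOnlyExporter logs alert names and nothing else; it illustrates the
// minimal surface a new exporter has to implement.
type logOnlyExporter struct{}

func (e *logOnlyExporter) SendRuleAlert(failedRule rule.RuleFailure) {
	log.Infof("rule alert: %s (%s)", failedRule.Name(), failedRule.Error())
}

func (e *logOnlyExporter) SendMalwareAlert(malware scan.MalwareDescription) {
	log.Infof("malware alert: %s at %s", malware.Name, malware.Path)
}
```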
--------------------------------------------------------------------------------
/pkg/exporters/exporters_bus.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import (
4 | "os"
5 | "strings"
6 |
7 | log "github.com/sirupsen/logrus"
8 |
9 | "github.com/armosec/kubecop/pkg/engine/rule"
10 | "github.com/armosec/kubecop/pkg/scan"
11 | )
12 |
13 | type ExportersConfig struct {
14 | StdoutExporter *bool `yaml:"stdoutExporter"`
15 | AlertManagerExporterUrls string `yaml:"alertManagerExporterUrls"`
16 | SyslogExporter string `yaml:"syslogExporterURL"`
17 | CsvRuleExporterPath string `yaml:"CsvRuleExporterPath"`
18 | CsvMalwareExporterPath string `yaml:"CsvMalwareExporterPath"`
19 | HTTPExporterConfig *HTTPExporterConfig `yaml:"httpExporterConfig"`
20 | }
21 |
22 | // This file contains the single point of contact for all exporters;
23 | // it is used by the engine to send alerts to all exporters.
24 |
25 | const (
26 | // AlertManagerURLs separator delimiter.
27 | AlertManagerSepartorDelimiter = ","
28 | )
29 |
30 | type ExporterBus struct {
31 | // Exporters is a list of all exporters.
32 | exporters []Exporter
33 | }
34 |
35 | // InitExporters initializes all exporters.
36 | func InitExporters(exportersConfig ExportersConfig) ExporterBus {
37 | exporters := []Exporter{}
38 | alertManagerUrls := parseAlertManagerUrls(exportersConfig.AlertManagerExporterUrls)
39 | for _, url := range alertManagerUrls {
40 | alertMan := InitAlertManagerExporter(url)
41 | if alertMan != nil {
42 | exporters = append(exporters, alertMan)
43 | }
44 | }
45 | stdoutExp := InitStdoutExporter(exportersConfig.StdoutExporter)
46 | if stdoutExp != nil {
47 | exporters = append(exporters, stdoutExp)
48 | }
49 | syslogExp := InitSyslogExporter(exportersConfig.SyslogExporter)
50 | if syslogExp != nil {
51 | exporters = append(exporters, syslogExp)
52 | }
53 | csvExp := InitCsvExporter(exportersConfig.CsvRuleExporterPath, exportersConfig.CsvMalwareExporterPath)
54 | if csvExp != nil {
55 | exporters = append(exporters, csvExp)
56 | }
57 | if exportersConfig.HTTPExporterConfig == nil {
58 | if httpURL := os.Getenv("HTTP_ENDPOINT_URL"); httpURL != "" {
59 | exportersConfig.HTTPExporterConfig = &HTTPExporterConfig{}
60 | exportersConfig.HTTPExporterConfig.URL = httpURL
61 | }
62 | }
63 | if exportersConfig.HTTPExporterConfig != nil {
64 | if httpExp, err := InitHTTPExporter(*exportersConfig.HTTPExporterConfig); err != nil {
65 | log.WithError(err).Error("failed to initialize HTTP exporter")
66 | } else {
67 | exporters = append(exporters, httpExp)
68 | }
69 | }
70 |
71 | if len(exporters) == 0 {
72 | panic("no exporters were initialized")
73 | }
74 | log.Info("exporters initialized")
75 |
76 | return ExporterBus{exporters: exporters}
77 | }
78 |
79 | // parseAlertManagerUrls parses the alert manager URLs from the given string.
80 | func parseAlertManagerUrls(urls string) []string {
81 | if urls == "" {
82 | urls = os.Getenv("ALERTMANAGER_URLS")
83 | if urls == "" {
84 | return nil
85 | }
86 |
87 | return strings.Split(urls, AlertManagerSepartorDelimiter)
88 |
89 | }
90 | return strings.Split(urls, AlertManagerSepartorDelimiter)
91 | }
92 |
93 | func (e *ExporterBus) SendRuleAlert(failedRule rule.RuleFailure) {
94 | for _, exporter := range e.exporters {
95 | exporter.SendRuleAlert(failedRule)
96 | }
97 | }
98 |
99 | func (e *ExporterBus) SendMalwareAlert(malwareDescription scan.MalwareDescription) {
100 | for _, exporter := range e.exporters {
101 | exporter.SendMalwareAlert(malwareDescription)
102 | }
103 | }
104 |
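For completeness, a sketch of building the bus from an explicit `ExportersConfig` instead of environment variables (the paths here are illustrative):

```go
package main

import (
	"github.com/armosec/kubecop/pkg/exporters"
)

func main() {
	stdout := true
	// Explicit configuration; empty fields fall back to the corresponding
	// environment variables (see the package README).
	cfg := exporters.ExportersConfig{
		StdoutExporter:      &stdout,
		CsvRuleExporterPath: "/tmp/alerts.csv",
	}
	bus := exporters.InitExporters(cfg)

	// The engine hands every detection to the bus, which fans it out to all
	// configured exporters via SendRuleAlert / SendMalwareAlert.
	_ = bus
}
```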
--------------------------------------------------------------------------------
/pkg/exporters/stdout_exporter.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import (
4 | "os"
5 |
6 | log "github.com/sirupsen/logrus"
7 |
8 | "github.com/armosec/kubecop/pkg/engine/rule"
9 | "github.com/armosec/kubecop/pkg/scan"
10 | )
11 |
12 | type StdoutExporter struct {
13 | logger *log.Logger
14 | }
15 |
16 | func InitStdoutExporter(useStdout *bool) *StdoutExporter {
17 | if useStdout == nil {
18 | useStdout = new(bool)
19 | *useStdout = os.Getenv("STDOUT_ENABLED") != "false"
20 | }
21 | if !*useStdout {
22 | return nil
23 | }
24 |
25 | logger := log.New()
26 | logger.SetFormatter(&log.JSONFormatter{})
27 | logger.SetOutput(os.Stderr)
28 |
29 | return &StdoutExporter{
30 | logger: logger,
31 | }
32 | }
33 |
34 | func (exporter *StdoutExporter) SendRuleAlert(failedRule rule.RuleFailure) {
35 | exporter.logger.WithFields(log.Fields{
36 | "severity": failedRule.Priority(),
37 | "message": failedRule.Error(),
38 | "event": failedRule.Event(),
39 | }).Error(failedRule.Name())
40 | }
41 |
42 | func (exporter *StdoutExporter) SendMalwareAlert(malwareDescription scan.MalwareDescription) {
43 | exporter.logger.WithFields(log.Fields{
44 | "severity": 10,
45 | "description": malwareDescription.Description,
46 | "hash": malwareDescription.Hash,
47 | "path": malwareDescription.Path,
48 | "size": malwareDescription.Size,
49 | "pod": malwareDescription.PodName,
50 | "namespace": malwareDescription.Namespace,
51 | "container": malwareDescription.ContainerName,
52 | "containerID": malwareDescription.ContainerID,
53 | "isPartOfImage": malwareDescription.IsPartOfImage,
54 | "containerImage": malwareDescription.ContainerImage,
55 | "resource": malwareDescription.Resource,
56 | }).Error(malwareDescription.Name)
57 | }
58 |
--------------------------------------------------------------------------------
/pkg/exporters/stdout_exporter_test.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import (
4 | "os"
5 | "testing"
6 |
7 | "github.com/armosec/kubecop/pkg/engine/rule"
8 | "github.com/kubescape/kapprofiler/pkg/tracing"
9 | "github.com/stretchr/testify/assert"
10 | )
11 |
12 | func TestInitStdoutExporter(t *testing.T) {
13 | // Test when useStdout is nil
14 | var useStdout *bool
15 | exporter := InitStdoutExporter(nil)
16 | assert.NotNil(t, exporter)
17 |
18 | // Test when useStdout is true
19 | useStdout = new(bool)
20 | *useStdout = true
21 | exporter = InitStdoutExporter(useStdout)
22 | assert.NotNil(t, exporter)
23 | assert.NotNil(t, exporter.logger)
24 |
25 | // Test when useStdout is false
26 | useStdout = new(bool)
27 | *useStdout = false
28 | exporter = InitStdoutExporter(useStdout)
29 | assert.Nil(t, exporter)
30 |
31 | // Test when STDOUT_ENABLED environment variable is set to "false"
32 | os.Setenv("STDOUT_ENABLED", "false")
33 | exporter = InitStdoutExporter(nil)
34 | assert.Nil(t, exporter)
35 |
36 | // Test when STDOUT_ENABLED environment variable is set to "true"
37 | os.Setenv("STDOUT_ENABLED", "true")
38 | exporter = InitStdoutExporter(nil)
39 | assert.NotNil(t, exporter)
40 | assert.NotNil(t, exporter.logger)
41 |
42 | // Test when STDOUT_ENABLED environment variable is not set
43 | os.Unsetenv("STDOUT_ENABLED")
44 | exporter = InitStdoutExporter(nil)
45 | assert.NotNil(t, exporter)
46 | assert.NotNil(t, exporter.logger)
47 | }
48 |
49 | func TestStdoutExporter_SendAlert(t *testing.T) {
50 | exporter := InitStdoutExporter(nil)
51 | assert.NotNil(t, exporter)
52 |
53 | exporter.SendRuleAlert(&rule.R0001UnexpectedProcessLaunchedFailure{
54 | RuleName: "testrule",
55 | Err: "Application profile is missing",
56 | FailureEvent: &tracing.ExecveEvent{GeneralEvent: tracing.GeneralEvent{
57 | ContainerName: "testcontainer", ContainerID: "testcontainerid", Namespace: "testnamespace", PodName: "testpodname"}},
58 | })
59 | }
60 |
--------------------------------------------------------------------------------
/pkg/exporters/syslog_exporter_test.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import (
4 | "os"
5 | "testing"
6 | "time"
7 |
8 | log "github.com/sirupsen/logrus"
9 |
10 | "github.com/armosec/kubecop/pkg/engine/rule"
11 | "github.com/armosec/kubecop/pkg/scan"
12 | "github.com/kubescape/kapprofiler/pkg/tracing"
13 | "github.com/stretchr/testify/assert"
14 | "gopkg.in/mcuadros/go-syslog.v2"
15 | "k8s.io/apimachinery/pkg/runtime/schema"
16 | )
17 |
18 | func setupServer() *syslog.Server {
19 | channel := make(syslog.LogPartsChannel, 100)
20 | handler := syslog.NewChannelHandler(channel)
21 |
22 | server := syslog.NewServer()
23 | server.SetFormat(syslog.Automatic)
24 | server.SetHandler(handler)
25 | if err := server.ListenUDP("0.0.0.0:40000"); err != nil { // Due to permission issues, we can't listen on port 514 on the CI.
26 | log.Fatalf("failed to listen on UDP: %v", err)
27 | }
28 |
29 | if err := server.Boot(); err != nil {
30 | log.Fatalf("failed to boot the server: %v", err)
31 | }
32 |
33 | go func(channel syslog.LogPartsChannel) {
34 | for logParts := range channel {
35 | if assert.NotNil(nil, logParts) {
36 | if assert.NotNil(nil, logParts["content"]) {
37 | assert.NotEmpty(nil, logParts["content"].(string))
38 | }
39 | } else {
40 | os.Exit(1)
41 | }
42 | }
43 | }(channel)
44 |
45 | go server.Wait()
46 |
47 | return server
48 | }
49 |
50 | func TestSyslogExporter(t *testing.T) {
51 | // Set up a mock syslog server
52 | server := setupServer()
53 | defer server.Kill()
54 |
55 | // Set up environment variables for the exporter
56 | syslogHost := "127.0.0.1:40000"
57 | os.Setenv("SYSLOG_HOST", syslogHost)
58 | os.Setenv("SYSLOG_PROTOCOL", "udp")
59 |
60 | // Initialize the syslog exporter
61 | syslogExp := InitSyslogExporter("")
62 | if syslogExp == nil {
63 | t.Errorf("Expected syslogExp to not be nil")
64 | }
65 |
66 | // Send an alert
67 | syslogExp.SendRuleAlert(&rule.R0001UnexpectedProcessLaunchedFailure{
68 | RuleName: "testrule",
69 | Err: "Application profile is missing",
70 | FailureEvent: &tracing.ExecveEvent{GeneralEvent: tracing.GeneralEvent{
71 | ContainerName: "testcontainer", ContainerID: "testcontainerid", Namespace: "testnamespace", PodName: "testpodname"}},
72 | })
73 |
74 | syslogExp.SendRuleAlert(&rule.R0001UnexpectedProcessLaunchedFailure{
75 | RuleName: "testrule",
76 | Err: "Application profile is missing",
77 | FailureEvent: &tracing.ExecveEvent{GeneralEvent: tracing.GeneralEvent{
78 | ContainerName: "testcontainer", ContainerID: "testcontainerid", Namespace: "testnamespace", PodName: "testpodname"}},
79 | })
80 |
81 | syslogExp.SendMalwareAlert(scan.MalwareDescription{
82 | Name: "testmalware",
83 | Hash: "testhash",
84 | Description: "testdescription",
85 | Path: "testpath",
86 | Size: "2MB",
87 | Resource: schema.GroupVersionResource{
88 | Group: "testgroup",
89 | Version: "testversion",
90 | Resource: "testresource",
91 | },
92 | Namespace: "testnamespace",
93 | PodName: "testpodname",
94 | ContainerName: "testcontainername",
95 | ContainerID: "testcontainerid",
96 | })
97 |
98 | // Allow some time for the message to reach the mock syslog server
99 | time.Sleep(200 * time.Millisecond)
100 | }
101 |
--------------------------------------------------------------------------------
/pkg/exporters/utils.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import "github.com/armosec/kubecop/pkg/engine/rule"
4 |
5 | func PriorityToStatus(priority int) string {
6 | switch priority {
7 | case rule.RulePriorityNone:
8 | return "none"
9 | case rule.RulePriorityLow:
10 | return "low"
11 | case rule.RulePriorityMed:
12 | return "medium"
13 | case rule.RulePriorityHigh:
14 | return "high"
15 | case rule.RulePriorityCritical:
16 | return "critical"
17 | case rule.RulePrioritySystemIssue:
18 | return "system_issue"
19 | default:
20 | if priority < rule.RulePriorityMed {
21 | return "low"
22 | } else if priority < rule.RulePriorityHigh {
23 | return "medium"
24 | } else if priority < rule.RulePriorityCritical {
25 | return "high"
26 | }
27 | return "unknown"
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/pkg/exporters/utils_test.go:
--------------------------------------------------------------------------------
1 | package exporters
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/armosec/kubecop/pkg/engine/rule"
7 | )
8 |
9 | func TestPriorityToStatus(t *testing.T) {
10 | tests := []struct {
11 | name string
12 | priority int
13 | want string
14 | }{
15 | {
16 | name: "none",
17 | priority: rule.RulePriorityNone,
18 | want: "none",
19 | },
20 | {
21 | name: "low",
22 | priority: rule.RulePriorityLow,
23 | want: "low",
24 | },
25 | {
26 | name: "medium",
27 | priority: rule.RulePriorityMed,
28 | want: "medium",
29 | },
30 | {
31 | name: "high",
32 | priority: rule.RulePriorityHigh,
33 | want: "high",
34 | },
35 | {
36 | name: "critical",
37 | priority: rule.RulePriorityCritical,
38 | want: "critical",
39 | },
40 | {
41 | name: "system_issue",
42 | priority: rule.RulePrioritySystemIssue,
43 | want: "system_issue",
44 | },
45 | {
46 | name: "unknown",
47 | priority: 100,
48 | want: "unknown",
49 | },
50 | {
51 | name: "low2",
52 | priority: rule.RulePriorityMed - 1,
53 | want: "low",
54 | },
55 | {
56 | name: "medium2",
57 | priority: rule.RulePriorityHigh - 1,
58 | want: "medium",
59 | },
60 | {
61 | name: "high2",
62 | priority: rule.RulePriorityCritical - 1,
63 | want: "high",
64 | },
65 | }
66 | for _, tt := range tests {
67 | t.Run(tt.name, func(t *testing.T) {
68 | if got := PriorityToStatus(tt.priority); got != tt.want {
69 | t.Errorf("PriorityToStatus() = %v, want %v", got, tt.want)
70 | }
71 | })
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/README.md:
--------------------------------------------------------------------------------
1 | # Runtime rule alert binding
2 | In order to determine which rules should be applied to which workloads, we need to bind the rules to the workloads. This is done by creating a `RuntimeRuleAlertBinding` object that binds a `Rule` to certain pods.
3 | This CRD needs to be created by the user. The `RuntimeRuleAlertBinding` object is inspired by K8s native policies and bindings, such as the [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/#networkpolicy-resource) and [ValidatingAdmissionPolicyBinding](https://www.armosec.io/glossary/kubernetes-validation-admission-policies/) objects, and so it contains the following fields:
4 | - `namespaceSelector` - a [selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements) that selects the namespaces that the rule should be applied to. If not specified, the rule will be applied to all namespaces.
5 | - `podSelector` - a [selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements) that selects the pods that the rule should be applied to. If not specified, the rule will be applied to all pods.
6 | - `rules` - a list of rules that should be applied to the selected pods.
7 |
8 | Each `rule` in the list contains the following fields:
9 | - `ruleName` (mandatory) - the name of the rule to be applied.
10 | - `severity` - (optional) the severity of the alert that will be generated if the rule is violated. Each rule has a default severity, but it can be overridden by the user.
11 | - `parameters` - (optional) a list of parameters that can be passed to the rule. Each rule has a default set of parameters, but they can be overridden by the user.
12 |
13 | ## Example
14 | The first step is to apply the `RuntimeRuleAlertBinding` CRD to the cluster:
15 | ```bash
16 | kubectl apply -f chart/kubecop/crds/runtime-rule-binding.crd.yaml
17 | ```
18 | The second step is to create the `RuntimeRuleAlertBinding` object:
19 | ```yaml
20 | apiVersion: kubescape.io/v1
21 | kind: RuntimeRuleAlertBinding
22 | metadata:
23 | name: single-rule-for-app-nginx-default-ns
24 | spec:
25 | namespaceSelector:
26 | matchLabels:
27 | kubernetes.io/metadata.name: default
28 | podSelector:
29 | matchExpressions:
30 | - key: app
31 | operator: In
32 | values:
33 | - nginx
34 | rules:
35 | - ruleName: "Unexpected process launched"
36 |
37 | ```
38 |
39 | In the above example, we bind the rule `Unexpected process launched` to the pods in the namespace `default`. The rule will be applied to all the pods that are labeled with `app: nginx` in the namespace `default`.
40 |
41 | ## How does it work?
42 | Once the user applies a change to a `RuntimeRuleAlertBinding` object, or any container in the cluster is created/updated/deleted, KubeCop is notified and updates the rules that are applied to each pod. KubeCop then applies the rules to the pods and generates alerts if needed.
43 |
44 | There are two flows that can trigger KubeCop to apply the rules to the pods:
45 | 1. The user applies a change to a `RuntimeRuleAlertBinding` object. Those changes are handled by the [RuleBindingK8sStore](store.go#L195).
46 | 2. A container in the cluster is created/updated/deleted. Those changes are usually handled by the [Engine](../engine/engine.go#L20).
47 |
48 | In the first flow, the `RuleBindingK8sStore` notifies the subscribers (callback functions) about the change.
49 | Each subscriber then gets the list of pods it needs to apply the rules to.
50 | For each pod, the subscriber calls `GetRulesForPod` to ask the `RuleBindingK8sStore` for the rules that should be applied to that pod.
51 |
52 | In the second flow, the watcher of the container, usually the [Engine](../engine/engine.go#L20), calls `GetRulesForPod` to ask the `RuleBindingK8sStore` for the rules that should be applied to the pod.
53 |
54 | The caller of `GetRulesForPod` then handles the rules for the pod.
55 |
56 | If more than one `RuntimeRuleAlertBinding` object applies to a pod, the rules from all of them are aggregated.
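
The selector semantics described above (an omitted selector matches everything) map directly onto the standard apimachinery helpers. A minimal sketch of the matching step, using the `RuntimeAlertRuleBinding` type from `structures.go` (`bindingMatchesPod` is a hypothetical helper, not the store's actual implementation):

```go
package rulebindingstore

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// bindingMatchesPod reports whether a binding applies to a pod: both the
// namespace selector and the pod selector must match, and an empty selector
// matches everything.
func bindingMatchesPod(binding *RuntimeAlertRuleBinding, podLabels, namespaceLabels map[string]string) (bool, error) {
	nsSelector, err := metav1.LabelSelectorAsSelector(&binding.Spec.NamespaceSelector)
	if err != nil {
		return false, err
	}
	podSelector, err := metav1.LabelSelectorAsSelector(&binding.Spec.PodSelector)
	if err != nil {
		return false, err
	}
	return nsSelector.Matches(labels.Set(namespaceLabels)) && podSelector.Matches(labels.Set(podLabels)), nil
}
```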
--------------------------------------------------------------------------------
/pkg/rulebindingstore/structures.go:
--------------------------------------------------------------------------------
1 | package rulebindingstore
2 |
3 | import (
4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
5 | )
6 |
7 | type RuntimeAlertRuleBindingList struct {
8 | metav1.TypeMeta `json:",inline"`
9 | metav1.ListMeta `json:"metadata,omitempty"`
10 | // Items is the list of RuntimeAlertRuleBinding
11 | Items []RuntimeAlertRuleBinding `json:"items"`
12 | }
13 |
14 | type RuntimeAlertRuleBinding struct {
15 | metav1.TypeMeta `json:",inline"`
16 | metav1.ObjectMeta `json:"metadata,omitempty"`
17 | // Specification of the desired behavior of the RuntimeAlertRuleBinding
18 | Spec RuntimeAlertRuleBindingSpec `json:"spec,omitempty"`
19 | }
20 |
21 | type RuntimeAlertRuleBindingSpec struct {
22 | Rules []RuntimeAlertRuleBindingRule `json:"rules" yaml:"rules"`
23 | PodSelector metav1.LabelSelector `json:"podSelector" yaml:"podSelector"`
24 | NamespaceSelector metav1.LabelSelector `json:"namespaceSelector" yaml:"namespaceSelector"`
25 | }
26 |
27 | type RuntimeAlertRuleBindingRule struct {
28 | RuleName string `json:"ruleName" yaml:"ruleName"`
29 | RuleID string `json:"ruleID" yaml:"ruleID"`
30 | RuleTags []string `json:"ruleTags" yaml:"ruleTags"`
31 | Severity string `json:"severity" yaml:"severity"`
32 | Parameters map[string]interface{} `json:"parameters" yaml:"parameters"`
33 | }
34 |
35 | type RuleBindingChangedHandler func(ruleBinding RuntimeAlertRuleBinding)
36 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/testdata/rulebindingsfiles/all-rules-all-pods.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-rules-all-pods
5 | spec:
6 | namespaceSelector:
7 | podSelector:
8 | rules:
9 | - ruleName: "Unexpected process launched"
10 | - ruleName: "Unexpected file access"
11 | - ruleName: "Unexpected system call"
12 | - ruleName: "Unexpected capability used"
13 | - ruleName: "Unexpected domain request"
14 | - ruleName: "Unexpected Service Account Token Access"
15 | - ruleName: "Exec from malicious source"
16 | - ruleName: "Kernel Module Load"
17 | - ruleName: "Exec Binary Not In Base Image"
18 | - ruleName: "Malicious SSH Connection"
19 | - ruleName: "Kubernetes Client Executed"
20 | - ruleName: "Exec from mount"
21 | - ruleName: "Unshare System Call usage"
22 | - ruleName: "Crypto Miner detected"
23 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/testdata/rulebindingsfiles/all-rules-for-app-nginx-default-ns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-rules-for-app-nginx-default-ns
5 | spec:
6 | namespaceSelector:
7 | matchExpressions:
8 | - key: "kubernetes.io/metadata.name"
9 | operator: In
10 | values:
11 | - default
12 | podSelector:
13 | matchLabels:
14 | app: nginx
15 | rules:
16 | - ruleName: "Unexpected process launched"
17 | - ruleName: "Unexpected file access"
18 | - ruleName: "Unexpected system call"
19 | - ruleName: "Unexpected capability used"
20 | - ruleName: "Unexpected domain request"
21 | - ruleName: "Unexpected Service Account Token Access"
22 | - ruleName: "Exec from malicious source"
23 | - ruleName: "Kernel Module Load"
24 | - ruleName: "Exec Binary Not In Base Image"
25 | - ruleName: "Malicious SSH Connection"
26 | - ruleName: "Kubernetes Client Executed"
27 | - ruleName: "Exec from mount"
28 | - ruleName: "Unshare System Call usage"
29 | - ruleName: "Crypto Miner detected"
30 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/testdata/rulebindingsfiles/all-rules-for-app-nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-rules-for-app-nginx
5 | spec:
6 | podSelector:
7 | matchLabels:
8 | app: nginx
9 | rules:
10 | - ruleName: "Unexpected process launched"
11 | - ruleName: "Unexpected file access"
12 | - ruleName: "Unexpected system call"
13 | - ruleName: "Unexpected capability used"
14 | - ruleName: "Unexpected domain request"
15 | - ruleName: "Unexpected Service Account Token Access"
16 | - ruleName: "Exec from malicious source"
17 | - ruleName: "Kernel Module Load"
18 | - ruleName: "Exec Binary Not In Base Image"
19 | - ruleName: "Malicious SSH Connection"
20 | - ruleName: "Kubernetes Client Executed"
21 | - ruleName: "Exec from mount"
22 | - ruleName: "Unshare System Call usage"
23 | - ruleName: "Crypto Miner detected"
24 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/testdata/rulebindingsfiles/all-rules-for-default-ns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-rules-for-default-ns
5 | spec:
6 | namespaceSelector:
7 | matchLabels:
8 | kubernetes.io/metadata.name: default
9 | rules:
10 | - ruleName: "Unexpected process launched"
11 | - ruleName: "Unexpected file access"
12 | - ruleName: "Unexpected system call"
13 | - ruleName: "Unexpected capability used"
14 | - ruleName: "Unexpected domain request"
15 | - ruleName: "Unexpected Service Account Token Access"
16 | - ruleName: "Exec from malicious source"
17 | - ruleName: "Kernel Module Load"
18 | - ruleName: "Exec Binary Not In Base Image"
19 | - ruleName: "Malicious SSH Connection"
20 | - ruleName: "Kubernetes Client Executed"
21 | - ruleName: "Exec from mount"
22 | - ruleName: "Unshare System Call usage"
23 | - ruleName: "Crypto Miner detected"
24 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/testdata/rulebindingsfiles/no-rules-for-app-nginx-default-ns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: no-rules-for-app-nginx-default-ns
5 | spec:
6 | namespaceSelector:
7 | matchExpressions:
8 | - key: "kubernetes.io/metadata.name"
9 | operator: In
10 | values:
11 | - default
12 | podSelector:
13 | matchLabels:
14 | app: nginx
15 | rules:
16 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/testdata/rulebindingsfiles/single-rule-for-app-nginx-default-ns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: single-rule-for-app-nginx-default-ns
5 | spec:
6 | namespaceSelector:
7 | matchLabels:
8 | kubernetes.io/metadata.name: default
9 | podSelector:
10 | matchExpressions:
11 | - key: app
12 | operator: In
13 | values:
14 | - nginx
15 | rules:
16 | - ruleName: "Unexpected process launched"
17 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/testdata/ruleparamsfiles/rule-file-params.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: single-rule-for-app-nginx-default-ns-params
5 | spec:
6 | namespaceSelector:
7 | matchLabels:
8 | kubernetes.io/metadata.name: default
9 | podSelector:
10 | matchExpressions:
11 | - key: app
12 | operator: In
13 | values:
14 | - nginx
15 | rules:
16 | - ruleName: "Unexpected file access"
17 | parameters:
18 | ignoreMounts: true
19 | ignorePrefixes: ["/proc"]
20 |
--------------------------------------------------------------------------------
/pkg/rulebindingstore/testdata/ruleparamsfiles/rule-ssh-params.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: single-rule-for-app-nginx-default-ns-params-ssh
5 | spec:
6 | namespaceSelector:
7 | matchLabels:
8 | kubernetes.io/metadata.name: default
9 | podSelector:
10 | matchExpressions:
11 | - key: app
12 | operator: In
13 | values:
14 | - nginx
15 | rules:
16 | - ruleName: "Malicious SSH Connection"
17 | parameters:
18 | allowedPorts: [22, 2222]
19 |
--------------------------------------------------------------------------------
/pkg/scan/clamav/config.go:
--------------------------------------------------------------------------------
1 | package clamav
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/armosec/kubecop/pkg/exporters"
8 | "k8s.io/client-go/kubernetes"
9 | )
10 |
11 | type ClamAVConfig struct {
12 | Host string
13 | Port string
14 | ScanInterval string
15 | RetryDelay time.Duration
16 | MaxRetries int
17 | ExporterBus *exporters.ExporterBus
18 | KubernetesClient *kubernetes.Clientset
19 | }
20 |
21 | func (c *ClamAVConfig) Address() string {
22 | return fmt.Sprintf("tcp://%s:%s", c.Host, c.Port)
23 | }
24 |
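25 | // Example (hypothetical values; Address only formats the string, it does not dial):
26 | //
27 | //	cfg := ClamAVConfig{Host: "clamav.kubescape.svc.cluster.local", Port: "3310"}
28 | //	cfg.Address() // "tcp://clamav.kubescape.svc.cluster.local:3310"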
--------------------------------------------------------------------------------
/pkg/scan/fileutil.go:
--------------------------------------------------------------------------------
1 | package scan
2 |
3 | import (
4 | "crypto/sha256"
5 | "encoding/hex"
6 | "io"
7 | "os"
8 | )
9 |
10 | // Get the size of the given file.
11 | func GetFileSize(path string) (int64, error) {
12 | file, err := os.Open(path)
13 | if err != nil {
14 | return 0, err
15 | }
16 | 	defer file.Close()
17 | // Get the file size.
18 | fileInfo, err := file.Stat()
19 | if err != nil {
20 | return 0, err
21 | }
22 |
23 | return fileInfo.Size(), nil
24 | }
25 |
26 | // Calculate the SHA256 hash of the given file.
27 | func CalculateFileHash(path string) (string, error) {
28 | file, err := os.Open(path)
29 | if err != nil {
30 | return "", err
31 | }
32 | defer file.Close()
33 |
34 | hash := sha256.New()
35 | if _, err := io.Copy(hash, file); err != nil {
36 | return "", err
37 | }
38 |
39 | hashInBytes := hash.Sum(nil)
40 | hashString := hex.EncodeToString(hashInBytes)
41 |
42 | return hashString, nil
43 | }
44 |
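45 | // Usage sketch (assumes the file exists and is readable):
46 | //
47 | //	size, _ := GetFileSize("/etc/hostname")
48 | //	hash, _ := CalculateFileHash("/etc/hostname")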
--------------------------------------------------------------------------------
/pkg/scan/k8sutil.go:
--------------------------------------------------------------------------------
1 | package scan
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 |
9 | "k8s.io/client-go/kubernetes"
10 | )
11 |
12 | // GetContainerImageID returns the image ID of the given container in the given pod.
13 | func GetContainerImageID(clientset *kubernetes.Clientset, namespace, podName, containerName string) (string, error) {
14 | pod, err := clientset.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{})
15 | if err != nil {
16 | fmt.Printf("Error getting pod: %v\n", err)
17 | return "", err
18 | }
19 |
20 | for _, container := range pod.Spec.Containers {
21 | if container.Name == containerName {
22 | return container.Image, nil
23 | }
24 | }
25 |
26 | return "", fmt.Errorf("could not find container %s in pod %s", containerName, podName)
27 | }
28 |
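29 | // Usage sketch (hypothetical pod and container names; clientset must be
30 | // backed by an in-cluster config or a kubeconfig):
31 | //
32 | //	image, err := GetContainerImageID(clientset, "default", "nginx-abc123", "nginx")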
--------------------------------------------------------------------------------
/pkg/scan/overlay.go:
--------------------------------------------------------------------------------
1 | package scan
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | log "github.com/sirupsen/logrus"
8 |
9 | "github.com/prometheus/procfs"
10 | )
11 |
12 | type Overlay struct {
13 | // UpperDir is the path to the upper directory of the overlay filesystem.
14 | UpperDir string
15 | // WorkDir is the path to the work directory of the overlay filesystem.
16 | WorkDir string
17 | // MergedDir is the path to the merged directory of the overlay filesystem.
18 | MergedDir string
19 | 	// LowerDirs are the paths to the lower directories of the overlay filesystem.
20 | LowerDirs []string
21 | }
22 |
23 | func GetOverlayLayer(path string, pid uint32) string {
24 | process, err := procfs.NewProc(int(pid))
25 | if err != nil {
26 | log.Infof("Error creating procfs for PID %d: %s\n", pid, err)
27 | return ""
28 | }
29 |
30 | // Get the overlay mount points for the process.
31 | overlay, err := getOverlayMountPoints(&process)
32 | if err != nil {
33 | log.Errorf("Error getting overlay mount points for PID %d: %s\n", pid, err)
34 | return ""
35 | }
36 |
37 | // Check if the path is in one of the overlay mount points.
38 | for _, lowerDir := range overlay.LowerDirs {
39 | if strings.HasPrefix(path, lowerDir) {
40 | return "lower"
41 | }
42 | }
43 |
44 | if strings.HasPrefix(path, overlay.UpperDir) {
45 | return "upper"
46 | } else if strings.HasPrefix(path, overlay.WorkDir) {
47 | return "work"
48 | } else if strings.HasPrefix(path, overlay.MergedDir) {
49 | return "merged"
50 | }
51 |
52 | return ""
53 | }
54 |
55 | func getOverlayMountPoints(process *procfs.Proc) (Overlay, error) {
56 | 	// Read the mount info for the process and find the overlay mount point (there should be only one).
57 | if mounts, err := process.MountInfo(); err == nil {
58 | for _, mount := range mounts {
59 | if mount.FSType == "overlay" {
60 | 				// Build the overlay paths; MergedDir is derived from upperdir by swapping "diff" for "merged".
61 | return Overlay{
62 | mount.SuperOptions["upperdir"],
63 | mount.SuperOptions["workdir"],
64 | strings.Replace(mount.SuperOptions["upperdir"], "diff", "merged", 1),
65 | strings.Split(mount.SuperOptions["lowerdir"], ":"),
66 | }, nil
67 | }
68 | }
69 | }
70 |
71 | return Overlay{}, fmt.Errorf("failed to get mount point for pid %d", process.PID)
72 | }
73 |
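74 | // Illustrative overlay super options as parsed above (paths shortened):
75 | //
76 | //	lowerdir=/var/lib/overlay/l1:/var/lib/overlay/l2
77 | //	upperdir=/var/lib/overlay/42/diff
78 | //	workdir=/var/lib/overlay/42/work
79 | //
80 | // With the "diff" -> "merged" substitution, MergedDir resolves to
81 | // /var/lib/overlay/42/merged.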
--------------------------------------------------------------------------------
/pkg/scan/types.go:
--------------------------------------------------------------------------------
1 | package scan
2 |
3 | import (
4 | "k8s.io/apimachinery/pkg/runtime/schema"
5 | )
6 |
7 | type MalwareDescription struct {
8 | // Name of the malware
9 | Name string `json:"malware_name"`
10 | // Description of the malware
11 | Description string `json:"description"`
12 | // Path to the file that was infected
13 | Path string `json:"path"`
14 | // Hash of the file that was infected
15 | Hash string `json:"hash"`
16 | // Size of the file that was infected
17 | Size string `json:"size"`
18 | // Is part of the image
19 | IsPartOfImage bool `json:"is_part_of_image"`
20 | // K8s resource that was infected
21 | Resource schema.GroupVersionResource `json:"resource"`
22 | // K8s namespace that was infected
23 | Namespace string `json:"namespace"`
24 | // K8s pod that was infected
25 | 	PodName string `json:"pod_name"`
26 | // K8s container that was infected
27 | ContainerName string `json:"container_name"`
28 | // K8s container ID that was infected
29 | ContainerID string `json:"container_id"`
30 | // K8s container image that was infected
31 | ContainerImage string `json:"container_image"`
32 | }
33 |
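34 | // Example serialized alert (illustrative values only):
35 | //
36 | //	{"malware_name": "Unix.Trojan.Agent", "path": "/tmp/payload",
37 | //	 "size": "1.2MB", "is_part_of_image": false,
38 | //	 "namespace": "default", "container_name": "nginx"}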
--------------------------------------------------------------------------------
/resources/clamav/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:23.10 as builder
2 | ARG SOCKS_PROXY
3 | ENV SOCKS_PROXY=$SOCKS_PROXY
4 | RUN apt-get update && apt-get install -y clamav wget curl
5 | COPY create-filtered-clam-db.sh main.cvd /
6 | RUN /create-filtered-clam-db.sh
7 |
8 |
9 | FROM clamav/clamav-debian:1.2.0-6_base
10 | RUN apt-get update && apt-get install -y netcat
11 | COPY init.sh /init
12 | RUN mkdir -p /var/lib/clamav || true
13 | COPY --from=builder main.cud /var/lib/clamav/main.cud
14 | RUN chmod +x /init && chown clamav:clamav /var/lib/clamav
15 | ENV CLAMAV_NO_FRESHCLAMD=true
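16 | # Build sketch (run from resources/clamav; SOCKS_PROXY is optional):
17 | #   docker build -t clamav-filtered --build-arg SOCKS_PROXY=host:port .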
--------------------------------------------------------------------------------
/resources/clamav/create-filtered-clam-db.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 |
4 | # Create a temporary directory:
5 | mkdir -p tmp
6 |
7 | # Get into it
8 | pushd tmp
9 |
10 | # Check if main.cvd exists
11 | if [ -f ../main.cvd ]
12 | then
13 | echo "main.cvd already exists"
14 | cp ../main.cvd .
15 | else
16 | echo "main.cvd does not exist, downloading it"
17 | # Download the main.cvd file
18 | if [ -z "$SOCKS_PROXY" ]
19 | then
20 | curl -o main.cvd -L -f http://database.clamav.net/main.cvd
21 | else
22 | curl --socks5-hostname $SOCKS_PROXY -o main.cvd -L -f http://database.clamav.net/main.cvd
23 | fi
24 | return_code=$?
25 | if [ $return_code -ne 0 ]
26 | then
27 |         echo "Failed to download main.cvd (curl exit code: $return_code)"
28 | exit 1
29 | fi
30 | fi
31 |
32 |
33 | # unpack the main.cvd
34 | sigtool --unpack main.cvd
35 | if [ $? -ne 0 ]
36 | then
37 | echo "Failed to unpack main.cvd"
38 | exit 1
39 | fi
40 | rm main.cvd
41 |
42 | # Loop over all the files in the tmp directory
43 | for file in *
44 | do
45 | # If the file has one line, skip
46 | if [ $(wc -l < $file) -eq 1 ]
47 | then
48 | echo "Skipping $file"
49 | continue
50 | fi
51 |
52 | # If the file is the COPYING or main.cvd file, skip
53 | if [ $(basename $file) == "main.cvd" ]
54 | then
55 | echo "Skipping $file"
56 | continue
57 | fi
58 | if [ $(basename $file) == "COPYING" ]
59 | then
60 | echo "Skipping $file"
61 | continue
62 | fi
63 |
64 |     # Drop Windows- and macOS-specific signatures (lines containing "Win." or "Osx.")
65 | grep -v -E "Win\.|Osx\." $file > $file.tmp
66 | mv $file.tmp $file
67 | # If the file is empty, delete it
68 | if [ $(wc -l < $file) -eq 0 ]
69 | then
70 | echo "Deleting $file"
71 | rm $file
72 | fi
73 | done
74 |
75 |
76 | sigtool --version
77 | printf "slashben\n" | sigtool --build=main.cud --unsigned
78 | if [ $? -ne 0 ]
79 | then
80 | echo "Failed to build main.cud"
81 | exit 1
82 | fi
83 |
84 |
85 | # Get back
86 | popd
87 |
88 | cp tmp/main.cud main.cud
89 |
90 | # Clean up
91 | rm -rf tmp
92 |
--------------------------------------------------------------------------------
/resources/clamav/init.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # SPDX-License-Identifier: GPL-2.0-or-later
3 | #
4 | # Copyright (C) 2021 Olliver Schinagl
5 | # Copyright (C) 2021-2023 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6 | #
7 | # A beginning user should be able to docker run image bash (or sh) without
8 | # needing to learn about --entrypoint
9 | # https://github.com/docker-library/official-images#consistency
10 |
11 | set -eu
12 |
13 | if [ ! -d "/run/clamav" ]; then
14 | install -d -g "clamav" -m 775 -o "clamav" "/run/clamav"
15 | fi
16 |
17 | # Assign ownership to the database directory, just in case it is a mounted volume
18 | chown -R clamav:clamav /var/lib/clamav
19 |
20 | # run command if it is not starting with a "-" and is an executable in PATH
21 | if [ "${#}" -gt 0 ] && \
22 | [ "${1#-}" = "${1}" ] && \
23 | command -v "${1}" > "/dev/null" 2>&1; then
24 | # Ensure healthcheck always passes
25 | CLAMAV_NO_CLAMD="true" exec "${@}"
26 | else
27 | if [ "${#}" -ge 1 ] && \
28 | [ "${1#-}" != "${1}" ]; then
29 | # If an argument starts with "-" pass it to clamd specifically
30 | exec clamd "${@}"
31 | fi
32 | # else default to running clamav's servers
33 |
34 | # Help tiny-init a little
35 | mkdir -p "/run/lock"
36 | ln -f -s "/run/lock" "/var/lock"
37 |
38 | # Ensure we have some virus data, otherwise clamd refuses to start
39 | if [ "${CLAMAV_NO_FRESHCLAMD:-false}" != "true" ]; then
40 | if [ ! -f "/var/lib/clamav/main.cvd" ]; then
41 | echo "Updating initial database"
42 | freshclam --foreground --stdout
43 | fi
44 | fi
45 |
46 | # Start freshclamd if not disabled
47 | if [ "${CLAMAV_NO_FRESHCLAMD:-false}" != "true" ]; then
48 | echo "Starting Freshclamd"
49 | freshclam \
50 | --checks="${FRESHCLAM_CHECKS:-1}" \
51 | --daemon \
52 | --foreground \
53 | --stdout \
54 | --user="clamav" \
55 | &
56 | fi
57 |
58 | if [ "${CLAMAV_NO_CLAMD:-false}" != "true" ]; then
59 | echo "Starting ClamAV"
60 | if [ -S "/run/clamav/clamd.sock" ]; then
61 | unlink "/run/clamav/clamd.sock"
62 | fi
63 | if [ -S "/tmp/clamd.sock" ]; then
64 | unlink "/tmp/clamd.sock"
65 | fi
66 | clamd --foreground &
67 | while [ ! -S "/run/clamav/clamd.sock" ] && [ ! -S "/tmp/clamd.sock" ]; do
68 | if [ "${_timeout:=0}" -gt "${CLAMD_STARTUP_TIMEOUT:=1800}" ]; then
69 | echo
70 | echo "Failed to start clamd"
71 | exit 1
72 | fi
73 | printf "\r%s" "Socket for clamd not found yet, retrying (${_timeout}/${CLAMD_STARTUP_TIMEOUT}) ..."
74 | sleep 1
75 | _timeout="$((_timeout + 1))"
76 | done
77 | echo "socket found, clamd started."
78 | fi
79 |
80 | if [ "${CLAMAV_NO_MILTERD:-true}" != "true" ]; then
81 | echo "Starting clamav milterd"
82 | clamav-milter &
83 | fi
84 |
85 | # Wait forever (or until canceled)
86 | exec tail -f "/dev/null"
87 | fi
88 |
89 | exit 0
90 |
--------------------------------------------------------------------------------
/resources/latest/kubecop-values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 | repository: quay.io/armosec/kubecop
3 | tag: latest
4 | pullPolicy: IfNotPresent
5 | kubecop:
6 | alertmanager:
7 | enabled: true
8 | endpoints: "alertmanager-operated.monitoring.svc.cluster.local:9093"
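9 | # Usage sketch:
10 | #   helm install kubecop chart/kubecop -f resources/latest/kubecop-values.yaml -n kubescape --create-namespace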
--------------------------------------------------------------------------------
/resources/system-tests/kubecop-values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 | repository: kubecop
3 | tag: latest
4 | pullPolicy: IfNotPresent
5 | kubecop:
6 | recording:
7 | samplingInterval: 60s
8 | finalizationDuration: 120s
9 | finalizationJitter: 1s
10 | alertmanager:
11 | enabled: true
12 | endpoints: "alertmanager-operated.monitoring.svc.cluster.local:9093"
--------------------------------------------------------------------------------
/scripts/install-in-pod.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # Check that we got a filename as an argument
5 | if [ -z "$1" ]; then
6 |     echo "Usage: $0 <filename>"
7 | exit 1
8 | fi
9 |
10 | export NAMESPACE=kapprofiler-dev-env
11 | # for each pod in the namespace, copy the file to the pod
12 | POD_LIST=$(kubectl -n $NAMESPACE get pods -l k8s-app=kapprofiler-dev-env -o jsonpath="{.items[*].metadata.name}")
13 | for POD in $POD_LIST; do
14 | kubectl exec -it $POD -n $NAMESPACE -- rm -f /bin/$1
15 | echo "Copying $1 to $POD"
16 | kubectl cp $1 $NAMESPACE/$POD:/bin/$1
17 | done
18 | # export POD=$(kubectl -n $NAMESPACE get pods -l k8s-app=kapprofiler-dev-env -o jsonpath="{.items[0].metadata.name}")
19 |
20 | # kubectl cp $1 $NAMESPACE/$POD:/bin/$1
--------------------------------------------------------------------------------
/scripts/open-shell-in-pod.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export NAMESPACE=kapprofiler-dev-env
4 |
5 | # Get the list of pods in the namespace
6 | POD_LIST=$(kubectl -n $NAMESPACE get pods -l k8s-app=kapprofiler-dev-env -o jsonpath="{.items[*].metadata.name}")
7 |
8 | # Take the first pod from the list
9 | POD_NAME=$(echo $POD_LIST | cut -d' ' -f1)
10 |
11 | # If there are multiple pods, let the user choose one
12 | if [ $(echo $POD_LIST | wc -w) -gt 1 ]; then
13 | echo "Multiple pods found in the namespace $NAMESPACE. Please choose one:"
14 | select POD_NAME in $POD_LIST; do
15 | break
16 | done
17 | fi
18 |
19 | kubectl exec -it $POD_NAME -n $NAMESPACE -- bash
20 |
--------------------------------------------------------------------------------
/scripts/resolve-pools-addresses.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | domains=("2cryptocalc.com" "2miners.com" "antpool.com" "asia1.ethpool.org" "bohemianpool.com" "botbox.dev" "btm.antpool.com" "c3pool.com" "c4pool.org" "ca.minexmr.com" "cn.stratum.slushpool.com" "dash.antpool.com" "data.miningpoolstats.stream" "de.minexmr.com" "eth-ar.dwarfpool.com" "eth-asia.dwarfpool.com" "eth-asia1.nanopool.org" "eth-au.dwarfpool.com" "eth-au1.nanopool.org" "eth-br.dwarfpool.com" "eth-cn.dwarfpool.com" "eth-cn2.dwarfpool.com" "eth-eu.dwarfpool.com" "eth-eu1.nanopool.org" "eth-eu2.nanopool.org" "eth-hk.dwarfpool.com" "eth-jp1.nanopool.org" "eth-ru.dwarfpool.com" "eth-ru2.dwarfpool.com" "eth-sg.dwarfpool.com" "eth-us-east1.nanopool.org" "eth-us-west1.nanopool.org" "eth-us.dwarfpool.com" "eth-us2.dwarfpool.com" "eth.antpool.com" "eu.stratum.slushpool.com" "eu1.ethermine.org" "eu1.ethpool.org" "fastpool.xyz" "fr.minexmr.com" "kriptokyng.com" "mine.moneropool.com" "mine.xmrpool.net" "miningmadness.com" "monero.cedric-crispin.com" "monero.crypto-pool.fr" "monero.fairhash.org" "monero.hashvault.pro" "monero.herominers.com" "monerod.org" "monerohash.com" "moneroocean.stream" "monerop.com" "multi-pools.com" "p2pool.io" "pool.kryptex.com" "pool.minexmr.com" "pool.monero.hashvault.pro" "pool.rplant.xyz" "pool.supportxmr.com" "pool.xmr.pt" "prohashing.com" "rx.unmineable.com" "sg.minexmr.com" "sg.stratum.slushpool.com" "skypool.org" "solo-xmr.2miners.com" "ss.antpool.com" "stratum-btm.antpool.com" "stratum-dash.antpool.com" "stratum-eth.antpool.com" "stratum-ltc.antpool.com" "stratum-xmc.antpool.com" "stratum-zec.antpool.com" "stratum.antpool.com" "supportxmr.com" "trustpool.cc" "us-east.stratum.slushpool.com" "us1.ethermine.org" "us1.ethpool.org" "us2.ethermine.org" "us2.ethpool.org" "web.xmrpool.eu" "www.domajorpool.com" "www.dxpool.com" "www.mining-dutch.nl" "xmc.antpool.com" "xmr-asia1.nanopool.org" "xmr-au1.nanopool.org" "xmr-eu1.nanopool.org" "xmr-eu2.nanopool.org" "xmr-jp1.nanopool.org" "xmr-us-east1.nanopool.org" "xmr-us-west1.nanopool.org" "xmr.2miners.com" "xmr.crypto-pool.fr" "xmr.gntl.uk" "xmr.nanopool.org" "xmr.pool-pay.com" "xmr.pool.minergate.com" "xmr.solopool.org" "xmr.volt-mine.com" "xmr.zeropool.io" "zec.antpool.com" "zergpool.com")
4 |
5 | for domain in "${domains[@]}"; do
6 | ip=$(nslookup "$domain" | awk '/^Address: / { print $2 }')
7 | echo "$domain: $ip"
8 | done
9 |
--------------------------------------------------------------------------------
/scripts/run-system-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script is used to run system tests on a single machine.
4 |
5 |
6 | kubectl port-forward svc/alertmanager-operated 9093:9093 -n monitoring &
7 | ALERT_MANAGER_PORT_PID=$!
8 | sleep 5
9 | # Check that port forwarding is working.
10 | if ! curl -s http://localhost:9093/api/v2/alerts > /dev/null ; then
11 | kill $ALERT_MANAGER_PORT_PID
12 | exit 1
13 | fi
14 | kubectl port-forward -n monitoring svc/prometheus-kube-prometheus-prometheus 9090:9090 &
15 | PROMETHEUS_PORT_PID=$!
16 | sleep 5
17 | # Check that port forwarding is working; if not, clean up the earlier port-forward as well.
18 | if ! curl -s "http://localhost:9090/api/v1/query?query=up" > /dev/null; then
19 | kill $ALERT_MANAGER_PORT_PID
20 | kill $PROMETHEUS_PORT_PID
21 | exit 1
22 | fi
23 |
24 | python3 system-tests/run.py
25 | test_result=$?
26 |
27 | kill $ALERT_MANAGER_PORT_PID
28 | kill $PROMETHEUS_PORT_PID
29 | exit $test_result
--------------------------------------------------------------------------------
/scripts/setup-system-test-cluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script is used to set up a system test cluster on a single machine.
4 |
5 | # Function to print message and exit.
6 | function error_exit {
7 | kubectl delete namespace monitoring 2> /dev/null
8 | kubectl delete namespace kubescape 2> /dev/null
9 | echo "$1" 1>&2
10 | exit 1
11 | }
12 |
13 | # Check that kubectl is installed.
14 | if ! [ -x "$(command -v kubectl)" ]; then
15 | echo "kubectl is not installed. Please install kubectl and try again."
16 | exit 1
17 | fi
18 |
19 | # Check that either miniKube or kind is installed.
20 | if ! [ -x "$(command -v minikube)" ] && ! [ -x "$(command -v kind)" ]; then
21 | echo "Either minikube or kind is not installed. Please install one of them and try again."
22 | exit 1
23 | fi
24 |
25 | # Check whether this is a kind cluster by inspecting the current context.
26 | if [ "$(kubectl config current-context)" == "kind-kind" ]; then
27 | echo "Kind cluster detected."
28 | # Load the docker image into the kind cluster.
29 | kind load docker-image kubecop:latest || error_exit "Failed to load docker image into kind cluster."
30 | fi
31 |
32 | # Check whether this is a minikube cluster by inspecting the current context.
33 | if [ "$(kubectl config current-context)" == "minikube" ]; then
34 | echo "Minikube cluster detected."
35 | # Load the docker image into the minikube cluster.
36 | minikube image load kubecop:latest || error_exit "Failed to load docker image into minikube cluster."
37 | fi
38 |
39 |
40 | # Check that helm is installed.
41 | if ! [ -x "$(command -v helm)" ]; then
42 | echo "helm is not installed. Please install helm and try again."
43 | exit 1
44 | fi
45 |
46 | # Add prometheus helm repo and install prometheus.
47 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts || error_exit "Failed to add prometheus helm repo."
48 | helm repo update || error_exit "Failed to update helm repos."
49 | helm install prometheus prometheus-community/kube-prometheus-stack \
50 | --namespace monitoring --create-namespace --wait --timeout 5m \
51 | --set grafana.enabled=true || error_exit "Failed to install prometheus."
52 |
53 | # Check that the prometheus pod is running
54 | kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=prometheus -n monitoring --timeout=300s || error_exit "Prometheus did not start."
55 |
56 | # Install KubeCop with the system-test values
57 | helm install kubecop chart/kubecop --set kubecop.prometheusExporter.enabled=true --set kubecop.pprofserver.enabled=true --set clamAV.enabled=true \
58 | -f resources/system-tests/kubecop-values.yaml \
59 | -n kubescape --create-namespace --wait --timeout 5m || error_exit "Failed to install kubecop."
60 |
61 | # Check that the kubecop pod is running
62 | kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=kubecop -n kubescape --timeout=300s || error_exit "Kubecop did not start."
63 |
64 | echo "System test cluster setup complete."
65 |
66 | # port forward prometheus
67 | # kubectl port-forward -n monitoring svc/prometheus-kube-prometheus-prometheus 9090:9090 &
68 | # kubectl port-forward svc/alertmanager-operated 9093:9093 -n monitoring &
69 |
--------------------------------------------------------------------------------
/scripts/validate-crd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # print commands and their arguments as they are executed
4 | set -x
5 | # Store the original checksum of the file
6 | original_checksum=$(md5sum chart/kubecop/charts/clustered-crds/crds/runtime-rule-binding.crd.yaml | awk '{print $1}')
7 | original_checksum_namespaced=$(md5sum chart/kubecop/charts/namespaced-crds/crds/runtime-rule-binding.crd.yaml | awk '{print $1}')
8 |
9 | # Run md_gen
10 | go run cmd/rule_md_gen/generate_md.go
11 | if [ $? -ne 0 ]; then
12 | echo "md_gen failed."
13 | exit 1
14 | fi
15 |
16 | # Store the new checksum of the file
17 | new_checksum=$(md5sum chart/kubecop/charts/clustered-crds/crds/runtime-rule-binding.crd.yaml | awk '{print $1}')
18 | new_checksum_namespaced=$(md5sum chart/kubecop/charts/namespaced-crds/crds/runtime-rule-binding.crd.yaml | awk '{print $1}')
19 |
20 | # Compare the checksums
21 | if [[ "$original_checksum" == "$new_checksum" ]]; then
22 |     echo "The clustered CRD file is identical before and after running md_gen."
23 | else
24 |     echo "The clustered CRD file has changed after running md_gen."
25 | exit 1
26 | fi
27 |
28 | if [[ "$original_checksum_namespaced" == "$new_checksum_namespaced" ]]; then
29 |     echo "The namespaced CRD file is identical before and after running md_gen."
30 | else
31 |     echo "The namespaced CRD file has changed after running md_gen."
32 | exit 1
33 | fi
--------------------------------------------------------------------------------
/system-tests/all_alerts_from_malicious_app.py:
--------------------------------------------------------------------------------
1 | from kubernetes_wrappers import Namespace, Workload, KubernetesObjects
2 | import os
3 | import time
4 |
5 | def all_alerts_from_malicious_app(test_framework):
6 | # Create a namespace
7 | ns = Namespace(name=None)
8 | profiles_namespace_name = os.environ.get("STORE_NAMESPACE")
9 | profiles_namespace = None
10 | if profiles_namespace_name:
11 | profiles_namespace = Namespace(name=profiles_namespace_name)
12 | ns = Namespace(name='test-namespace')
13 |
14 | if ns:
15 | # Create application profile
16 | app_profile = None
17 | if profiles_namespace_name:
18 | app_profile = KubernetesObjects(namespace=profiles_namespace,object_file=os.path.join(test_framework.get_root_directoty(),"resources/malicious-job-app-profile-namespaced.yaml"))
19 | else:
20 | app_profile = KubernetesObjects(namespace=ns,object_file=os.path.join(test_framework.get_root_directoty(),"resources/malicious-job-app-profile.yaml"))
21 |
22 | # Create a workload
23 | workload = Workload(namespace=ns,workload_file=os.path.join(test_framework.get_root_directoty(),"resources/malicious-job.yaml"))
24 |
25 | # Wait for the workload to be ready
26 | workload.wait_for_ready(timeout=120)
27 |
28 |         # Wait 20 seconds for the alerts to be generated
29 | print("Waiting 20 seconds for the alerts to be generated")
30 | time.sleep(20)
31 |
32 |         # This application should have signaled all alert types by now
33 |
34 |         # Get all the alerts for the namespace
35 | alerts = test_framework.get_alerts(namespace=ns)
36 |
37 | # Validate that all alerts are signaled
38 | expected_alerts = [
39 | "Unexpected process launched",
40 | "Unexpected file access",
41 | "Unexpected system call",
42 | "Unexpected capability used",
43 | "Unexpected domain request",
44 | "Unexpected Service Account Token Access",
45 | "Kubernetes Client Executed",
46 | "Exec from malicious source",
47 | "Kernel Module Load",
48 | "Exec Binary Not In Base Image",
49 | # "Malicious SSH Connection", (This rule needs to be updated to be more reliable).
50 | "Exec from mount",
51 | "Crypto Miner detected"
52 | ]
53 |
54 | for alert in alerts:
55 | rule_name = alert['labels']['rule_name']
56 | if rule_name in expected_alerts:
57 | expected_alerts.remove(rule_name)
58 |
59 | assert len(expected_alerts) == 0, f"Expected alerts {expected_alerts} were not signaled"
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
--------------------------------------------------------------------------------
/system-tests/basic_alert_tests.py:
--------------------------------------------------------------------------------
1 | from kubernetes_wrappers import Namespace, Workload, KubernetesObjects
2 | import os
3 | import time
4 |
5 | def basic_alert_test(test_framework):
6 | print("Running basic alert test")
7 |
8 | # Create a namespace
9 | ns = Namespace(name=None)
10 | profiles_namespace_name = os.environ.get("STORE_NAMESPACE")
11 | profiles_namespace = None
12 | if profiles_namespace_name:
13 | profiles_namespace = Namespace(name=profiles_namespace_name)
14 | ns = Namespace(name='test-namespace')
15 |
16 | if ns:
17 | # Create application profile
18 | app_profile = None
19 | if profiles_namespace_name:
20 | app_profile = KubernetesObjects(namespace=profiles_namespace,object_file=os.path.join(test_framework.get_root_directoty(),"resources/nginx-app-profile-namespaced.yaml"))
21 | else:
22 | app_profile = KubernetesObjects(namespace=ns,object_file=os.path.join(test_framework.get_root_directoty(),"resources/nginx-app-profile.yaml"))
23 |
24 | # Create a workload
25 | workload = Workload(namespace=ns,workload_file=os.path.join(test_framework.get_root_directoty(),"resources/nginx-deployment.yaml"))
26 |
27 | # Wait for the workload to be ready
28 | workload.wait_for_ready(timeout=120)
29 |
30 | # Exec into the nginx pod and create a file in the /tmp directory
31 | workload.exec_into_pod(command=["touch", "/tmp/nginx-test"])
32 |
33 | # Wait for the alert to be signaled
34 | time.sleep(5)
35 |
36 |         # Get all the alerts for the namespace
37 | alerts = test_framework.get_alerts(namespace=ns)
38 |
39 | # Validate that all alerts are signaled
40 | expected_alerts = [
41 | "Unexpected process launched"
42 | ]
43 |
44 | for alert in alerts:
45 | rule_name = alert['labels']['rule_name']
46 | if rule_name in expected_alerts:
47 | expected_alerts.remove(rule_name)
48 |
49 | assert len(expected_alerts) == 0, f"Expected alerts {expected_alerts} were not signaled"
50 |
51 |
--------------------------------------------------------------------------------
/system-tests/basic_load_activities.py:
--------------------------------------------------------------------------------
1 | from kubernetes_wrappers import Namespace, Workload, KubernetesObjects
2 | import os
3 | import time
4 |
5 | def basic_load_activities(test_framework):
6 | print("Running basic load activities test")
7 |
8 | # Create a namespace
9 | ns = Namespace(name=None)
10 | profiles_namespace_name = os.environ.get("STORE_NAMESPACE")
11 | profiles_namespace = None
12 | if profiles_namespace_name:
13 | profiles_namespace = Namespace(name=profiles_namespace_name)
14 | ns = Namespace(name='test-namespace')
15 |
16 | if ns:
17 | # Create application profile
18 | app_profile = None
19 | if profiles_namespace_name:
20 | app_profile = KubernetesObjects(namespace=profiles_namespace,object_file=os.path.join(test_framework.get_root_directoty(),"resources/nginx-app-profile-namespaced.yaml"))
21 | else:
22 | app_profile = KubernetesObjects(namespace=ns,object_file=os.path.join(test_framework.get_root_directoty(),"resources/nginx-app-profile.yaml"))
23 |
24 | # Create a workload
25 | nginx = Workload(namespace=ns,workload_file=os.path.join(test_framework.get_root_directoty(),"resources/nginx-deployment.yaml"))
26 |
27 | # Wait for the workload to be ready
28 | nginx.wait_for_ready(timeout=120)
29 |
30 | # Create loader
31 | loader = Workload(namespace=ns,workload_file=os.path.join(test_framework.get_root_directoty(),"resources/locust-deployment.yaml"))
32 |
33 | # Wait for the workload to be ready
34 | loader.wait_for_ready(timeout=120)
35 |
36 | time_start = time.time()
37 |
38 | # Create a load of 5 minutes
39 | time.sleep(300)
40 |
41 | time_end= time.time()
42 |
43 | # Get the average CPU usage of KubeCop
44 | cpu_usage = test_framework.get_average_cpu_usage(namespace='kubescape', workload="kubecop", time_start=time_start, time_end=time_end)
45 |
46 | assert cpu_usage < 0.1, f"CPU usage of KubeCop is too high. CPU usage is {cpu_usage}"
47 |
--------------------------------------------------------------------------------
/system-tests/change_rule_binding_in_the_middle.py:
--------------------------------------------------------------------------------
1 | # system tests for rule binding.
2 | #
3 | # These tests are run as part of the system-tests/run.py script.
4 | #
5 | # The tests are run in the following order:
6 | # 1. rule_binding_apply_test
7 | # 2. rule_binding_delete_test
8 | # 3. rule_binding_update_test
9 |
10 | import subprocess
11 | import sys
12 | import time
13 | import kill_in_the_middle
14 |
15 | alert_manager_url = "http://localhost:9093"
16 |
17 |
--------------------------------------------------------------------------------
/system-tests/crashlooping-container/crashlooping-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: bash-sleep-deployment
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: bash-sleep
10 | template:
11 | metadata:
12 | labels:
13 | app: bash-sleep
14 | spec:
15 | containers:
16 | - name: bash-container
17 | image: bash
18 | command: ["sh", "-c", "sleep 1; exit 1"]
19 |
--------------------------------------------------------------------------------
/system-tests/creation_app_profile_memory_leak.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import os
3 | import time
4 |
5 | from promtopic import save_plot_png, send_promql_query_to_prom
6 | from kubernetes_wrappers import Namespace, Workload, KubernetesObjects
7 |
8 | def install_app_no_application_profile_no_leak(test_framework):
9 | print("Running install app no application profile test")
10 |
11 | # Create a namespace
12 | ns = Namespace(name=None)
13 | namespace = ns.name()
14 | profiles_namespace = None
15 | if os.environ.get("STORE_NAMESPACE"):
16 | profiles_namespace = Namespace(name=os.environ.get("STORE_NAMESPACE"))
17 | ns = Namespace(name='test-namespace')
18 | namespace = ns.name()
19 |
20 | try:
21 | time_start = time.time()
22 |
23 | # Install nginx in kubernetes by applying the nginx deployment yaml without pre-creating the profile
24 | subprocess.check_call(["kubectl", "-n", namespace , "apply", "-f", "dev/nginx/nginx-deployment.yaml"])
25 |
26 | # Wait for nginx to be ready
27 | subprocess.check_call(["kubectl", "-n", namespace , "wait", "--for=condition=ready", "pod", "-l", "app=nginx", "--timeout=120s"])
28 |
29 | # Get the pod name of the nginx pod
30 | nginx_pod_name = subprocess.check_output(["kubectl", "-n", namespace , "get", "pod", "-l", "app=nginx", "-o", "jsonpath='{.items[0].metadata.name}'"]).decode("utf-8").strip("'")
31 |
32 | print("Waiting 150 seconds for the final application profile to be created")
33 | time.sleep(150)
34 |
35 | get_proc = None
36 | if os.environ.get("STORE_NAMESPACE"):
37 | get_proc = subprocess.run(["kubectl", "-n", os.environ.get("STORE_NAMESPACE"), "get", "applicationprofiles", f"pod-{nginx_pod_name}-test-namespace", "-oyaml"], capture_output=True)
38 | else:
39 | get_proc = subprocess.run(["kubectl", "-n", namespace, "get", "applicationprofiles", f"pod-{nginx_pod_name}", "-oyaml"], capture_output=True)
40 |         assert get_proc.returncode == 0 and 'kapprofiler.kubescape.io/final: "true"' in get_proc.stdout.decode("utf-8"), f"final applicationprofile ({get_proc.returncode}) was not created: {get_proc.stdout.decode('utf-8')}"
41 |
42 | # wait for 60 seconds for the GC to run, so the memory leak can be detected
43 | time.sleep(60)
44 |
45 | # Get kubecop pod name
46 | kc_pod_name = subprocess.check_output(["kubectl", "-n", "kubescape", "get", "pods", "-l", "app.kubernetes.io/name=kubecop", "-o", "jsonpath='{.items[0].metadata.name}'"], universal_newlines=True).strip("'")
47 | # Build query to get memory usage
48 | query = 'sum(container_memory_working_set_bytes{pod="%s"}) by (container)'%kc_pod_name
49 | timestamps, values = send_promql_query_to_prom("install_app_no_application_profile_no_leak_mem", query, time_start,time_end=time.time())
50 | save_plot_png("install_app_no_application_profile_no_leak_mem", values=values,timestamps=timestamps, metric_name='Memory Usage (bytes)')
51 |
52 |         # Validate that there is no memory leak, tolerating up to 20 MB of growth
53 | assert int(values[-1]) <= int(values[0]) + 20000000, f"Memory leak detected in kubecop pod. Memory usage at the end of the test is {values[-1]} and at the beginning of the test is {values[0]}"
54 |
55 |
56 | except Exception as e:
57 | print("Exception: ", e)
58 | # Delete the namespace
59 | subprocess.check_call(["kubectl", "delete", "namespace", namespace])
60 | if profiles_namespace:
61 | subprocess.run(["kubectl", "delete", "applicationprofile", f"pod-{nginx_pod_name}-test-namespace", "-n", os.environ.get("STORE_NAMESPACE")])
62 | subprocess.run(["kubectl", "delete", "applicationprofile", f"deployment-nginx-deployment-test-namespace", "-n", os.environ.get("STORE_NAMESPACE")])
63 | return 1
64 |
65 | subprocess.check_call(["kubectl", "delete", "namespace", namespace])
66 | if profiles_namespace:
67 | subprocess.run(["kubectl", "delete", "applicationprofile", f"pod-{nginx_pod_name}-test-namespace", "-n", os.environ.get("STORE_NAMESPACE")])
68 | subprocess.run(["kubectl", "delete", "applicationprofile", f"deployment-nginx-deployment-test-namespace", "-n", os.environ.get("STORE_NAMESPACE")])
69 | return 0
70 |
71 |
--------------------------------------------------------------------------------
/system-tests/finalization_alert_test.py:
--------------------------------------------------------------------------------
1 | from kubernetes_wrappers import Namespace, Workload, KubernetesObjects
2 | import os
3 | import time
4 |
5 | def finalization_alert_test(test_framework):
6 | print("Running simple finalization alert test")
7 |
8 | # Create a namespace
9 | ns = Namespace(name=None)
10 | profiles_namespace = None
11 | if os.environ.get("STORE_NAMESPACE"):
12 | profiles_namespace = Namespace(name=os.environ.get("STORE_NAMESPACE"))
13 | ns = Namespace(name='test-namespace')
14 |
15 | if ns:
16 | # Create a workload
17 | workload = Workload(namespace=ns,workload_file=os.path.join(test_framework.get_root_directoty(),"resources/nginx-deployment.yaml"))
18 |
19 | # Wait for the workload to be ready
20 | workload.wait_for_ready(timeout=120)
21 |
22 | # Wait for 160 seconds to allow the profiling process to end and do finalization
23 | time.sleep(160)
24 |
25 | # Exec into the nginx pod and create a file in the /tmp directory
26 | workload.exec_into_pod(command=["touch", "/tmp/nginx-test"])
27 |
28 | # Wait for the alert to be signaled
29 | time.sleep(5)
30 |
31 |         # Get all the alerts for the namespace
32 | alerts = test_framework.get_alerts(namespace=ns)
33 |
34 | # Validate that all alerts are signaled
35 | expected_alerts = [
36 | "Unexpected process launched"
37 | ]
38 |
39 | for alert in alerts:
40 | rule_name = alert['labels']['rule_name']
41 | if rule_name in expected_alerts:
42 | expected_alerts.remove(rule_name)
43 |
44 | assert len(expected_alerts) == 0, f"Expected alerts {expected_alerts} were not signaled"
45 |
46 |
--------------------------------------------------------------------------------
/system-tests/kubernetes_wrappers.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import random
3 | import string
4 | import yaml
5 |
6 | class Namespace:
7 | def __init__(self, name):
8 |         if name is None:
9 | # Generate a random namespace name
10 | self.ns_name = "kubecop-test-" + ''.join(random.choice(string.ascii_lowercase) for i in range(4))
11 | else:
12 | self.ns_name = name
13 | # Create namespace
14 | if subprocess.call(["kubectl", "get", "namespace", self.ns_name]) != 0:
15 | subprocess.check_call(["kubectl", "create", "namespace", self.ns_name])
16 | self.created_ns = True
17 | else:
18 |             # Namespace already exists; leave it in place (it will not be deleted on teardown)
19 | self.created_ns = False
20 | def __del__(self):
21 | # Delete the namespace
22 | if self.created_ns:
23 | subprocess.call(["kubectl", "delete", "namespace", self.ns_name])
24 |
25 | def name(self):
26 | return self.ns_name
27 |
28 | def __str__(self):
29 | return self.ns_name
30 |
31 | class Workload:
32 | def __init__(self, namespace, workload_file):
33 | self.namespace = namespace
34 | self.workload_file = workload_file
35 | # Apply workload
36 | subprocess.check_call(["kubectl", "-n", self.namespace.name(), "apply", "-f", self.workload_file])
37 | # Load the workload file
38 | with open(self.workload_file, 'r') as f:
39 | self.workload = yaml.safe_load(f)
40 | self.workload_kind = self.workload['kind']
41 | self.workload_name = self.workload['metadata']['name']
42 | # Get the labels for the Pod
43 | if self.workload_kind == "Deployment":
44 | self.workload_labels = self.workload['spec']['template']['metadata']['labels']
45 | elif self.workload_kind == "Pod":
46 | self.workload_labels = self.workload['metadata']['labels']
47 | elif self.workload_kind in ["StatefulSet", "DaemonSet"]:
48 | self.workload_labels = self.workload['spec']['template']['metadata']['labels']
49 | elif self.workload_kind == "Job":
50 | self.workload_labels = self.workload['spec']['template']['metadata']['labels']
51 | else:
52 | raise Exception("Unknown workload kind %s"%self.workload_kind)
53 |
54 |
55 | def __del__(self):
56 | # Delete the workload
57 | subprocess.call(["kubectl", "-n", self.namespace.name(), "delete", "-f", self.workload_file])
58 |
59 | def wait_for_ready(self, timeout):
60 | # Find the application label in the workload file
61 | app_label = None
62 | for key in self.workload_labels:
63 | if key == "app":
64 | app_label = self.workload_labels[key]
65 | break
66 |         if app_label is None:
67 | raise Exception("Could not find app label in workload file %s"%self.workload_file)
68 |
69 | # Wait for the workload to be ready
70 | subprocess.check_call(["kubectl", "-n", self.namespace.name(), "wait", "--for=condition=ready", "pod", "-l", "app="+app_label, "--timeout=%ss"%timeout])
71 |
72 | def exec_into_pod(self, command):
73 | # Find the application label in the workload file
74 | app_label = None
75 | for key in self.workload_labels:
76 | if key == "app":
77 | app_label = self.workload_labels[key]
78 | break
79 |         if app_label is None:
80 | raise Exception("Could not find app label in workload file %s"%self.workload_file)
81 | # Get the pod name of the pod
82 | pod_name = subprocess.check_output(["kubectl", "-n", self.namespace.name(), "get", "pod", "-l", "app="+app_label, "-o", "jsonpath='{.items[0].metadata.name}'"]).decode("utf-8").strip("'")
83 | # Exec into the pod
84 | subprocess.check_call(["kubectl", "-n", self.namespace.name(), "exec", pod_name, "--"] + command)
85 |
86 | class KubernetesObjects:
87 | def __init__(self, namespace, object_file):
88 | self.namespace = namespace
89 | self.object_file = object_file
90 | # Apply workload
91 | subprocess.check_call(["kubectl", "-n", self.namespace.name(), "apply", "-f", self.object_file])
92 | def __del__(self):
93 | # Delete the workload
94 | subprocess.call(["kubectl", "-n", self.namespace.name(), "delete", "-f", self.object_file])
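95 | 
96 | # Usage sketch (cleanup is driven by object lifetime: the wrappers delete
97 | # their resources when garbage-collected):
98 | #   ns = Namespace(name=None)  # random kubecop-test-xxxx namespace
99 | #   web = Workload(namespace=ns, workload_file="resources/nginx-deployment.yaml")
100 | #   web.wait_for_ready(timeout=120)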
--------------------------------------------------------------------------------
/system-tests/load_app_by_app_no_memory_cpu_load.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armosec/kubecop/758dd96345c315e269291d120a9a02766a9fa314/system-tests/load_app_by_app_no_memory_cpu_load.py
--------------------------------------------------------------------------------
/system-tests/locustimage/Containerfile:
--------------------------------------------------------------------------------
1 | # Dockerfile
2 | FROM locustio/locust
3 | COPY locustfile.py /locustfile.py
4 |
--------------------------------------------------------------------------------
/system-tests/locustimage/locustfile.py:
--------------------------------------------------------------------------------
1 | # locustfile.py
2 | from locust import HttpUser, task, constant_throughput
3 | import os
4 |
5 | class QuickstartUser(HttpUser):
6 | wait_time = constant_throughput(0.1)
7 | host = os.getenv("TARGET_URL")
8 | @task
9 | def request(self):
10 | self.client.get("/")
11 |
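12 | # Run sketch, mirroring the args in resources/locust-deployment.yaml:
13 | #   TARGET_URL=http://nginx-service locust -f /locustfile.py --headless -u 1000 -r 100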
--------------------------------------------------------------------------------
/system-tests/pprof.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import requests
3 | import time
4 |
5 |
6 | class pprof_recorder:
7 | def __init__(self, namespace, pod_name, port):
8 | self.namespace = namespace
9 | self.pod_name = pod_name
10 | self.port = port
11 | self.proc = None
12 | # Start kubectl port-forward as a subprocess
13 | port_forward_command = "kubectl -n %s port-forward pod/%s %d:%d" % (self.namespace,self.pod_name, self.port, self.port)
14 | self.proc = subprocess.Popen(port_forward_command, shell=True)
15 |
16 | # Give it a moment to establish the port forwarding
17 | time.sleep(2)
18 |
19 | def __del__(self):
20 | if self.proc:
21 | self.proc.terminate()
22 | self.proc.wait()
23 |
24 | def record(self, duration=60, type="cpu", filename=None):
25 | if type == "cpu":
26 | url = 'http://localhost:%d/debug/pprof/profile?seconds=%d' % (self.port, duration)
27 | elif type == "mem":
28 |             url = 'http://localhost:%d/debug/pprof/heap?seconds=%d' % (self.port, duration)
29 |         else:
30 |             # Guard: url would otherwise be undefined for unknown profile types
31 |             raise ValueError("Unsupported profile type: %s" % type)
32 |         response = requests.get(url)
33 |         response.raise_for_status()
34 |         if filename:
35 |             with open(filename, 'wb') as f:
36 |                 f.write(response.content)
37 |             return True
38 |         else:
39 |             return response.content
40 | 
41 |     def record_detached(self, duration=60, type="cpu", filename='pprof.pprof'):
42 |         # Call the record function in a different thread
43 |         import threading
44 |         thread = threading.Thread(target=self.record, args=(duration, type, filename))
45 |         thread.start()
46 |         return True
47 | 
48 | 
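49 | # Usage sketch (pod name and pprof port are hypothetical):
50 | #   rec = pprof_recorder("kubescape", "kubecop-abcde", 6060)
51 | #   rec.record(duration=30, type="cpu", filename="kubecop-cpu.pprof")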
--------------------------------------------------------------------------------
/system-tests/promtopic.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import matplotlib.pyplot as plt
3 | import sys
4 | import os
5 | import subprocess
6 | from datetime import datetime
7 |
8 |
9 | # Function to execute PromQL query
10 | def execute_promql_query(prometheus_url, query, time_start, time_end, steps):
11 | #print("Query: %s" % query)
12 | #print("Start: %s" % time_start)
13 | #print("End: %s" % time_end)
14 | #print("Steps: %s" % steps)
15 | #print("Prometheus URL: %s" % prometheus_url)
16 | response = requests.get(f'{prometheus_url}/api/v1/query_range', params={'query': query,'start':time_start,'end':time_end,'step':steps})
17 | results = response.json()
18 | #print("Results: %s" % results)
19 | if results['status'] != 'success':
20 | print(results)
21 | raise Exception("Query failed")
22 | return results['data']['result']
23 |
24 | def plotprom_cpu_usage(test_case_name,time_start, time_end, steps = '1s'):
25 |     print("Plotting test %s from %s to %s" % (test_case_name, time_start, time_end))
26 |
27 | try:
28 | # Get kubecop pod name
29 | pod_name = subprocess.check_output(["kubectl", "-n", "kubescape", "get", "pods", "-l", "app.kubernetes.io/name=kubecop", "-o", "jsonpath='{.items[0].metadata.name}'"], universal_newlines=True).strip("'")
30 | # Build query
31 | query = 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace="kubescape", pod="%s",container="kubecop"}) by (container)'%pod_name
32 |
33 | timestamps, values = send_promql_query_to_prom(test_case_name, query, time_start, time_end, steps)
34 | values = [float(item) for item in values]
35 |         return save_plot_png(test_case_name+"_cpu", timestamps, values, metric_name='CPU Usage (cores)')
36 | except Exception as e:
37 | print("Exception: ", e)
38 | return 1
39 |
40 | def get_average_cpu_usage(pod_name, time_start, time_end):
41 | # Build query
42 | query ='avg by(cpu, instance) (irate(container_cpu_usage_seconds_total{pod="%s"}[5m]))' % pod_name
43 | timestamps, values = send_promql_query_to_prom("get_average_cpu_usage", query, time_start, time_end)
44 | # Calculate average
45 | values = [float(item) for item in values]
46 | return sum(values)/len(values)
47 |
48 | def plotprom_mem(test_case_name,time_start, time_end, steps = '1s'):
49 |     print("Plotting test %s from %s to %s" % (test_case_name, time_start, time_end))
50 |
51 | try:
52 | # Get kubecop pod name
53 | pod_name = subprocess.check_output(["kubectl", "-n", "kubescape", "get", "pods", "-l", "app.kubernetes.io/name=kubecop", "-o", "jsonpath='{.items[0].metadata.name}'"], universal_newlines=True).strip("'")
54 | # Build query
55 | query = 'sum(container_memory_working_set_bytes{pod="%s", container="kubecop"}) by (container)'%pod_name
56 | timestamps, values = send_promql_query_to_prom(test_case_name, query, time_start, time_end, steps)
57 | # values = [int(item) for item in values]
58 | return save_plot_png(test_case_name+"_mem", timestamps, values, metric_name='Memory Usage (bytes)')
59 | except Exception as e:
60 | print("Exception: ", e)
61 | return 1
62 |
63 | def save_plot_png(test_case_name, timestamps, values, metric_name):
64 | plt.plot(timestamps, values)
65 | plt.title(f'KubeCop {metric_name} - {test_case_name}')
66 | plt.xlabel('Time (epoch)')
67 | plt.ylabel(metric_name)
68 |
69 | # Convert test case name to file name
70 | filename = test_case_name.replace(' ', '_').lower()
71 |
72 | # Save plot to an image file
73 | plt.savefig('%s.png'%filename)
74 | plt.clf()
75 |
76 | return 0
77 |
78 | def send_promql_query_to_prom(test_case_name, query, time_start, time_end, steps = '1s'):
79 | # Get prometheus url
80 | prometheus_url = 'http://localhost:9090'
81 | if 'PROMETHEUS_URL' in os.environ:
82 | prometheus_url = os.environ['PROMETHEUS_URL']
83 |
84 | # Execute the query
85 | data = execute_promql_query(prometheus_url, query, time_start, time_end, steps)
86 |
87 | # Example of processing and plotting
88 | # This will vary greatly depending on the shape of your data
89 | assert len(data) > 0, "No data found in prometheus when looking for %s" % test_case_name
90 |     timestamps = [datetime.fromtimestamp(item[0]).strftime("%M:%S") for item in data[0]['values']] # Assume the first result is a single time series
91 | values = [item[1] for item in data[0]['values']]
92 | return timestamps, values
93 |
94 |
95 | if __name__ == "__main__":
96 | test_case_name = sys.argv[1]
97 | time_start = float(sys.argv[2])
98 | time_end = float(sys.argv[3])
99 | plotprom_cpu_usage(test_case_name=test_case_name, time_start=time_start, time_end=time_end)
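100 | 
101 | # CLI sketch (epoch seconds; Prometheus reachable at localhost:9090 or $PROMETHEUS_URL):
102 | #   python3 promtopic.py my_test_case 1700000000 1700000300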
--------------------------------------------------------------------------------
/system-tests/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | matplotlib
3 | pyyaml
--------------------------------------------------------------------------------
/system-tests/resources/locust-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: http-loader
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: http-loader
9 | replicas: 1
10 | template:
11 | metadata:
12 | labels:
13 | app: http-loader
14 | spec:
15 | containers:
16 | - name: locust
17 | image: quay.io/armosec/kubecop:test-loader-v1
18 | env:
19 | - name: TARGET_URL
20 | value: "http://nginx-service"
21 | args:
22 | - -f
23 | - /locustfile.py
24 | - --headless
25 | - -u
26 | - "1000"
27 | - -r
28 | - "100"
29 | imagePullPolicy: Always
30 |
--------------------------------------------------------------------------------
/system-tests/resources/malicious-job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: kubecop-malicious-app-job
5 | spec:
6 | template:
7 | metadata:
8 | labels:
9 | app: kubecop-malicious-app
10 | spec:
11 | containers:
12 | - name: kubecop-malicious-app
13 | image: quay.io/armosec/kubecop:malicious-app-v1
14 | imagePullPolicy: Always
15 | env:
16 | - name: WAIT_FOR_SIGTERM
17 | value: "true"
18 | - name: WAIT_BEFORE_START
19 | value: "2s"
20 | volumeMounts:
21 | - mountPath: /podmount
22 | name: mount-for-alert
23 | restartPolicy: Never
24 | volumes:
25 | - name: mount-for-alert
26 | emptyDir: {}
27 | backoffLimit: 1
28 |
--------------------------------------------------------------------------------
/system-tests/resources/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: nginx
9 | replicas: 1
10 | template:
11 | metadata:
12 | labels:
13 | app: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx:1.14.2
18 | ports:
19 | - containerPort: 80
--------------------------------------------------------------------------------
/system-tests/resources/nginx-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: nginx-service
5 | spec:
6 | selector:
7 | app: nginx
8 | ports:
9 | - protocol: TCP
10 | port: 80
11 | targetPort: 80
--------------------------------------------------------------------------------
/system-tests/rule_binding_apply_test.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | 
3 | def rule_binding_apply_test(test_framework):
4 |     print("Running rule binding apply test")
5 | 
6 |     try:
7 |         # A fully valid binding must apply and delete cleanly.
8 |         subprocess.check_call(["kubectl", "apply", "-f", "system-tests/rule_binding_crds_files/all-valid.yaml"])
9 |         subprocess.check_call(["kubectl", "delete", "-f", "system-tests/rule_binding_crds_files/all-valid.yaml"])
10 | 
11 |         # Every manifest below carries an invalid or duplicated rule field,
12 |         # so the API server's CRD validation must reject the apply.
13 |         invalid_cases = [
14 |             ("invalid-name.yaml", "Invalid name"),
15 |             ("invalid-id.yaml", "Invalid id"),
16 |             ("invalid-tag.yaml", "Invalid tag"),
17 |             ("dup-fields-name-tag.yaml", "Duplicate fields name-tag"),
18 |             ("dup-fields-name-id.yaml", "Duplicate fields name-id"),
19 |             ("dup-fields-id-tag.yaml", "Duplicate fields id-tag"),
20 |         ]
21 |         for filename, description in invalid_cases:
22 |             proc_stat = subprocess.run(["kubectl", "apply", "-f", "system-tests/rule_binding_crds_files/" + filename], check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
23 |             if proc_stat.returncode == 0:
24 |                 print("%s test failed" % description)
25 |                 return 1
26 | 
27 |     except Exception as e:
28 |         print("Exception occurred: %s" % e)
29 |         return 1
30 | 
31 |     return 0
--------------------------------------------------------------------------------
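The test above returns 0 on success and 1 on failure, and never touches its test_framework parameter. The harness that calls it is outside this excerpt; a minimal hypothetical driver would be:

    import sys

    from rule_binding_apply_test import rule_binding_apply_test

    # Hypothetical standalone driver; the repo's real harness is not shown
    # in this excerpt. The function ignores test_framework, so None suffices.
    if __name__ == "__main__":
        sys.exit(rule_binding_apply_test(None))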
/system-tests/rule_binding_crds_files/all-valid.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-pods-1
5 | spec:
6 | namespaceSelector:
7 | podSelector:
8 | rules:
9 | - ruleName: "Unexpected process launched"
10 | - ruleID: "R0005"
11 | - ruleTags:
12 | - "exec"
13 | - "whitelisted"
14 |
15 |
--------------------------------------------------------------------------------
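Note the list semantics that make this manifest valid: each "-" item under rules selects by exactly one of ruleName, ruleID, or ruleTags. The dup-fields fixtures below merge two selectors into a single item, which the apply test expects the CRD schema to reject, presumably via a oneOf-style constraint. The intended per-entry rule, sketched in Python:

    # Sketch of the presumed per-entry constraint that the dup-fields
    # fixtures below violate. The authoritative check is the
    # RuntimeRuleAlertBinding CRD schema, not this function.
    def rule_entry_is_valid(entry: dict) -> bool:
        selectors = [k for k in ("ruleName", "ruleID", "ruleTags") if k in entry]
        return len(selectors) == 1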
/system-tests/rule_binding_crds_files/dup-fields-id-tag.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-pods-1
5 | spec:
6 | namespaceSelector:
7 | podSelector:
8 | rules:
9 | - ruleName: "Unexpected process launched"
10 | - ruleID: "R0005"
11 | ruleTags:
12 | - "exec"
13 | - "whitelisted"
14 |
15 |
--------------------------------------------------------------------------------
/system-tests/rule_binding_crds_files/dup-fields-name-id.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-pods-1
5 | spec:
6 | namespaceSelector:
7 | podSelector:
8 | rules:
9 | - ruleName: "Unexpected process launched"
10 | ruleID: "R0005"
11 | - ruleTags:
12 | - "exec"
13 | - "whitelisted"
14 |
15 |
--------------------------------------------------------------------------------
/system-tests/rule_binding_crds_files/dup-fields-name-tag.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-pods-1
5 | spec:
6 | namespaceSelector:
7 | podSelector:
8 | rules:
9 | - ruleName: "Unexpected process launched"
10 | ruleTags:
11 | - "exec"
12 | - "whitelisted"
13 | - ruleID: "R0005"
14 |
15 |
--------------------------------------------------------------------------------
/system-tests/rule_binding_crds_files/invalid-id.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-pods-1
5 | spec:
6 | namespaceSelector:
7 | podSelector:
8 | rules:
9 | - ruleID: "R0005666"
10 |
--------------------------------------------------------------------------------
/system-tests/rule_binding_crds_files/invalid-name.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-pods-1
5 | spec:
6 | namespaceSelector:
7 | podSelector:
8 | rules:
9 | - ruleName: "Unexpected process launchedffdsfds"
10 |
--------------------------------------------------------------------------------
/system-tests/rule_binding_crds_files/invalid-tag.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubescape.io/v1
2 | kind: RuntimeRuleAlertBinding
3 | metadata:
4 | name: all-pods-1
5 | spec:
6 | namespaceSelector:
7 | podSelector:
8 | rules:
9 | - ruleTags:
10 | - "exec"
11 | - "whitelistedmnmnn"
12 |
13 |
--------------------------------------------------------------------------------