├── .github └── workflows │ ├── release.yaml │ └── test.yaml ├── Dockerfile ├── LICENSE ├── README.md ├── cmd └── root.go ├── deploy ├── cluster_role.yaml ├── cluster_role_binding.yaml ├── kustomization.yaml ├── namespace.yaml ├── role.yaml ├── role_binding.yaml ├── service.yaml ├── service_account.yaml └── statefulset.yaml ├── go.mod ├── go.sum ├── images ├── image_scan.png └── node_scan.png ├── main.go └── pkg ├── exporter └── exporter.go ├── kubernetes └── kubernetes.go ├── scanner ├── image │ ├── image.go │ ├── image_test.go │ └── interface.go ├── node │ ├── interface.go │ ├── node.go │ └── node_test.go └── scanner.go └── trivy └── trivy.go /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | release: 4 | types: [published] 5 | env: 6 | REGISTRY: ghcr.io 7 | jobs: 8 | build_and_push: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: checkout 12 | uses: actions/checkout@v2 13 | - name: Set up Docker Buildx 14 | uses: docker/setup-buildx-action@v1 15 | - name: Docker meta 16 | id: meta 17 | uses: docker/metadata-action@v3 18 | with: 19 | images: ${{ env.REGISTRY }}/${{ github.repository }} 20 | tags: | 21 | type=ref,event=tag 22 | - name: Login to GitHub Container Registry 23 | uses: docker/login-action@v1 24 | with: 25 | registry: ${{ env.REGISTRY }} 26 | username: ${{ github.repository_owner }} 27 | password: ${{ secrets.CR_PAT }} 28 | - name: Build and push 29 | uses: docker/build-push-action@v2 30 | with: 31 | context: . 
32 | push: true 33 | tags: ${{ steps.meta.outputs.tags }} 34 | labels: ${{ steps.meta.outputs.labels }} -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | push: 4 | branches: [ main ] 5 | pull_request: 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Set up Go 12 | uses: actions/setup-go@v2 13 | with: 14 | go-version: 1.17 15 | - name: Cache 16 | uses: actions/cache@v2.1.0 17 | with: 18 | path: | 19 | ~/go/pkg/mod 20 | ~/.cache/go-build 21 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 22 | restore-keys: | 23 | ${{ runner.os }}-go- 24 | - name: Build 25 | run: go build -v ./... 26 | - name: Test 27 | run: go test -v ./... -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.17.5-alpine AS build 2 | 3 | WORKDIR /src/ 4 | 5 | RUN apk add --update --no-cache curl git 6 | RUN curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /bin v0.22.0 7 | 8 | COPY go.mod . 9 | COPY go.sum . 10 | RUN go mod download 11 | 12 | COPY . . 
13 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /bin/vulnerability-exporter 14 | 15 | FROM alpine:3.15.0 16 | COPY --from=build /bin/vulnerability-exporter /bin/vulnerability-exporter 17 | COPY --from=build /bin/trivy /usr/local/bin/trivy 18 | ENTRYPOINT ["/bin/vulnerability-exporter"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 hnts 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Vulnerability Exporter 2 | A Prometheus Exporter for managing vulnerabilities in kubernetes by using trivy 3 | 4 | ## Abstract 5 | **! 
This project is under development.** 6 | 7 | Vulnerability exporter scans and exports vulnerabilities of images and nodes in kubernetes cluster. 8 | 9 | Inspired by [kube-trivy-exporter](https://github.com/kaidotdev/kube-trivy-exporter). 10 | 11 | ### Image Scan 12 | ```Image Scan``` scans for vulnerabilities in container images of workloads deployed in kubernetes. 13 | 14 | ``` 15 | trivy_image_vulnerabilities{namespace="argocd", fixedVersion="0.3.3", image="ghcr.io/dexidp/dex:v2.27.0", installedVersion="v0.3.2",layer="sha256:d8d076827e5aadd843d9da261228639f575be6e840b463e99381e6d861be90fc", pkgName="golang.org/x/text", severity="HIGH", vulnerabilityId="CVE-2020-14040", workloadKind="Deployment", workloadName="argocd-dex-server"} 16 | ``` 17 | 18 | #### View metrics by using Grafana 19 | ![image_scan_metrics](images/image_scan.png) 20 | 21 | 22 | ### Node Scan 23 | ```Node Scan``` scans vulnerabilities of the nodes of the kubernetes cluster. 24 | 25 | ``` 26 | trivy_node_vulnerabilities{fixedVersion="0.12.3", installedVersion="0.12.2",nodeName="master-node", pkgName="Flask", severity="HIGH" vulnerabilityId="CVE-2018-1000656"} 27 | ``` 28 | 29 | #### View metrics by using Grafana 30 | ![node_scan_metrics](images/node_scan.png) 31 | 32 | ## Installation 33 | ``` 34 | $ kubectl apply -k deploy 35 | ``` 36 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "os" 7 | "os/signal" 8 | "syscall" 9 | "time" 10 | 11 | "github.com/hnts/vulnerability-exporter/pkg/exporter" 12 | k8s "github.com/hnts/vulnerability-exporter/pkg/kubernetes" 13 | "github.com/hnts/vulnerability-exporter/pkg/scanner" 14 | "github.com/hnts/vulnerability-exporter/pkg/scanner/image" 15 | "github.com/hnts/vulnerability-exporter/pkg/scanner/node" 16 | "github.com/hnts/vulnerability-exporter/pkg/trivy" 17 | 
"github.com/prometheus/client_golang/prometheus" 18 | "github.com/prometheus/client_golang/prometheus/promhttp" 19 | "github.com/spf13/cobra" 20 | "k8s.io/client-go/kubernetes" 21 | "k8s.io/client-go/rest" 22 | "k8s.io/client-go/tools/clientcmd" 23 | "k8s.io/klog/v2" 24 | ) 25 | 26 | var ( 27 | listenAddress string 28 | metricsPath string 29 | usedNamespace string 30 | kubeConfigPath string 31 | scanInterval uint32 32 | imageConcurrency uint8 33 | nodeConcurrency uint8 34 | 35 | rootCmd = &cobra.Command{ 36 | Use: "vulnerability-exporter", 37 | Short: "A Prometheus Exporter for managing vulnerabilities in kubernetes by using trivy", 38 | Run: func(cmd *cobra.Command, args []string) { 39 | defer klog.Flush() 40 | 41 | var config *rest.Config 42 | var configErr error 43 | 44 | if kubeConfigPath != "" { 45 | config, configErr = clientcmd.BuildConfigFromFlags("", kubeConfigPath) 46 | } else { 47 | config, configErr = rest.InClusterConfig() 48 | } 49 | 50 | if configErr != nil { 51 | klog.Fatalf("failed to initialize kube config: %s", configErr.Error()) 52 | } 53 | 54 | clientset, err := kubernetes.NewForConfig(config) 55 | if err != nil { 56 | klog.Fatalf("failed to create kubernetes client: %s", err.Error()) 57 | } 58 | 59 | kclient := k8s.NewClient(clientset) 60 | tclient := trivy.Client{} 61 | scanners := []scanner.Scanner{ 62 | image.NewImageScanner(kclient, tclient, imageConcurrency), 63 | node.NewNodeScanner(kclient, usedNamespace, nodeConcurrency), 64 | } 65 | 66 | mux := http.NewServeMux() 67 | mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) 68 | mux.Handle(metricsPath, promhttp.Handler()) 69 | 70 | server := http.Server{ 71 | Addr: listenAddress, 72 | Handler: mux, 73 | } 74 | exporter := exporter.NewExporter(scanners, server) 75 | prometheus.MustRegister(exporter) 76 | 77 | ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) 78 | defer stop() 79 | 80 | 
klog.Info("Start vulnerability-exporter") 81 | exporter.Listen(ctx, time.Duration(scanInterval)*time.Second) 82 | }, 83 | } 84 | ) 85 | 86 | func Execute() { 87 | err := rootCmd.Execute() 88 | if err != nil { 89 | os.Exit(1) 90 | } 91 | } 92 | 93 | func init() { 94 | klog.InitFlags(nil) 95 | 96 | rootCmd.Flags().StringVar(&listenAddress, "listen-address", ":9321", "Address to listen on for web interface and telemetry") 97 | rootCmd.Flags().StringVar(&metricsPath, "metrics-path", "/metrics", "Path under which to expose metrics") 98 | rootCmd.Flags().StringVar(&usedNamespace, "used-namespace", "default", "Namespace in which the Pod that scans nodes for vulnerabilities is created") 99 | rootCmd.Flags().StringVar(&kubeConfigPath, "kubeconfig", "", "Absolute path to the kubeconfig file") 100 | rootCmd.Flags().Uint32Var(&scanInterval, "scan-interval", 6000, "Seconds in the interval to execute to scan vulnerability") 101 | rootCmd.Flags().Uint8Var(&imageConcurrency, "image-scan-concurrency", 30, "The number of images to be scanned at a time in goroutine") 102 | rootCmd.Flags().Uint8Var(&nodeConcurrency, "node-scan-concurrency", 1, "The number of nodes to be scanned at a time in goroutine") 103 | } 104 | -------------------------------------------------------------------------------- /deploy/cluster_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: vulnerability-exporter-cluster-role 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods"] 8 | verbs: ["get", "watch", "list"] 9 | - apiGroups: [""] 10 | resources: ["nodes"] 11 | verbs: ["list"] 12 | - apiGroups: ["apps"] 13 | resources: ["deployments", "statefulsets", "daemonsets"] 14 | verbs: ["list"] 15 | - apiGroups: ["batch"] 16 | resources: ["cronjobs"] 17 | verbs: ["list"] -------------------------------------------------------------------------------- /deploy/cluster_role_binding.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: vulnerability-exporter-role-binding 5 | subjects: 6 | - kind: ServiceAccount 7 | name: vulnerability-exporter-account 8 | apiGroup: "" 9 | roleRef: 10 | kind: ClusterRole 11 | name: vulnerability-exporter-cluster-role 12 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /deploy/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: vulnerability-exporter 4 | 5 | resources: 6 | - statefulset.yaml 7 | - service_account.yaml 8 | - namespace.yaml 9 | - role.yaml 10 | - role_binding.yaml 11 | - cluster_role.yaml 12 | - cluster_role_binding.yaml 13 | - service.yaml 14 | 15 | 16 | -------------------------------------------------------------------------------- /deploy/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: vulnerability-exporter -------------------------------------------------------------------------------- /deploy/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: vulnerability-exporter-role 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods"] 8 | verbs: ["create", "delete"] 9 | - apiGroups: [""] 10 | resources: ["pods/log"] 11 | verbs: ["get"] -------------------------------------------------------------------------------- /deploy/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: 
vulnerability-exporter-role-binding 5 | subjects: 6 | - kind: ServiceAccount 7 | name: vulnerability-exporter-account 8 | apiGroup: "" 9 | roleRef: 10 | kind: Role 11 | name: vulnerability-exporter-role 12 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /deploy/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: vulnerability-exporter 7 | name: vulnerability-exporter 8 | spec: 9 | clusterIP: None 10 | selector: 11 | app: vulnerability-exporter 12 | ports: 13 | - name: "http" 14 | protocol: "TCP" 15 | port: 9321 16 | targetPort: http -------------------------------------------------------------------------------- /deploy/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: vulnerability-exporter-account -------------------------------------------------------------------------------- /deploy/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: vulnerability-exporter 5 | labels: 6 | app: vulnerability-exporter 7 | spec: 8 | serviceName: vulnerability-exporter 9 | replicas: 1 10 | updateStrategy: 11 | type: RollingUpdate 12 | selector: 13 | matchLabels: 14 | app: vulnerability-exporter 15 | template: 16 | metadata: 17 | labels: 18 | app: vulnerability-exporter 19 | spec: 20 | serviceAccountName: vulnerability-exporter-account 21 | containers: 22 | - name: vulnerability-exporter 23 | image: ghcr.io/hnts/vulnerability-exporter:v0.1.1 24 | imagePullPolicy: Always 25 | args: 26 | - --scan-interval=3600 27 | - --used-namespace=vulnerability-exporter 28 | ports: 29 | - name: http 30 | containerPort: 
9321 31 | volumeMounts: 32 | - name: cache 33 | mountPath: /var/lib/trivy 34 | volumes: 35 | - name: cache 36 | emptyDir: 37 | {} -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/hnts/vulnerability-exporter 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/aquasecurity/trivy v0.22.0 7 | github.com/prometheus/client_golang v1.11.0 8 | github.com/spf13/cobra v1.3.0 9 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c 10 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 11 | k8s.io/api v0.23.1 12 | k8s.io/apimachinery v0.23.1 13 | k8s.io/client-go v0.23.1 14 | k8s.io/klog/v2 v2.30.0 15 | ) 16 | 17 | require ( 18 | github.com/Masterminds/goutils v1.1.1 // indirect 19 | github.com/Masterminds/semver v1.5.0 // indirect 20 | github.com/Masterminds/sprig v2.22.0+incompatible // indirect 21 | github.com/aquasecurity/fanal v0.0.0-20211224205755-c94f68b6d71a // indirect 22 | github.com/aquasecurity/trivy-db v0.0.0-20210916043317-726b7b72a47b // indirect 23 | github.com/beorn7/perks v1.0.1 // indirect 24 | github.com/caarlos0/env/v6 v6.0.0 // indirect 25 | github.com/cespare/xxhash/v2 v2.1.2 // indirect 26 | github.com/davecgh/go-spew v1.1.1 // indirect 27 | github.com/fatih/color v1.13.0 // indirect 28 | github.com/go-logr/logr v1.2.0 // indirect 29 | github.com/gogo/protobuf v1.3.2 // indirect 30 | github.com/golang/protobuf v1.5.2 // indirect 31 | github.com/google/go-cmp v0.5.6 // indirect 32 | github.com/google/go-containerregistry v0.6.0 // indirect 33 | github.com/google/gofuzz v1.1.0 // indirect 34 | github.com/google/uuid v1.3.0 // indirect 35 | github.com/googleapis/gnostic v0.5.5 // indirect 36 | github.com/huandu/xstrings v1.3.2 // indirect 37 | github.com/imdario/mergo v0.3.12 // indirect 38 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 39 | github.com/json-iterator/go v1.1.12 // indirect 40 | 
github.com/mattn/go-colorable v0.1.12 // indirect 41 | github.com/mattn/go-isatty v0.0.14 // indirect 42 | github.com/mattn/go-runewidth v0.0.12 // indirect 43 | github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect 44 | github.com/mitchellh/copystructure v1.1.1 // indirect 45 | github.com/mitchellh/reflectwalk v1.0.1 // indirect 46 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 47 | github.com/modern-go/reflect2 v1.0.2 // indirect 48 | github.com/olekukonko/tablewriter v0.0.5 // indirect 49 | github.com/pmezard/go-difflib v1.0.0 // indirect 50 | github.com/prometheus/client_model v0.2.0 // indirect 51 | github.com/prometheus/common v0.29.0 // indirect 52 | github.com/prometheus/procfs v0.6.0 // indirect 53 | github.com/rivo/uniseg v0.2.0 // indirect 54 | github.com/spf13/pflag v1.0.5 // indirect 55 | github.com/stretchr/objx v0.3.0 // indirect 56 | github.com/stretchr/testify v1.7.0 // indirect 57 | go.etcd.io/bbolt v1.3.6 // indirect 58 | golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect 59 | golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect 60 | golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect 61 | golang.org/x/sys v0.0.0-20211205182925-97ca703d548d // indirect 62 | golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect 63 | golang.org/x/text v0.3.7 // indirect 64 | golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect 65 | google.golang.org/appengine v1.6.7 // indirect 66 | google.golang.org/protobuf v1.27.1 // indirect 67 | gopkg.in/inf.v0 v0.9.1 // indirect 68 | gopkg.in/yaml.v2 v2.4.0 // indirect 69 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect 70 | k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect 71 | k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect 72 | sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect 73 | sigs.k8s.io/structured-merge-diff/v4 
v4.1.2 // indirect 74 | sigs.k8s.io/yaml v1.3.0 // indirect 75 | ) 76 | -------------------------------------------------------------------------------- /images/image_scan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hnts/vulnerability-exporter/face92c87a26cb2abbd9d05598d56520f17a364b/images/image_scan.png -------------------------------------------------------------------------------- /images/node_scan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hnts/vulnerability-exporter/face92c87a26cb2abbd9d05598d56520f17a364b/images/node_scan.png -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/hnts/vulnerability-exporter/cmd" 4 | 5 | func main() { 6 | cmd.Execute() 7 | } 8 | -------------------------------------------------------------------------------- /pkg/exporter/exporter.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "sync" 7 | "time" 8 | 9 | "github.com/hnts/vulnerability-exporter/pkg/scanner" 10 | "github.com/prometheus/client_golang/prometheus" 11 | "k8s.io/klog/v2" 12 | ) 13 | 14 | type Exporter struct { 15 | scanners []scanner.Scanner 16 | metrics []prometheus.Collector 17 | server http.Server 18 | } 19 | 20 | func NewExporter(scanners []scanner.Scanner, server http.Server) *Exporter { 21 | return &Exporter{ 22 | scanners: scanners, 23 | metrics: []prometheus.Collector{}, 24 | server: server, 25 | } 26 | } 27 | 28 | func (e *Exporter) Listen(ctx context.Context, interval time.Duration) { 29 | go func() { 30 | e.execute(ctx) 31 | ticker := time.NewTicker(interval) 32 | LOOP: 33 | for { 34 | select { 35 | case <-ticker.C: 36 | e.execute(ctx) 37 | 
case <-ctx.Done(): 38 | ticker.Stop() 39 | break LOOP 40 | } 41 | } 42 | }() 43 | 44 | go func() { 45 | if err := e.server.ListenAndServe(); err != http.ErrServerClosed { 46 | klog.Fatalf("failed to start server: %s", err) 47 | } 48 | }() 49 | 50 | <-ctx.Done() 51 | klog.Info("Now that sigterm has been detected, run the cleanup and shutdown server...") 52 | e.cleanup() 53 | err := e.server.Shutdown(context.Background()) 54 | if err != nil { 55 | klog.Warningf("failed to shutdown server: %s", err) 56 | } 57 | } 58 | 59 | func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { 60 | for _, c := range e.metrics { 61 | c.Describe(ch) 62 | } 63 | } 64 | func (e *Exporter) Collect(ch chan<- prometheus.Metric) { 65 | for _, c := range e.metrics { 66 | c.Collect(ch) 67 | } 68 | } 69 | 70 | func (e *Exporter) execute(ctx context.Context) { 71 | metrics := []prometheus.Collector{} 72 | for _, s := range e.scanners { 73 | c, err := s.Scan(ctx) 74 | if err != nil { 75 | klog.Warningf("failed to execute Scan of %s: %s", s.Name(), err) 76 | } 77 | metrics = append(metrics, c) 78 | } 79 | 80 | e.metrics = metrics 81 | } 82 | 83 | func (e *Exporter) cleanup() { 84 | wg := sync.WaitGroup{} 85 | for _, s := range e.scanners { 86 | wg.Add(1) 87 | go func(s scanner.Scanner) { 88 | defer wg.Done() 89 | err := s.CleanUp() 90 | if err != nil { 91 | klog.Warningf("failed to clean up %s: %s", s.Name(), err) 92 | } 93 | }(s) 94 | } 95 | wg.Wait() 96 | } 97 | -------------------------------------------------------------------------------- /pkg/kubernetes/kubernetes.go: -------------------------------------------------------------------------------- 1 | package kubernetes 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io" 7 | 8 | "golang.org/x/xerrors" 9 | appv1 "k8s.io/api/apps/v1" 10 | batchv1 "k8s.io/api/batch/v1" 11 | v1 "k8s.io/api/core/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/client-go/kubernetes" 14 | ) 15 | 16 | type Client struct { 17 | clientSet 
kubernetes.Interface 18 | } 19 | 20 | func NewClient(clientSet kubernetes.Interface) Client { 21 | return Client{ 22 | clientSet: clientSet, 23 | } 24 | } 25 | 26 | func (c Client) CreatePod(ctx context.Context, ns string, pod *v1.Pod) error { 27 | _, err := c.clientSet.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) 28 | if err != nil { 29 | return xerrors.Errorf("failed to create pod %s: %w", pod.Name, err) 30 | } 31 | 32 | return nil 33 | } 34 | 35 | func (c Client) DeletePod(ctx context.Context, ns string, name string) error { 36 | err := c.clientSet.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{}) 37 | if err != nil { 38 | return xerrors.Errorf("failed to delete pod %s: %w", name, err) 39 | } 40 | 41 | return nil 42 | } 43 | 44 | func (c Client) GetPod(ctx context.Context, name string, ns string) (*v1.Pod, error) { 45 | pod, err := c.clientSet.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{}) 46 | if err != nil { 47 | return nil, xerrors.Errorf("failed to get pod %s: %w", name, err) 48 | } 49 | 50 | return pod, nil 51 | } 52 | 53 | func (c Client) ListPodsByLabel(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error) { 54 | pods, err := c.clientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{ 55 | LabelSelector: metav1.FormatLabelSelector(label), 56 | }) 57 | if err != nil { 58 | return nil, err 59 | } 60 | 61 | return pods.Items, nil 62 | } 63 | 64 | func (c Client) GetContainerLog(ctx context.Context, ns string, podName string, containerName string) (string, error) { 65 | req := c.clientSet.CoreV1().Pods(ns).GetLogs(podName, &v1.PodLogOptions{Container: containerName}) 66 | 67 | body, err := req.Stream(ctx) 68 | if err != nil { 69 | return "", xerrors.Errorf("failed to open stream: %w", err) 70 | } 71 | defer body.Close() 72 | 73 | var log bytes.Buffer 74 | n, err := io.Copy(&log, body) 75 | if err != nil { 76 | return "", xerrors.New("failed to copy information from body to log") 77 | } 78 | if n == 0 { 79 | return 
"", xerrors.New("The log retrieved from the pod is empty") 80 | } 81 | 82 | return log.String(), nil 83 | } 84 | 85 | func (c Client) ListNodes(ctx context.Context) ([]v1.Node, error) { 86 | nodes, err := c.clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) 87 | if err != nil { 88 | return []v1.Node{}, xerrors.Errorf("failed to list nodes: %w", err) 89 | } 90 | 91 | return nodes.Items, nil 92 | } 93 | 94 | func (c Client) WaitUntilCompleted(namespace string, podName string) error { 95 | ctx := context.Background() 96 | w, err := c.clientSet.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{}) 97 | if err != nil { 98 | return xerrors.Errorf("failed to watch pods: %w", err) 99 | } 100 | 101 | defer w.Stop() 102 | 103 | ch := w.ResultChan() 104 | for { 105 | select { 106 | case <-ctx.Done(): 107 | return xerrors.New("pod execution timeout") 108 | case obj := <-ch: 109 | pod, ok := obj.Object.(*v1.Pod) 110 | if !ok { 111 | return xerrors.Errorf("failed to cast to pod from watch object: %w", obj) 112 | } 113 | 114 | if pod.Name == podName { 115 | if pod.Status.Phase == "Succeeded" { 116 | return nil 117 | } 118 | 119 | if pod.Status.Phase == "Failed" { 120 | return xerrors.Errorf("The status of the pod is \"Failed\" because %s", pod.Status.Reason) 121 | } 122 | } 123 | } 124 | } 125 | } 126 | 127 | func (c Client) ListDeployments(ctx context.Context) ([]appv1.Deployment, error) { 128 | deployments, err := c.clientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{}) 129 | if err != nil { 130 | return []appv1.Deployment{}, xerrors.Errorf("failed to list deployments: %w", err) 131 | } 132 | 133 | return deployments.Items, nil 134 | } 135 | 136 | func (c Client) ListStatefulSets(ctx context.Context) ([]appv1.StatefulSet, error) { 137 | statefulSets, err := c.clientSet.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{}) 138 | if err != nil { 139 | return []appv1.StatefulSet{}, xerrors.Errorf("failed to list statefulsets: %w", err) 140 | } 141 | 142 
| return statefulSets.Items, nil 143 | } 144 | 145 | func (c Client) ListDaemonSets(ctx context.Context) ([]appv1.DaemonSet, error) { 146 | daemonSets, err := c.clientSet.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{}) 147 | if err != nil { 148 | return []appv1.DaemonSet{}, xerrors.Errorf("failed to list daemonsets: %w", err) 149 | } 150 | 151 | return daemonSets.Items, nil 152 | } 153 | 154 | func (c Client) ListCronJobs(ctx context.Context) ([]batchv1.CronJob, error) { 155 | cronJobs, err := c.clientSet.BatchV1().CronJobs("").List(ctx, metav1.ListOptions{}) 156 | if err != nil { 157 | return []batchv1.CronJob{}, xerrors.Errorf("failed to list cronjob: %w", err) 158 | } 159 | 160 | return cronJobs.Items, nil 161 | } 162 | -------------------------------------------------------------------------------- /pkg/scanner/image/image.go: -------------------------------------------------------------------------------- 1 | package image 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/aquasecurity/trivy/pkg/report" 8 | "github.com/prometheus/client_golang/prometheus" 9 | "golang.org/x/sync/semaphore" 10 | "golang.org/x/xerrors" 11 | "k8s.io/klog/v2" 12 | ) 13 | 14 | var ( 15 | imageVulnerbilities = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 16 | Namespace: "trivy", 17 | Name: "image_vulnerabilities", 18 | Help: "Vulnerbilities of cotainer images detected by trivy", 19 | }, []string{ 20 | "image", 21 | "layer", 22 | "vulnerabilityId", 23 | "pkgName", 24 | "installedVersion", 25 | "severity", 26 | "fixedVersion", 27 | "namespace", 28 | "workloadKind", 29 | "workloadName", 30 | }, 31 | ) 32 | ) 33 | 34 | type ImageScanner struct { 35 | kubernetesClient KubernetesClient 36 | trivyClient TrivyClient 37 | concurrency uint8 38 | } 39 | 40 | func NewImageScanner(kclient KubernetesClient, tclient TrivyClient, concurrency uint8) *ImageScanner { 41 | return &ImageScanner{ 42 | kubernetesClient: kclient, 43 | trivyClient: tclient, 44 | concurrency: concurrency, 
45 | } 46 | } 47 | 48 | func (i *ImageScanner) Name() string { 49 | return "image-vulnerability-scanner" 50 | } 51 | 52 | func (i *ImageScanner) Scan(ctx context.Context) (prometheus.Collector, error) { 53 | workloads, err := i.getTargetWorkloads(ctx) 54 | if err != nil { 55 | return imageVulnerbilities, xerrors.Errorf("failed to get workloads: %w", err) 56 | } 57 | images := uniquedContainerImages(workloads) 58 | cacheDir := "/var/lib/trivy" 59 | 60 | reports, err := i.ScanImages(ctx, images, cacheDir, int64(i.concurrency)) 61 | if err != nil { 62 | return imageVulnerbilities, err 63 | } 64 | 65 | imageVulnerbilities.Reset() 66 | for _, rp := range reports { 67 | for _, w := range workloads { 68 | for _, i := range w.images { 69 | if rp.ArtifactName == i { 70 | for _, rs := range rp.Results { 71 | for _, v := range rs.Vulnerabilities { 72 | labels := []string{ 73 | rp.ArtifactName, 74 | v.Layer.DiffID, 75 | v.VulnerabilityID, 76 | v.PkgName, 77 | v.InstalledVersion, 78 | v.Severity, 79 | v.FixedVersion, 80 | w.namespace, 81 | w.kind, 82 | w.name, 83 | } 84 | imageVulnerbilities.WithLabelValues(labels...).Set(1) 85 | } 86 | } 87 | } 88 | } 89 | } 90 | } 91 | 92 | return imageVulnerbilities, nil 93 | } 94 | 95 | func (i *ImageScanner) CleanUp() error { 96 | return nil 97 | } 98 | 99 | func (i *ImageScanner) ScanImages(ctx context.Context, images []string, cacheDir string, concurrency int64) ([]report.Report, error) { 100 | err := i.trivyClient.UpdateDatabase(ctx, cacheDir) 101 | if err != nil { 102 | return nil, xerrors.Errorf("failed to update trivy database: %w", err) 103 | } 104 | 105 | wg := sync.WaitGroup{} 106 | mutex := &sync.Mutex{} 107 | reports := []report.Report{} 108 | sem := semaphore.NewWeighted(concurrency) 109 | for _, image := range images { 110 | wg.Add(1) 111 | if err := sem.Acquire(ctx, 1); err != nil { 112 | klog.Warningf("failed to acquire semaphore: %w", err) 113 | continue 114 | } 115 | 116 | go func(image string) { 117 | defer func() { 118 | 
sem.Release(1) 119 | wg.Done() 120 | }() 121 | 122 | report, err := i.trivyClient.ScanImage(ctx, image, cacheDir) 123 | if err != nil { 124 | klog.Warningf("failed to scan image(%s): %s", image, err) 125 | return 126 | } 127 | 128 | mutex.Lock() 129 | reports = append(reports, *report) 130 | mutex.Unlock() 131 | }(image) 132 | } 133 | wg.Wait() 134 | 135 | return reports, nil 136 | } 137 | 138 | type workload struct { 139 | name string 140 | namespace string 141 | kind string 142 | images []string 143 | } 144 | 145 | func (i *ImageScanner) getTargetWorkloads(ctx context.Context) ([]workload, error) { 146 | var workloads []workload 147 | 148 | deployments, err := i.kubernetesClient.ListDeployments(ctx) 149 | if err != nil { 150 | return nil, err 151 | } 152 | for _, d := range deployments { 153 | w := workload{ 154 | name: d.Name, 155 | namespace: d.Namespace, 156 | kind: "Deployment", 157 | images: func() []string { 158 | var images []string 159 | for _, c := range d.Spec.Template.Spec.Containers { 160 | images = append(images, c.Image) 161 | } 162 | return images 163 | }(), 164 | } 165 | workloads = append(workloads, w) 166 | } 167 | 168 | statefulSets, err := i.kubernetesClient.ListStatefulSets(ctx) 169 | if err != nil { 170 | return nil, err 171 | } 172 | for _, s := range statefulSets { 173 | w := workload{ 174 | name: s.Name, 175 | namespace: s.Namespace, 176 | kind: "StatefulSet", 177 | images: func() []string { 178 | var images []string 179 | for _, c := range s.Spec.Template.Spec.Containers { 180 | images = append(images, c.Image) 181 | } 182 | return images 183 | }(), 184 | } 185 | workloads = append(workloads, w) 186 | } 187 | 188 | daemonSets, err := i.kubernetesClient.ListDaemonSets(ctx) 189 | if err != nil { 190 | return nil, err 191 | } 192 | for _, d := range daemonSets { 193 | w := workload{ 194 | name: d.Name, 195 | namespace: d.Namespace, 196 | kind: "DaemonSet", 197 | images: func() []string { 198 | var images []string 199 | for _, c := range 
d.Spec.Template.Spec.Containers { 200 | images = append(images, c.Image) 201 | } 202 | return images 203 | }(), 204 | } 205 | workloads = append(workloads, w) 206 | } 207 | 208 | cronJobs, err := i.kubernetesClient.ListCronJobs(ctx) 209 | if err != nil { 210 | return nil, err 211 | } 212 | for _, c := range cronJobs { 213 | w := workload{ 214 | name: c.Name, 215 | namespace: c.Namespace, 216 | kind: "Deployment", 217 | images: func() []string { 218 | var images []string 219 | for _, c := range c.Spec.JobTemplate.Spec.Template.Spec.Containers { 220 | images = append(images, c.Image) 221 | } 222 | return images 223 | }(), 224 | } 225 | workloads = append(workloads, w) 226 | } 227 | 228 | return workloads, err 229 | } 230 | 231 | func uniquedContainerImages(workloads []workload) []string { 232 | var images []string 233 | 234 | for _, w := range workloads { 235 | images = append(images, w.images...) 236 | } 237 | 238 | keys := make(map[string]bool) 239 | var uimages []string 240 | for _, image := range images { 241 | if _, value := keys[image]; !value { 242 | keys[image] = true 243 | uimages = append(uimages, image) 244 | } 245 | } 246 | 247 | return uimages 248 | } 249 | -------------------------------------------------------------------------------- /pkg/scanner/image/image_test.go: -------------------------------------------------------------------------------- 1 | package image_test 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "testing" 7 | 8 | "github.com/aquasecurity/trivy/pkg/report" 9 | "github.com/hnts/vulnerability-exporter/pkg/scanner/image" 10 | "golang.org/x/xerrors" 11 | v1 "k8s.io/api/apps/v1" 12 | batchv1 "k8s.io/api/batch/v1" 13 | ) 14 | 15 | type fakeTrivyClient struct { 16 | FakeScanImage func(ctx context.Context, image string, cacheDir string) (*report.Report, error) 17 | FakeUpdateDatabase func(ctx context.Context, cacheDir string) error 18 | } 19 | 20 | var _ image.TrivyClient = &fakeTrivyClient{} 21 | 22 | func (f *fakeTrivyClient) 
UpdateDatabase(ctx context.Context, cacheDir string) error {
	return f.FakeUpdateDatabase(ctx, cacheDir)
}

func (f *fakeTrivyClient) ScanImage(ctx context.Context, image string, cacheDir string) (*report.Report, error) {
	return f.FakeScanImage(ctx, image, cacheDir)
}

// fakeKuberneteClient stubs image.KubernetesClient for tests; each listing
// call delegates to the corresponding Fake* function of the test case.
type fakeKuberneteClient struct {
	FakeListDeployments  func(ctx context.Context) ([]v1.Deployment, error)
	FakeListStatefulSets func(ctx context.Context) ([]v1.StatefulSet, error)
	FakeListDaemonSets   func(ctx context.Context) ([]v1.DaemonSet, error)
	FakeListCronJobs     func(ctx context.Context) ([]batchv1.CronJob, error)
}

// Compile-time check that the fake satisfies the interface.
var _ image.KubernetesClient = &fakeKuberneteClient{}

func (f *fakeKuberneteClient) ListDeployments(ctx context.Context) ([]v1.Deployment, error) {
	return f.FakeListDeployments(ctx)
}

func (f *fakeKuberneteClient) ListDaemonSets(ctx context.Context) ([]v1.DaemonSet, error) {
	return f.FakeListDaemonSets(ctx)
}

func (f *fakeKuberneteClient) ListStatefulSets(ctx context.Context) ([]v1.StatefulSet, error) {
	return f.FakeListStatefulSets(ctx)
}

func (f *fakeKuberneteClient) ListCronJobs(ctx context.Context) ([]batchv1.CronJob, error) {
	return f.FakeListCronJobs(ctx)
}

// TestImageScanner_ScanImages is a table-driven test covering the success
// path, a database-update failure (expected to error), and per-image scan
// failures (expected to be skipped, returning an empty report slice).
func TestImageScanner_ScanImages(t *testing.T) {
	var concurrency uint8 = 30
	type fields struct {
		kubernetesClient image.KubernetesClient
		trivyClient      image.TrivyClient
		concurrency      uint8
	}
	type args struct {
		ctx      context.Context
		images   []string
		cacheDir string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    []report.Report
		wantErr bool
	}{
		{
			"scan image successfully",
			fields{
				&fakeKuberneteClient{
					FakeListDeployments: func(ctx context.Context) ([]v1.Deployment, error) {
						return []v1.Deployment{}, nil
					},
					FakeListDaemonSets: func(ctx context.Context) ([]v1.DaemonSet, error) {
						return []v1.DaemonSet{}, nil
					},
					FakeListStatefulSets: func(ctx context.Context) ([]v1.StatefulSet, error) {
						return []v1.StatefulSet{}, nil
					},
					FakeListCronJobs: func(ctx context.Context) ([]batchv1.CronJob, error) {
						return []batchv1.CronJob{}, nil
					},
				},
				&fakeTrivyClient{
					FakeScanImage: func(ctx context.Context, image string, cacheDir string) (*report.Report, error) {
						return &report.Report{}, nil
					},
					FakeUpdateDatabase: func(ctx context.Context, cacheDir string) error {
						return nil
					},
				},
				concurrency,
			},
			args{
				context.Background(),
				[]string{"nginx"},
				"cache-dir",
			},
			[]report.Report{{}},
			false,
		},
		{
			"failed to update database",
			fields{
				&fakeKuberneteClient{
					FakeListDeployments: func(ctx context.Context) ([]v1.Deployment, error) {
						return []v1.Deployment{}, nil
					},
					FakeListDaemonSets: func(ctx context.Context) ([]v1.DaemonSet, error) {
						return []v1.DaemonSet{}, nil
					},
					FakeListStatefulSets: func(ctx context.Context) ([]v1.StatefulSet, error) {
						return []v1.StatefulSet{}, nil
					},
					FakeListCronJobs: func(ctx context.Context) ([]batchv1.CronJob, error) {
						return []batchv1.CronJob{}, nil
					},
				},
				&fakeTrivyClient{
					FakeScanImage: func(ctx context.Context, image string, cacheDir string) (*report.Report, error) {
						return &report.Report{}, nil
					},
					FakeUpdateDatabase: func(ctx context.Context, cacheDir string) error {
						return xerrors.New("failed to update database")
					},
				},
				concurrency,
			},
			args{
				context.Background(),
				[]string{"nginx"},
				"cache-dir",
			},
			nil,
			true,
		},
		{
			"failed to scan some images",
			fields{
				&fakeKuberneteClient{
					FakeListDeployments: func(ctx context.Context) ([]v1.Deployment, error) {
						return []v1.Deployment{}, nil
					},
					FakeListDaemonSets: func(ctx context.Context) ([]v1.DaemonSet, error) {
						return []v1.DaemonSet{}, nil
					},
					FakeListStatefulSets: func(ctx context.Context) ([]v1.StatefulSet, error) {
						return []v1.StatefulSet{}, nil
					},
					FakeListCronJobs: func(ctx context.Context) ([]batchv1.CronJob, error) {
						return []batchv1.CronJob{}, nil
					},
				},
				&fakeTrivyClient{
					FakeScanImage: func(ctx context.Context, image string, cacheDir string) (*report.Report, error) {
						return nil, xerrors.New("failed to scan images")
					},
					FakeUpdateDatabase: func(ctx context.Context, cacheDir string) error {
						return nil
					},
				},
				concurrency,
			},
			args{
				context.Background(),
				[]string{"nginx"},
				"cache-dir",
			},
			// Scan failures are logged and skipped, so the call succeeds
			// with an empty (non-nil) report slice.
			[]report.Report{},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			i := image.NewImageScanner(tt.fields.kubernetesClient, tt.fields.trivyClient, tt.fields.concurrency)
			got, err := i.ScanImages(tt.args.ctx, tt.args.images, tt.args.cacheDir, int64(tt.fields.concurrency))
			if (err != nil) != tt.wantErr {
				t.Errorf("ImageScanner.ScanImages() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ImageScanner.ScanImages() = %v, want %v", got, tt.want)
			}
		})
	}
}
--------------------------------------------------------------------------------
/pkg/scanner/image/interface.go:
--------------------------------------------------------------------------------
package image

import (
	"context"

	"github.com/aquasecurity/trivy/pkg/report"
	v1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
)

// KubernetesClient is the subset of Kubernetes operations the image scanner
// needs: listing the workload kinds whose pod templates reference images.
type KubernetesClient interface {
	ListDeployments(ctx context.Context) ([]v1.Deployment, error)
13 | ListStatefulSets(ctx context.Context) ([]v1.StatefulSet, error) 14 | ListDaemonSets(ctx context.Context) ([]v1.DaemonSet, error) 15 | ListCronJobs(ctx context.Context) ([]batchv1.CronJob, error) 16 | } 17 | 18 | type TrivyClient interface { 19 | ScanImage(ctx context.Context, image string, cacheDir string) (*report.Report, error) 20 | UpdateDatabase(ctx context.Context, cacheDir string) error 21 | } 22 | -------------------------------------------------------------------------------- /pkg/scanner/node/interface.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "context" 5 | 6 | v1 "k8s.io/api/core/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | type KubernetesClient interface { 11 | ListNodes(ctx context.Context) ([]v1.Node, error) 12 | ListPodsByLabel(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error) 13 | CreatePod(ctx context.Context, namespace string, pod *v1.Pod) error 14 | DeletePod(ctx context.Context, namespace string, podName string) error 15 | WaitUntilCompleted(namespace string, podName string) error 16 | GetContainerLog(ctx context.Context, namespace string, podName string, containerName string) (string, error) 17 | } 18 | -------------------------------------------------------------------------------- /pkg/scanner/node/node.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "sync" 7 | 8 | "github.com/aquasecurity/trivy/pkg/report" 9 | "github.com/prometheus/client_golang/prometheus" 10 | "golang.org/x/sync/semaphore" 11 | "golang.org/x/xerrors" 12 | v1 "k8s.io/api/core/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/klog/v2" 15 | ) 16 | 17 | var ( 18 | nodeVulnerbilities = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 19 | Namespace: "trivy", 20 | Name: "node_vulnerabilities", 21 | Help: "Vulnerbilities of cluster 
node detected by trivy", 22 | }, []string{ 23 | "nodeName", 24 | "vulnerabilityId", 25 | "pkgName", 26 | "installedVersion", 27 | "severity", 28 | "fixedVersion", 29 | }, 30 | ) 31 | ) 32 | 33 | type NodeScanner struct { 34 | kubernetesClient KubernetesClient 35 | namespace string 36 | concurrency uint8 37 | } 38 | 39 | func NewNodeScanner(kclient KubernetesClient, ns string, concurrency uint8) *NodeScanner { 40 | return &NodeScanner{ 41 | kubernetesClient: kclient, 42 | namespace: ns, 43 | concurrency: concurrency, 44 | } 45 | } 46 | 47 | func (n *NodeScanner) Name() string { 48 | return "node-vulnerability-scanner" 49 | } 50 | 51 | func (n *NodeScanner) Scan(ctx context.Context) (prometheus.Collector, error) { 52 | nodes, err := n.kubernetesClient.ListNodes(ctx) 53 | if err != nil { 54 | return nodeVulnerbilities, xerrors.Errorf("failed to list nodes for scanning: %w", err) 55 | } 56 | 57 | wg := sync.WaitGroup{} 58 | reports := map[string]report.Report{} 59 | sem := semaphore.NewWeighted(int64(n.concurrency)) 60 | for _, node := range nodes { 61 | wg.Add(1) 62 | if err := sem.Acquire(ctx, 1); err != nil { 63 | klog.Warningf("failed to acquire semaphore: %w", err) 64 | continue 65 | } 66 | 67 | go func(nodeName string) { 68 | defer func() { 69 | sem.Release(1) 70 | wg.Done() 71 | }() 72 | report, err := n.ScanNode(ctx, nodeName) 73 | if err != nil { 74 | klog.Warningf("failed to scan node(%s): %s", nodeName, err) 75 | return 76 | } 77 | 78 | reports[nodeName] = *report 79 | }(node.Name) 80 | } 81 | wg.Wait() 82 | 83 | nodeVulnerbilities.Reset() 84 | for nodeName, rp := range reports { 85 | for _, rs := range rp.Results { 86 | for _, v := range rs.Vulnerabilities { 87 | labels := []string{ 88 | nodeName, 89 | v.VulnerabilityID, 90 | v.PkgName, 91 | v.InstalledVersion, 92 | v.Severity, 93 | v.FixedVersion, 94 | } 95 | nodeVulnerbilities.WithLabelValues(labels...).Set(1) 96 | } 97 | } 98 | } 99 | 100 | return nodeVulnerbilities, nil 101 | } 102 | 103 | func (n 
*NodeScanner) CleanUp() error { 104 | labelSelector := &metav1.LabelSelector{ 105 | MatchLabels: map[string]string{ 106 | "app": "trivy-node-scanner", 107 | }, 108 | } 109 | 110 | pods, err := n.kubernetesClient.ListPodsByLabel(context.Background(), labelSelector) 111 | if err != nil { 112 | return xerrors.Errorf("failed to list the remaining pods scanning nodes: %s", err) 113 | } 114 | 115 | for _, p := range pods { 116 | err := n.kubernetesClient.DeletePod(context.Background(), p.Namespace, p.Name) 117 | if err != nil { 118 | return xerrors.Errorf("failed to delete pod(%s): %s", p.Name, err) 119 | } 120 | } 121 | 122 | return nil 123 | } 124 | 125 | func (n *NodeScanner) ScanNode(ctx context.Context, nodeName string) (*report.Report, error) { 126 | podName := "trivy-" + nodeName 127 | pod := buildPod(podName, nodeName) 128 | 129 | err := n.kubernetesClient.CreatePod(ctx, n.namespace, pod) 130 | if err != nil { 131 | return nil, xerrors.Errorf("failed to create pod(%s) for scanning node: %s", pod.Name, err) 132 | } 133 | 134 | defer func() { 135 | err = n.kubernetesClient.DeletePod(context.Background(), n.namespace, pod.Name) 136 | if err != nil { 137 | klog.Warningf("failed to delete pod(%s) after scanning %s: %s", pod.Name, nodeName, err) 138 | } 139 | }() 140 | 141 | err = n.kubernetesClient.WaitUntilCompleted(n.namespace, pod.Name) 142 | if err != nil { 143 | return nil, xerrors.Errorf("failed to complete pod(%s) for scanning node: %s", pod.Name, err) 144 | } 145 | 146 | containerName := "trivy" 147 | log, err := n.kubernetesClient.GetContainerLog(ctx, n.namespace, pod.Name, containerName) 148 | if err != nil { 149 | return nil, xerrors.Errorf("failed to get container log from %s: %w", pod.Name, err) 150 | } 151 | 152 | var rp report.Report 153 | err = json.Unmarshal([]byte(log), &rp) 154 | if err != nil { 155 | return nil, xerrors.Errorf("failed to unmarshal from container log string to report: %w", err) 156 | } 157 | 158 | return &rp, nil 159 | } 160 | 161 | 
func buildPod(podName string, nodeName string) *v1.Pod { 162 | return &v1.Pod{ 163 | TypeMeta: metav1.TypeMeta{ 164 | Kind: "Pod", 165 | APIVersion: "v1", 166 | }, 167 | ObjectMeta: metav1.ObjectMeta{ 168 | Name: podName, 169 | Labels: map[string]string{ 170 | "app": "trivy-node-scanner", 171 | }, 172 | }, 173 | Spec: v1.PodSpec{ 174 | Affinity: &v1.Affinity{ 175 | NodeAffinity: &v1.NodeAffinity{ 176 | RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ 177 | NodeSelectorTerms: []v1.NodeSelectorTerm{ 178 | { 179 | MatchExpressions: []v1.NodeSelectorRequirement{ 180 | { 181 | Key: "kubernetes.io/hostname", 182 | Operator: v1.NodeSelectorOpIn, 183 | Values: []string{nodeName}, 184 | }, 185 | }, 186 | }, 187 | }, 188 | }, 189 | }, 190 | }, 191 | Tolerations: []v1.Toleration{ 192 | { 193 | Key: "node-role.kubernetes.io/master", 194 | Effect: v1.TaintEffectNoSchedule, 195 | Operator: v1.TolerationOpExists, 196 | }, 197 | { 198 | Key: "node-role.kubernetes.io/control-plane", 199 | Effect: v1.TaintEffectNoSchedule, 200 | Operator: v1.TolerationOpExists, 201 | }, 202 | }, 203 | Containers: []v1.Container{ 204 | { 205 | Name: "trivy", 206 | Image: "ghcr.io/aquasecurity/trivy:0.22.0", 207 | Args: []string{ 208 | "--quiet", 209 | "rootfs", 210 | "--format", 211 | "json", 212 | "--no-progress", 213 | "--skip-dirs", 214 | "/host/proc", 215 | "/host", 216 | }, 217 | VolumeMounts: []v1.VolumeMount{ 218 | { 219 | Name: "host", 220 | MountPath: "/host", 221 | }, 222 | }, 223 | }, 224 | }, 225 | RestartPolicy: v1.RestartPolicyNever, 226 | Volumes: []v1.Volume{ 227 | { 228 | Name: "host", 229 | VolumeSource: v1.VolumeSource{ 230 | HostPath: &v1.HostPathVolumeSource{ 231 | Path: "/", 232 | }, 233 | }, 234 | }, 235 | }, 236 | }, 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /pkg/scanner/node/node_test.go: -------------------------------------------------------------------------------- 1 | package node_test 2 | 3 | 
import (
	"context"
	"encoding/json"
	"reflect"
	"testing"

	"github.com/aquasecurity/trivy/pkg/report"
	"github.com/hnts/vulnerability-exporter/pkg/scanner/node"
	"golang.org/x/xerrors"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// resultJson is a real trivy rootfs report (one ubuntu CVE) used as the
// fake container-log payload in the tests below.
var resultJson string = `{
  "SchemaVersion": 2,
  "ArtifactName": "test",
  "ArtifactType": "filesystem",
  "Metadata": {
    "OS": {
      "Family": "ubuntu",
      "Name": "20.04"
    },
    "ImageConfig": {
      "architecture": "",
      "created": "0001-01-01T00:00:00Z",
      "os": "",
      "rootfs": {
        "type": "",
        "diff_ids": null
      },
      "config": {}
    }
  },
  "Results": [
    {
      "Target": "test (ubuntu 20.04)",
      "Class": "os-pkgs",
      "Type": "ubuntu",
      "Vulnerabilities": [
        {
          "VulnerabilityID": "CVE-2012-6655",
          "PkgName": "accountsservice",
          "InstalledVersion": "0.6.55-0ubuntu12~20.04.5",
          "Layer": {},
          "SeveritySource": "ubuntu",
          "PrimaryURL": "https://avd.aquasec.com/nvd/cve-2012-6655",
          "Title": "accountsservice: local encrypted password disclosure when changing password",
          "Description": "An issue exists AccountService 0.6.37 in the user_change_password_authorized_cb() function in user.c which could let a local users obtain encrypted passwords.",
          "Severity": "LOW",
          "CweIDs": [
            "CWE-732"
          ],
          "CVSS": {
            "nvd": {
              "V2Vector": "AV:L/AC:L/Au:N/C:P/I:N/A:N",
              "V3Vector": "CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:L/I:N/A:N",
              "V2Score": 2.1,
              "V3Score": 3.3
            },
            "redhat": {
              "V2Vector": "AV:L/AC:M/Au:N/C:P/I:N/A:N",
              "V2Score": 1.9
            }
          },
          "References": [
            "http://openwall.com/lists/oss-security/2014/08/15/5",
            "http://www.openwall.com/lists/oss-security/2014/08/16/7",
            "http://www.securityfocus.com/bid/69245",
            "https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2012-6655",
            "https://bugzilla.suse.com/show_bug.cgi?id=CVE-2012-6655",
            "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-6655",
            "https://exchange.xforce.ibmcloud.com/vulnerabilities/95325",
            "https://security-tracker.debian.org/tracker/CVE-2012-6655"
          ],
          "PublishedDate": "2019-11-27T18:15:00Z",
          "LastModifiedDate": "2019-12-16T19:47:00Z"
        }
      ]
    }
  ]
}`

// fakeKuberneteClient stubs node.KubernetesClient for tests; every method
// delegates to the corresponding Fake* function of the test case.
type fakeKuberneteClient struct {
	FakeListNodes          func(ctx context.Context) ([]v1.Node, error)
	FakeListPodsByLabel    func(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error)
	FakeCreatePod          func(ctx context.Context, namespace string, pod *v1.Pod) error
	FakeDeletePod          func(ctx context.Context, namespace string, podName string) error
	FakeWaitUntilCompleted func(namespace string, podName string) error
	FakeGetContainerLog    func(ctx context.Context, namespace string, podName string, containerName string) (string, error)
}

// Compile-time check that the fake satisfies the interface.
var _ node.KubernetesClient = &fakeKuberneteClient{}

func (f *fakeKuberneteClient) ListNodes(ctx context.Context) ([]v1.Node, error) {
	return f.FakeListNodes(ctx)
}

func (f *fakeKuberneteClient) ListPodsByLabel(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error) {
	return f.FakeListPodsByLabel(ctx, label)
}

func (f *fakeKuberneteClient) CreatePod(ctx context.Context, namespace string, pod *v1.Pod) error {
	return f.FakeCreatePod(ctx, namespace, pod)
}

func (f *fakeKuberneteClient) DeletePod(ctx context.Context, namespace string, podName string) error {
	return f.FakeDeletePod(ctx, namespace, podName)
}

func (f *fakeKuberneteClient) WaitUntilCompleted(namespace string, podName string) error {
	return f.FakeWaitUntilCompleted(namespace, podName)
}

func (f *fakeKuberneteClient) GetContainerLog(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
	return f.FakeGetContainerLog(ctx, namespace, podName, containerName)
}

// getReportStruct parses resultJson into the expected report struct.
// The unmarshal error is deliberately ignored: the fixture is known-valid.
func getReportStruct(resultJson string) *report.Report {
	var report report.Report
	_ = json.Unmarshal([]byte(resultJson), &report)

	return &report
}

// Test_ScanNode is a table-driven test covering invalid log output, pod
// creation failure, pod completion failure, log retrieval failure, and the
// success path.
func Test_ScanNode(t *testing.T) {
	var concurrency uint8 = 5
	type fields struct {
		kubernetesClient node.KubernetesClient
		namespace        string
		concurrency      uint8
	}
	type args struct {
		ctx      context.Context
		nodeName string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    *report.Report
		wantErr bool
	}{
		{
			"get invalid result from container logs",
			fields{
				&fakeKuberneteClient{
					FakeListNodes: func(ctx context.Context) ([]v1.Node, error) {
						return []v1.Node{}, nil
					},
					FakeListPodsByLabel: func(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error) {
						return []v1.Pod{}, nil
					},
					FakeDeletePod: func(ctx context.Context, namespace string, podName string) error {
						return nil
					},
					FakeCreatePod: func(ctx context.Context, namespace string, pod *v1.Pod) error {
						return nil
					},
					FakeWaitUntilCompleted: func(namespace string, podName string) error {
						return nil
					},
					FakeGetContainerLog: func(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
						return "test test", nil
					},
				},
				"test-ns",
				concurrency,
			},
			args{
				context.Background(),
				"node",
			},
			nil,
			true,
		},
		{
			"failed to create pod",
			fields{
				&fakeKuberneteClient{
					FakeListNodes: func(ctx context.Context) ([]v1.Node, error) {
						return []v1.Node{}, nil
					},
					FakeListPodsByLabel: func(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error) {
						return []v1.Pod{}, nil
					},
					FakeDeletePod: func(ctx context.Context, namespace string, podName string) error {
						return nil
					},
					FakeCreatePod: func(ctx context.Context, namespace string, pod *v1.Pod) error {
						return xerrors.New("failed to create pod")
					},
					FakeWaitUntilCompleted: func(namespace string, podName string) error {
						return nil
					},
					FakeGetContainerLog: func(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
						return resultJson, nil
					},
				},
				"test-ns",
				concurrency,
			},
			args{
				context.Background(),
				"node",
			},
			nil,
			true,
		},
		{
			"failed to complete pod",
			fields{
				&fakeKuberneteClient{
					FakeListNodes: func(ctx context.Context) ([]v1.Node, error) {
						return []v1.Node{}, nil
					},
					FakeListPodsByLabel: func(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error) {
						return []v1.Pod{}, nil
					},
					FakeDeletePod: func(ctx context.Context, namespace string, podName string) error {
						return nil
					},
					FakeCreatePod: func(ctx context.Context, namespace string, pod *v1.Pod) error {
						return nil
					},
					FakeWaitUntilCompleted: func(namespace string, podName string) error {
						return xerrors.New("failed to complete pod")
					},
					FakeGetContainerLog: func(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
						return resultJson, nil
					},
				},
				"test-ns",
				concurrency,
			},
			args{
				context.Background(),
				"node",
			},
			nil,
			true,
		},
		{
			"failed to get container log from pod",
			fields{
				&fakeKuberneteClient{
					FakeListNodes: func(ctx context.Context) ([]v1.Node, error) {
						return []v1.Node{}, nil
					},
					FakeListPodsByLabel: func(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error) {
						return []v1.Pod{}, nil
					},
					FakeDeletePod: func(ctx context.Context, namespace string, podName string) error {
						return nil
					},
					FakeCreatePod: func(ctx context.Context, namespace string, pod *v1.Pod) error {
						return nil
					},
					FakeWaitUntilCompleted: func(namespace string, podName string) error {
						return nil
					},
					FakeGetContainerLog: func(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
						return "", xerrors.New("failed to get container log from pod")
					},
				},
				"test-ns",
				concurrency,
			},
			args{
				context.Background(),
				"node",
			},
			nil,
			true,
		},
		{
			"scan node successfully",
			fields{
				&fakeKuberneteClient{
					FakeListNodes: func(ctx context.Context) ([]v1.Node, error) {
						return []v1.Node{}, nil
					},
					FakeListPodsByLabel: func(ctx context.Context, label *metav1.LabelSelector) ([]v1.Pod, error) {
						return []v1.Pod{}, nil
					},
					FakeDeletePod: func(ctx context.Context, namespace string, podName string) error {
						return nil
					},
					FakeCreatePod: func(ctx context.Context, namespace string, pod *v1.Pod) error {
						return nil
					},
					FakeWaitUntilCompleted: func(namespace string, podName string) error {
						return nil
					},
					FakeGetContainerLog: func(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
						return resultJson, nil
					},
				},
				"test-ns",
				concurrency,
			},
			args{
				context.Background(),
				"node",
			},
			getReportStruct(resultJson),
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			n := node.NewNodeScanner(tt.fields.kubernetesClient, tt.fields.namespace, tt.fields.concurrency)
			got, err := n.ScanNode(tt.args.ctx,
tt.args.nodeName) 315 | if (err != nil) != tt.wantErr { 316 | t.Errorf("NodeScanner.ScanNode() error = %v, wantErr %v", err, tt.wantErr) 317 | return 318 | } 319 | if !reflect.DeepEqual(got, tt.want) { 320 | t.Errorf("NodeScanner.ScanNode() = %v, want %v", got, tt.want) 321 | } 322 | }) 323 | } 324 | } 325 | -------------------------------------------------------------------------------- /pkg/scanner/scanner.go: -------------------------------------------------------------------------------- 1 | package scanner 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | ) 8 | 9 | type Scanner interface { 10 | Name() string 11 | Scan(ctx context.Context) (prometheus.Collector, error) 12 | CleanUp() error 13 | } 14 | -------------------------------------------------------------------------------- /pkg/trivy/trivy.go: -------------------------------------------------------------------------------- 1 | package trivy 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "os/exec" 7 | 8 | "github.com/aquasecurity/trivy/pkg/report" 9 | "golang.org/x/xerrors" 10 | ) 11 | 12 | type Client struct{} 13 | 14 | func (c Client) ScanImage(ctx context.Context, image string, cacheDir string) (*report.Report, error) { 15 | result, err := exec.CommandContext(ctx, "trivy", "--cache-dir", cacheDir, "--quiet", "image", "--skip-update", "--no-progress", "-f", "json", image).CombinedOutput() 16 | if err != nil { 17 | return nil, xerrors.Errorf("failed to execute trivy image: %s: %s", err, result) 18 | } 19 | 20 | var report report.Report 21 | err = json.Unmarshal([]byte(result), &report) 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | return &report, nil 27 | } 28 | 29 | func (c Client) UpdateDatabase(ctx context.Context, cacheDir string) error { 30 | result, err := exec.CommandContext(ctx, "trivy", "--cache-dir", cacheDir, "image", "--download-db-only").CombinedOutput() 31 | if err != nil { 32 | return xerrors.Errorf("failed to execute trivy 
image --download-db-only: %s: %s", err, result) 33 | } 34 | 35 | return nil 36 | } 37 | --------------------------------------------------------------------------------