├── pkg ├── sinks │ ├── redis.go │ ├── splunk.go │ ├── cloudwatch.go │ ├── logstash.go │ ├── sms.go │ ├── email.go │ ├── inmemory.go │ ├── syslog.go │ ├── teams_test.go │ ├── sns.go │ ├── stdout.go │ ├── pipe.go │ ├── sqs.go │ ├── pubsub.go │ ├── kinesis.go │ ├── file.go │ ├── tmpl_test.go │ ├── firehose.go │ ├── teams.go │ ├── webhook.go │ ├── sink.go │ ├── avro.go │ ├── eventbridge.go │ ├── tmpl.go │ ├── opsgenie.go │ ├── slack.go │ ├── loki.go │ ├── receiver.go │ ├── opensearch.go │ ├── elasticsearch.go │ ├── opscenter.go │ ├── opscenter_test.go │ ├── bigquery.go │ └── kafka.go ├── exporter │ ├── event.go │ ├── watcher.go │ ├── router.go │ ├── receivers.go │ ├── sync_registry.go │ ├── route.go │ ├── engine.go │ ├── engine_test.go │ ├── channel_registry.go │ ├── rule.go │ ├── config.go │ ├── config_test.go │ ├── route_test.go │ └── rule_test.go ├── version │ └── version.go ├── setup │ ├── setup.go │ └── setup_test.go ├── kube │ ├── client.go │ ├── event.go │ ├── event_test.go │ ├── objects.go │ ├── leaderelection.go │ ├── watcher.go │ └── watcher_test.go ├── batch │ ├── writer.go │ └── writer_test.go └── metrics │ └── metrics.go ├── .gitignore ├── event-exporter.dockerignore ├── kustomization.yaml ├── config.yaml ├── .github ├── dependabot.yml └── workflows │ ├── test.yml │ └── release.yml ├── deploy ├── 01-config.yaml ├── 00-roles.yaml └── 02-deployment.yaml ├── Dockerfile ├── Makefile ├── config.example.yaml ├── main.go ├── go.mod ├── LICENSE └── README.md /pkg/sinks/redis.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | -------------------------------------------------------------------------------- /pkg/sinks/splunk.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | -------------------------------------------------------------------------------- /pkg/sinks/cloudwatch.go: 
-------------------------------------------------------------------------------- 1 | package sinks 2 | -------------------------------------------------------------------------------- /pkg/sinks/logstash.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | -------------------------------------------------------------------------------- /pkg/exporter/event.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | -------------------------------------------------------------------------------- /pkg/exporter/watcher.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .DS_Store 3 | /kubernetes-event-exporter 4 | -------------------------------------------------------------------------------- /pkg/sinks/sms.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | // Just kidding, please don't implement this -------------------------------------------------------------------------------- /pkg/sinks/email.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | // Just kidding, please don't implement this -------------------------------------------------------------------------------- /event-exporter.dockerignore: -------------------------------------------------------------------------------- 1 | # Ignore everything 2 | * 3 | 4 | # Include pkg directory 5 | !pkg/ 6 | -------------------------------------------------------------------------------- /kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: 
Kustomization 3 | resources: 4 | - deploy/00-roles.yaml 5 | - deploy/01-config.yaml 6 | - deploy/02-deployment.yaml 7 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | logLevel: error 2 | logFormat: json 3 | clusterName: my-super-local-cluster 4 | route: 5 | routes: 6 | - match: 7 | - receiver: "dump" 8 | receivers: 9 | - name: "dump" 10 | stdout: {} -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gomod 4 | directory: / 5 | schedule: 6 | interval: weekly 7 | open-pull-requests-limit: 99 8 | - package-ecosystem: github-actions 9 | directory: / 10 | schedule: 11 | interval: weekly 12 | open-pull-requests-limit: 99 13 | -------------------------------------------------------------------------------- /pkg/exporter/router.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 4 | 5 | type Router struct { 6 | cfg *Config 7 | rcvr ReceiverRegistry 8 | } 9 | 10 | func (r *Router) ProcessEvent(event *kube.EnhancedEvent) { 11 | r.cfg.Route.ProcessEvent(event, r.rcvr) 12 | } 13 | -------------------------------------------------------------------------------- /pkg/exporter/receivers.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 5 | "github.com/resmoio/kubernetes-event-exporter/pkg/sinks" 6 | ) 7 | 8 | // ReceiverRegistry registers a receiver with the appropriate sink 9 | type ReceiverRegistry interface { 10 | SendEvent(string, *kube.EnhancedEvent) 11 | Register(string, sinks.Sink) 12 | Close() 13 | } 14 
| -------------------------------------------------------------------------------- /deploy/01-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: event-exporter-cfg 5 | namespace: monitoring 6 | data: 7 | config.yaml: | 8 | logLevel: warn 9 | logFormat: json 10 | metricsNamePrefix: event_exporter_ 11 | route: 12 | routes: 13 | - match: 14 | - receiver: "dump" 15 | receivers: 16 | - name: "dump" 17 | stdout: {} -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: [push, pull_request] 3 | jobs: 4 | build: 5 | name: Build 6 | runs-on: ubuntu-latest 7 | steps: 8 | 9 | - name: Set up Go 1.20 10 | uses: actions/setup-go@v3 11 | with: 12 | go-version: "1.20" 13 | id: go 14 | 15 | - name: Check out code into the Go module directory 16 | uses: actions/checkout@v3 17 | 18 | - name: Build 19 | run: go build -v . 20 | 21 | - name: Test 22 | run: go test ./... 
23 | -------------------------------------------------------------------------------- /pkg/sinks/inmemory.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 6 | ) 7 | 8 | type InMemoryConfig struct { 9 | Ref *InMemory 10 | } 11 | 12 | type InMemory struct { 13 | Events []*kube.EnhancedEvent 14 | Config *InMemoryConfig 15 | } 16 | 17 | func (i *InMemory) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 18 | i.Events = append(i.Events, ev) 19 | return nil 20 | } 21 | 22 | func (i *InMemory) Close() { 23 | // No-op 24 | } 25 | 26 | 27 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.20 AS builder 2 | 3 | ARG VERSION 4 | ENV PKG github.com/resmoio/kubernetes-event-exporter/pkg 5 | 6 | ADD . /app 7 | WORKDIR /app 8 | RUN CGO_ENABLED=0 GOOS=linux GO11MODULE=on go build -ldflags="-s -w -X ${PKG}/version.Version=${VERSION}" -a -o /main . 9 | 10 | FROM gcr.io/distroless/static:nonroot 11 | COPY --from=builder --chown=nonroot:nonroot /main /kubernetes-event-exporter 12 | 13 | # https://github.com/GoogleContainerTools/distroless/blob/main/base/base.bzl#L8C1-L9C1 14 | USER 65532 15 | 16 | ENTRYPOINT ["/kubernetes-event-exporter"] 17 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "runtime" 5 | "runtime/debug" 6 | ) 7 | 8 | // Build information. Populated at build-time. 
9 | var ( 10 | Version = "unknown" 11 | GoVersion = runtime.Version() 12 | GoOS = runtime.GOOS 13 | GoArch = runtime.GOARCH 14 | ) 15 | 16 | func Revision() string { 17 | bi, ok := debug.ReadBuildInfo() 18 | 19 | if ok { 20 | for _, kv := range bi.Settings { 21 | switch kv.Key { 22 | case "vcs.revision": 23 | return kv.Value 24 | } 25 | } 26 | } 27 | 28 | return "unknown" 29 | } 30 | -------------------------------------------------------------------------------- /deploy/00-roles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | namespace: monitoring 5 | name: event-exporter 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: event-exporter 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: event-exporter 15 | subjects: 16 | - kind: ServiceAccount 17 | namespace: monitoring 18 | name: event-exporter 19 | --- 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | kind: ClusterRole 22 | metadata: 23 | name: event-exporter 24 | rules: 25 | - apiGroups: ["*"] 26 | resources: ["*"] 27 | verbs: ["get", "watch", "list"] 28 | - apiGroups: ["coordination.k8s.io"] 29 | resources: ["leases"] 30 | verbs: ["*"] 31 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: build 2 | build: tidy ## Build the CLI 3 | go build 4 | 5 | build-image: ## Build the Docker image 6 | docker build -t kubernetes-event-exporter . 7 | 8 | .PHONY: fmt 9 | fmt: ## Run go fmt against code 10 | gofmt -s -l -w . 11 | 12 | .PHONY: vet 13 | vet: ## Run go vet against code 14 | go vet ./... 15 | 16 | tidy: ## Run go mod tidy 17 | go mod tidy 18 | 19 | test: tidy ## Run tests 20 | go test -cover -mod=mod -v ./... 
21 | 22 | clean: ## Delete go.sum and clean mod cache 23 | go clean -modcache 24 | rm go.sum 25 | 26 | .PHONY: help 27 | help: ## Display this help. 28 | @cat $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' 29 | -------------------------------------------------------------------------------- /pkg/setup/setup.go: -------------------------------------------------------------------------------- 1 | package setup 2 | 3 | import ( 4 | "errors" 5 | "strings" 6 | 7 | "github.com/goccy/go-yaml" 8 | "github.com/resmoio/kubernetes-event-exporter/pkg/exporter" 9 | ) 10 | 11 | func ParseConfigFromBytes(configBytes []byte) (exporter.Config, error) { 12 | var config exporter.Config 13 | err := yaml.Unmarshal(configBytes, &config) 14 | if err != nil { 15 | errMsg := err.Error() 16 | errLines := strings.Split(errMsg, "\n") 17 | if len(errLines) > 0 { 18 | errMsg = errLines[0] 19 | } 20 | for _, line := range errLines { 21 | if strings.Contains(line, "> ") { 22 | errMsg += ": [ line " + line + "]" 23 | if strings.Contains(line, "{{") { 24 | errMsg += ": " + "Need to wrap values with special characters in quotes" 25 | } 26 | } 27 | } 28 | errMsg = "Cannot parse config to YAML: " + errMsg 29 | return exporter.Config{}, errors.New(errMsg) 30 | } 31 | 32 | return config, nil 33 | } 34 | -------------------------------------------------------------------------------- /pkg/sinks/syslog.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 7 | "log/syslog" 8 | ) 9 | 10 | type SyslogConfig struct { 11 | Network string `yaml:"network"` 12 | Address string `yaml:"address"` 13 | Tag string `yaml:"tag"` 14 | } 15 | 16 | type SyslogSink struct { 17 | sw 
*syslog.Writer 18 | } 19 | 20 | func NewSyslogSink(config *SyslogConfig) (Sink, error) { 21 | w, err := syslog.Dial(config.Network, config.Address, syslog.LOG_LOCAL0, config.Tag) 22 | if err != nil { 23 | return nil, err 24 | } 25 | return &SyslogSink{sw: w}, nil 26 | } 27 | 28 | func (w *SyslogSink) Close() { 29 | w.sw.Close() 30 | } 31 | 32 | func (w *SyslogSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 33 | 34 | if b, err := json.Marshal(ev); err == nil { 35 | _, writeErr := w.sw.Write(b) 36 | 37 | if writeErr != nil { 38 | return writeErr 39 | } 40 | } else { 41 | return err 42 | } 43 | return nil 44 | } 45 | -------------------------------------------------------------------------------- /pkg/kube/client.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "k8s.io/client-go/kubernetes" 5 | "k8s.io/client-go/rest" 6 | "k8s.io/client-go/tools/clientcmd" 7 | "os" 8 | ) 9 | 10 | // GetKubernetesClient returns the client if it's possible in cluster, otherwise tries to read HOME 11 | func GetKubernetesClient() (*kubernetes.Clientset, error) { 12 | config, err := GetKubernetesConfig("") 13 | if err != nil { 14 | return nil, err 15 | } 16 | 17 | return kubernetes.NewForConfig(config) 18 | } 19 | 20 | func GetKubernetesConfig(kubeconfig string) (*rest.Config, error) { 21 | if len(kubeconfig) > 0 { 22 | return clientcmd.BuildConfigFromFlags("", kubeconfig) 23 | } 24 | 25 | // If kubeconfig is not set, try to use in cluster config. 
26 | config, err := rest.InClusterConfig() 27 | if err == nil { 28 | return config, nil 29 | } else if err != rest.ErrNotInCluster { 30 | return nil, err 31 | } 32 | 33 | // Read KUBECONFIG env variable as fallback 34 | return clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG")) 35 | } 36 | -------------------------------------------------------------------------------- /pkg/exporter/sync_registry.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "context" 5 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 6 | "github.com/resmoio/kubernetes-event-exporter/pkg/sinks" 7 | "github.com/rs/zerolog/log" 8 | ) 9 | 10 | // SyncRegistry is for development purposes and performs poorly and blocks when an event is received so it is 11 | // not suited for high volume & production workloads 12 | type SyncRegistry struct { 13 | reg map[string]sinks.Sink 14 | } 15 | 16 | func (s *SyncRegistry) SendEvent(name string, event *kube.EnhancedEvent) { 17 | err := s.reg[name].Send(context.Background(), event) 18 | if err != nil { 19 | log.Debug().Err(err).Str("sink", name).Str("event", string(event.UID)).Msg("Cannot send event") 20 | } 21 | } 22 | 23 | func (s *SyncRegistry) Register(name string, sink sinks.Sink) { 24 | if s.reg == nil { 25 | s.reg = make(map[string]sinks.Sink) 26 | } 27 | 28 | s.reg[name] = sink 29 | } 30 | 31 | func (s *SyncRegistry) Close() { 32 | for name, sink := range s.reg { 33 | log.Info().Str("sink", name).Msg("Closing sink") 34 | sink.Close() 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /pkg/sinks/teams_test.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func 
TestTeams_Send(t *testing.T) { 14 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 15 | w.WriteHeader(http.StatusOK) 16 | })) 17 | defer ts.Close() 18 | client := Teams{cfg: &TeamsConfig{Endpoint: ts.URL}} 19 | 20 | err := client.Send(context.Background(), &kube.EnhancedEvent{}) 21 | 22 | assert.NoError(t, err) 23 | } 24 | 25 | func TestTeams_Send_WhenTeamsReturnsRateLimited(t *testing.T) { 26 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 27 | w.WriteHeader(http.StatusOK) 28 | _, _ = w.Write([]byte("Webhook message delivery failed with error: Microsoft Teams endpoint returned HTTP error 429 with ContextId tcid=0")) 29 | })) 30 | defer ts.Close() 31 | client := Teams{cfg: &TeamsConfig{Endpoint: ts.URL}} 32 | 33 | err := client.Send(context.Background(), &kube.EnhancedEvent{}) 34 | 35 | assert.ErrorContains(t, err, "rate limited") 36 | } 37 | -------------------------------------------------------------------------------- /pkg/exporter/route.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 4 | 5 | // Route allows using rules to drop events or match events to specific receivers. 
6 | // It also allows using routes recursively for complex route building to fit 7 | // most of the needs 8 | type Route struct { 9 | Drop []Rule 10 | Match []Rule 11 | Routes []Route 12 | } 13 | 14 | func (r *Route) ProcessEvent(ev *kube.EnhancedEvent, registry ReceiverRegistry) { 15 | // First determine whether we will drop the event: If any of the drop is matched, we break the loop 16 | for _, v := range r.Drop { 17 | if v.MatchesEvent(ev) { 18 | return 19 | } 20 | } 21 | 22 | // It has match rules, it should go to the matchers 23 | matchesAll := true 24 | for _, rule := range r.Match { 25 | if rule.MatchesEvent(ev) { 26 | if rule.Receiver != "" { 27 | registry.SendEvent(rule.Receiver, ev) 28 | // Send the event down the hole 29 | } 30 | } else { 31 | matchesAll = false 32 | } 33 | } 34 | 35 | // If all matches are satisfied, we can send them down to the rabbit hole 36 | if matchesAll { 37 | for _, subRoute := range r.Routes { 38 | subRoute.ProcessEvent(ev, registry) 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /pkg/sinks/sns.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "github.com/aws/aws-sdk-go/aws" 6 | "github.com/aws/aws-sdk-go/aws/session" 7 | "github.com/aws/aws-sdk-go/service/sns" 8 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 9 | ) 10 | 11 | type SNSConfig struct { 12 | TopicARN string `yaml:"topicARN"` 13 | Region string `yaml:"region"` 14 | Layout map[string]interface{} `yaml:"layout"` 15 | } 16 | 17 | type SNSSink struct { 18 | cfg *SNSConfig 19 | svc *sns.SNS 20 | } 21 | 22 | func NewSNSSink(cfg *SNSConfig) (Sink, error) { 23 | sess, err := session.NewSession(&aws.Config{ 24 | Region: aws.String(cfg.Region)}, 25 | ) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | svc := sns.New(sess) 31 | return &SNSSink{ 32 | cfg: cfg, 33 | svc: svc, 34 | }, nil 35 | } 36 | 37 | func (s 
*SNSSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 38 | toSend, e := serializeEventWithLayout(s.cfg.Layout, ev) 39 | if e != nil { 40 | return e 41 | } 42 | 43 | _, err := s.svc.PublishWithContext(ctx, &sns.PublishInput{ 44 | Message: aws.String(string(toSend)), 45 | TopicArn: aws.String(s.cfg.TopicARN), 46 | }) 47 | 48 | return err 49 | } 50 | 51 | func (s *SNSSink) Close() { 52 | } 53 | -------------------------------------------------------------------------------- /pkg/exporter/engine.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "reflect" 5 | 6 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 7 | "github.com/rs/zerolog/log" 8 | ) 9 | 10 | // Engine is responsible for initializing the receivers from sinks 11 | type Engine struct { 12 | Route Route 13 | Registry ReceiverRegistry 14 | } 15 | 16 | func NewEngine(config *Config, registry ReceiverRegistry) *Engine { 17 | for _, v := range config.Receivers { 18 | sink, err := v.GetSink() 19 | if err != nil { 20 | log.Fatal().Err(err).Str("name", v.Name).Msg("Cannot initialize sink") 21 | } 22 | 23 | log.Info(). 24 | Str("name", v.Name). 25 | Str("type", reflect.TypeOf(sink).String()). 26 | Msg("Registering sink") 27 | 28 | registry.Register(v.Name, sink) 29 | } 30 | 31 | return &Engine{ 32 | Route: config.Route, 33 | Registry: registry, 34 | } 35 | } 36 | 37 | // OnEvent does not care whether event is add or update. 
Prior filtering should be done in the controller/watcher 38 | func (e *Engine) OnEvent(event *kube.EnhancedEvent) { 39 | e.Route.ProcessEvent(event, e.Registry) 40 | } 41 | 42 | // Stop stops all registered sinks 43 | func (e *Engine) Stop() { 44 | log.Info().Msg("Closing sinks") 45 | e.Registry.Close() 46 | log.Info().Msg("All sinks closed") 47 | } 48 | -------------------------------------------------------------------------------- /pkg/sinks/stdout.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "io" 7 | "log" 8 | "os" 9 | 10 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 11 | ) 12 | 13 | type StdoutConfig struct { 14 | // DeDot all labels and annotations in the event. For both the event and the involvedObject 15 | DeDot bool `yaml:"deDot"` 16 | Layout map[string]interface{} `yaml:"layout"` 17 | } 18 | 19 | func (f *StdoutConfig) Validate() error { 20 | return nil 21 | } 22 | 23 | type Stdout struct { 24 | writer io.Writer 25 | encoder *json.Encoder 26 | cfg *StdoutConfig 27 | } 28 | 29 | func NewStdoutSink(config *StdoutConfig) (*Stdout, error) { 30 | logger := log.New(os.Stdout, "", 0) 31 | writer := logger.Writer() 32 | 33 | return &Stdout{ 34 | writer: writer, 35 | encoder: json.NewEncoder(writer), 36 | cfg: config, 37 | }, nil 38 | } 39 | 40 | func (f *Stdout) Close() { 41 | } 42 | 43 | func (f *Stdout) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 44 | if f.cfg.DeDot { 45 | de := ev.DeDot() 46 | ev = &de 47 | } 48 | 49 | if f.cfg.Layout == nil { 50 | return f.encoder.Encode(ev) 51 | } 52 | 53 | res, err := convertLayoutTemplate(f.cfg.Layout, ev) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | return f.encoder.Encode(res) 59 | } 60 | -------------------------------------------------------------------------------- /deploy/02-deployment.yaml: -------------------------------------------------------------------------------- 
1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: event-exporter 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | app: event-exporter 12 | version: v1 13 | annotations: 14 | prometheus.io/scrape: 'true' 15 | prometheus.io/port: '2112' 16 | prometheus.io/path: '/metrics' 17 | spec: 18 | serviceAccountName: event-exporter 19 | securityContext: 20 | runAsNonRoot: true 21 | seccompProfile: 22 | type: RuntimeDefault 23 | containers: 24 | - name: event-exporter 25 | # The good practice would be to pin the version. This is just a reference so that we don't 26 | # have to update this file in each release. 27 | image: ghcr.io/resmoio/kubernetes-event-exporter:latest 28 | imagePullPolicy: IfNotPresent 29 | args: 30 | - -conf=/data/config.yaml 31 | volumeMounts: 32 | - mountPath: /data 33 | name: cfg 34 | securityContext: 35 | allowPrivilegeEscalation: false 36 | capabilities: 37 | drop: [ALL] 38 | volumes: 39 | - name: cfg 40 | configMap: 41 | name: event-exporter-cfg 42 | selector: 43 | matchLabels: 44 | app: event-exporter 45 | version: v1 46 | -------------------------------------------------------------------------------- /pkg/sinks/pipe.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "io" 7 | "os" 8 | 9 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 10 | ) 11 | 12 | type PipeConfig struct { 13 | Path string `yaml:"path"` 14 | // DeDot all labels and annotations in the event. 
For both the event and the involvedObject 15 | DeDot bool `yaml:"deDot"` 16 | Layout map[string]interface{} `yaml:"layout"` 17 | } 18 | 19 | func (f *PipeConfig) Validate() error { 20 | return nil 21 | } 22 | 23 | type Pipe struct { 24 | writer io.WriteCloser 25 | encoder *json.Encoder 26 | cfg *PipeConfig 27 | } 28 | 29 | func NewPipeSink(config *PipeConfig) (*Pipe, error) { 30 | mode := os.FileMode(0644) 31 | f, err := os.OpenFile(config.Path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode) 32 | if err != nil { 33 | return nil, err 34 | } 35 | return &Pipe{ 36 | writer: f, 37 | encoder: json.NewEncoder(f), 38 | cfg: config, 39 | }, nil 40 | } 41 | 42 | func (f *Pipe) Close() { 43 | _ = f.writer.Close() 44 | } 45 | 46 | func (f *Pipe) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 47 | if f.cfg.DeDot { 48 | de := ev.DeDot() 49 | ev = &de 50 | } 51 | 52 | if f.cfg.Layout == nil { 53 | return f.encoder.Encode(ev) 54 | } 55 | 56 | res, err := convertLayoutTemplate(f.cfg.Layout, ev) 57 | if err != nil { 58 | return err 59 | } 60 | 61 | return f.encoder.Encode(res) 62 | } 63 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Publish Docker image 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | push_to_registries: 9 | name: Push Docker image to multiple registries 10 | runs-on: ubuntu-latest 11 | permissions: 12 | packages: write 13 | contents: read 14 | steps: 15 | - name: Check out the repo 16 | uses: actions/checkout@v3 17 | 18 | - name: Set up QEMU 19 | uses: docker/setup-qemu-action@v2 20 | 21 | - name: Set up Docker Buildx 22 | uses: docker/setup-buildx-action@v2 23 | 24 | - name: Log in to the Container registry 25 | uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 26 | with: 27 | registry: ghcr.io 28 | username: ${{ github.actor }} 29 | password: ${{ secrets.GITHUB_TOKEN 
}} 30 | 31 | - name: Extract metadata (tags, labels) for Docker 32 | id: meta 33 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 34 | with: 35 | images: | 36 | ghcr.io/${{ github.repository }} 37 | 38 | - name: Build and push Docker images 39 | uses: docker/build-push-action@v3 40 | with: 41 | context: . 42 | push: true 43 | platforms: linux/amd64,linux/arm64 44 | tags: ${{ steps.meta.outputs.tags }} 45 | labels: ${{ steps.meta.outputs.labels }} 46 | build-args: | 47 | VERSION=${{ steps.meta.outputs.version }} 48 | -------------------------------------------------------------------------------- /pkg/sinks/sqs.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "github.com/aws/aws-sdk-go/aws" 6 | "github.com/aws/aws-sdk-go/aws/session" 7 | "github.com/aws/aws-sdk-go/service/sqs" 8 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 9 | ) 10 | 11 | type SQSConfig struct { 12 | QueueName string `yaml:"queueName"` 13 | Region string `yaml:"region"` 14 | Layout map[string]interface{} `yaml:"layout"` 15 | } 16 | 17 | type SQSSink struct { 18 | cfg *SQSConfig 19 | svc *sqs.SQS 20 | queueURL string 21 | } 22 | 23 | func NewSQSSink(cfg *SQSConfig) (Sink, error) { 24 | sess, err := session.NewSession(&aws.Config{ 25 | Region: aws.String(cfg.Region)}, 26 | ) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | svc := sqs.New(sess) 32 | out, err := svc.GetQueueUrl(&sqs.GetQueueUrlInput{ 33 | QueueName: &cfg.QueueName, 34 | }) 35 | 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | return &SQSSink{ 41 | cfg: cfg, 42 | svc: svc, 43 | queueURL: *out.QueueUrl, 44 | }, nil 45 | } 46 | 47 | func (s *SQSSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 48 | toSend, e := serializeEventWithLayout(s.cfg.Layout, ev) 49 | if e != nil { 50 | return e 51 | } 52 | 53 | _, err := s.svc.SendMessageWithContext(ctx, &sqs.SendMessageInput{ 54 | 
MessageBody: aws.String(string(toSend)), 55 | QueueUrl: &s.queueURL, 56 | }) 57 | 58 | return err 59 | } 60 | 61 | func (s *SQSSink) Close() { 62 | // No-op 63 | } 64 | -------------------------------------------------------------------------------- /pkg/exporter/engine_test.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 5 | "github.com/resmoio/kubernetes-event-exporter/pkg/sinks" 6 | "github.com/stretchr/testify/assert" 7 | "testing" 8 | ) 9 | 10 | func TestEngineNoRoutes(t *testing.T) { 11 | cfg := &Config{ 12 | Route: Route{}, 13 | Receivers: nil, 14 | } 15 | 16 | e := NewEngine(cfg, &SyncRegistry{}) 17 | ev := &kube.EnhancedEvent{} 18 | e.OnEvent(ev) 19 | } 20 | 21 | func TestEngineSimple(t *testing.T) { 22 | config := &sinks.InMemoryConfig{} 23 | cfg := &Config{ 24 | Route: Route{ 25 | Match: []Rule{{ 26 | Receiver: "in-mem", 27 | }}, 28 | }, 29 | Receivers: []sinks.ReceiverConfig{{ 30 | Name: "in-mem", 31 | InMemory: config, 32 | }}, 33 | } 34 | 35 | e := NewEngine(cfg, &SyncRegistry{}) 36 | ev := &kube.EnhancedEvent{} 37 | e.OnEvent(ev) 38 | 39 | assert.Contains(t, config.Ref.Events, ev) 40 | } 41 | 42 | func TestEngineDropSimple(t *testing.T) { 43 | config := &sinks.InMemoryConfig{} 44 | cfg := &Config{ 45 | Route: Route{ 46 | Drop: []Rule{{ 47 | // Drops anything 48 | }}, 49 | Match: []Rule{{ 50 | Receiver: "in-mem", 51 | }}, 52 | }, 53 | Receivers: []sinks.ReceiverConfig{{ 54 | Name: "in-mem", 55 | InMemory: config, 56 | }}, 57 | } 58 | 59 | e := NewEngine(cfg, &SyncRegistry{}) 60 | ev := &kube.EnhancedEvent{} 61 | e.OnEvent(ev) 62 | 63 | assert.NotContains(t, config.Ref.Events, ev) 64 | assert.Empty(t, config.Ref.Events) 65 | } 66 | -------------------------------------------------------------------------------- /pkg/sinks/pubsub.go: -------------------------------------------------------------------------------- 1 | 
package sinks 2 | 3 | import ( 4 | "context" 5 | 6 | "cloud.google.com/go/pubsub" 7 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 8 | "github.com/rs/zerolog/log" 9 | ) 10 | 11 | type PubsubConfig struct { 12 | GcloudProjectId string `yaml:"gcloud_project_id"` 13 | Topic string `yaml:"topic"` 14 | CreateTopic bool `yaml:"create_topic"` 15 | } 16 | 17 | type PubsubSink struct { 18 | cfg *PubsubConfig 19 | pubsubClient *pubsub.Client 20 | topic *pubsub.Topic 21 | } 22 | 23 | func NewPubsubSink(cfg *PubsubConfig) (Sink, error) { 24 | ctx := context.Background() 25 | pubsubClient, err := pubsub.NewClient(ctx, cfg.GcloudProjectId) // TODO: add options here 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | var topic *pubsub.Topic 31 | if cfg.CreateTopic { 32 | topic, err = pubsubClient.CreateTopic(context.Background(), cfg.Topic) 33 | if err != nil { 34 | return nil, err 35 | } 36 | log.Info().Msgf("pubsub: created topic: %s", cfg.Topic) 37 | } else { 38 | topic = pubsubClient.Topic(cfg.Topic) 39 | } 40 | 41 | return &PubsubSink{ 42 | pubsubClient: pubsubClient, 43 | topic: topic, 44 | cfg: cfg, 45 | }, nil 46 | } 47 | 48 | func (ps *PubsubSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 49 | msg := &pubsub.Message{ 50 | Data: ev.ToJSON(), 51 | } 52 | _, err := ps.topic.Publish(ctx, msg).Get(ctx) 53 | return err 54 | } 55 | 56 | func (ps *PubsubSink) Close() { 57 | log.Info().Msgf("pubsub: Closing topic...") 58 | ps.pubsubClient.Close() 59 | } 60 | -------------------------------------------------------------------------------- /pkg/sinks/kinesis.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "github.com/aws/aws-sdk-go/aws" 7 | "github.com/aws/aws-sdk-go/aws/session" 8 | "github.com/aws/aws-sdk-go/service/kinesis" 9 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 10 | ) 11 | 12 | type KinesisConfig struct { 13 | 
StreamName string `yaml:"streamName"` 14 | Region string `yaml:"region"` 15 | Layout map[string]interface{} `yaml:"layout"` 16 | } 17 | 18 | type KinesisSink struct { 19 | cfg *KinesisConfig 20 | svc *kinesis.Kinesis 21 | } 22 | 23 | func NewKinesisSink(cfg *KinesisConfig) (Sink, error) { 24 | sess, err := session.NewSession(&aws.Config{ 25 | Region: aws.String(cfg.Region)}, 26 | ) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | return &KinesisSink{ 32 | cfg: cfg, 33 | svc: kinesis.New(sess), 34 | }, nil 35 | } 36 | 37 | func (k *KinesisSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 38 | var toSend []byte 39 | 40 | if k.cfg.Layout != nil { 41 | res, err := convertLayoutTemplate(k.cfg.Layout, ev) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | toSend, err = json.Marshal(res) 47 | if err != nil { 48 | return err 49 | } 50 | } else { 51 | toSend = ev.ToJSON() 52 | } 53 | 54 | _, err := k.svc.PutRecord(&kinesis.PutRecordInput{ 55 | Data: toSend, 56 | PartitionKey: aws.String(string(ev.UID)), 57 | StreamName: aws.String(k.cfg.StreamName), 58 | }) 59 | 60 | return err 61 | } 62 | 63 | func (k *KinesisSink) Close() { 64 | // No-op 65 | } 66 | -------------------------------------------------------------------------------- /pkg/sinks/file.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "io" 7 | 8 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 9 | "gopkg.in/natefinch/lumberjack.v2" 10 | ) 11 | 12 | type FileConfig struct { 13 | Path string `yaml:"path"` 14 | Layout map[string]interface{} `yaml:"layout"` 15 | MaxSize int `yaml:"maxsize"` 16 | MaxAge int `yaml:"maxage"` 17 | MaxBackups int `yaml:"maxbackups"` 18 | DeDot bool `yaml:"deDot"` 19 | } 20 | 21 | func (f *FileConfig) Validate() error { 22 | return nil 23 | } 24 | 25 | type File struct { 26 | writer io.WriteCloser 27 | encoder *json.Encoder 28 | layout 
map[string]interface{} 29 | DeDot bool 30 | } 31 | 32 | func NewFileSink(config *FileConfig) (*File, error) { 33 | writer := &lumberjack.Logger{ 34 | Filename: config.Path, 35 | MaxSize: config.MaxSize, 36 | MaxBackups: config.MaxBackups, 37 | MaxAge: config.MaxAge, 38 | } 39 | 40 | return &File{ 41 | writer: writer, 42 | encoder: json.NewEncoder(writer), 43 | layout: config.Layout, 44 | DeDot: config.DeDot, 45 | }, nil 46 | } 47 | 48 | func (f *File) Close() { 49 | _ = f.writer.Close() 50 | } 51 | 52 | func (f *File) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 53 | if f.DeDot { 54 | de := ev.DeDot() 55 | ev = &de 56 | } 57 | if f.layout == nil { 58 | return f.encoder.Encode(ev) 59 | } 60 | 61 | res, err := convertLayoutTemplate(f.layout, ev) 62 | if err != nil { 63 | return err 64 | } 65 | 66 | return f.encoder.Encode(res) 67 | } 68 | -------------------------------------------------------------------------------- /pkg/sinks/tmpl_test.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 5 | "github.com/stretchr/testify/require" 6 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | func TestLayoutConvert(t *testing.T) { 12 | ev := &kube.EnhancedEvent{} 13 | ev.Namespace = "default" 14 | ev.Type = "Warning" 15 | ev.InvolvedObject.Kind = "Pod" 16 | ev.InvolvedObject.Name = "nginx-server-123abc-456def" 17 | ev.Message = "Successfully pulled image \"nginx:latest\"" 18 | ev.FirstTimestamp = v1.Time{Time: time.Now()} 19 | 20 | // Because Go, when parsing yaml, its []interface, not []string 21 | var tagz interface{} 22 | tagz = make([]interface{}, 2) 23 | tagz.([]interface{})[0] = "sre" 24 | tagz.([]interface{})[1] = "ops" 25 | 26 | layout := map[string]interface{}{ 27 | "details": map[interface{}]interface{}{ 28 | "message": "{{ .Message }}", 29 | "kind": "{{ .InvolvedObject.Kind }}", 30 | "name": 
"{{ .InvolvedObject.Name }}", 31 | "namespace": "{{ .Namespace }}", 32 | "type": "{{ .Type }}", 33 | "tags": tagz, 34 | }, 35 | "eventType": "kube-event", 36 | "region": "us-west-2", 37 | "createdAt": "{{ .GetTimestampMs }}", // TODO: Test Int casts 38 | } 39 | 40 | res, err := convertLayoutTemplate(layout, ev) 41 | require.NoError(t, err) 42 | require.Equal(t, res["eventType"], "kube-event") 43 | 44 | val, ok := res["details"].(map[string]interface{}) 45 | 46 | require.True(t, ok, "cannot cast to event") 47 | 48 | val2, ok2 := val["message"].(string) 49 | require.True(t, ok2, "cannot cast message to string") 50 | 51 | require.Equal(t, val2, ev.Message) 52 | } 53 | -------------------------------------------------------------------------------- /pkg/sinks/firehose.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | 7 | "github.com/aws/aws-sdk-go/aws" 8 | "github.com/aws/aws-sdk-go/aws/session" 9 | "github.com/aws/aws-sdk-go/service/firehose" 10 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 11 | ) 12 | 13 | type FirehoseConfig struct { 14 | DeliveryStreamName string `yaml:"deliveryStreamName"` 15 | Region string `yaml:"region"` 16 | Layout map[string]interface{} `yaml:"layout"` 17 | // DeDot all labels and annotations in the event. 
For both the event and the involvedObject 18 | DeDot bool `yaml:"deDot"` 19 | } 20 | 21 | type FirehoseSink struct { 22 | cfg *FirehoseConfig 23 | svc *firehose.Firehose 24 | } 25 | 26 | func NewFirehoseSink(cfg *FirehoseConfig) (Sink, error) { 27 | sess, err := session.NewSession(&aws.Config{ 28 | Region: aws.String(cfg.Region)}, 29 | ) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | return &FirehoseSink{ 35 | cfg: cfg, 36 | svc: firehose.New(sess), 37 | }, nil 38 | } 39 | 40 | func (f *FirehoseSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 41 | var toSend []byte 42 | 43 | if f.cfg.DeDot { 44 | de := ev.DeDot() 45 | ev = &de 46 | } 47 | 48 | if f.cfg.Layout != nil { 49 | res, err := convertLayoutTemplate(f.cfg.Layout, ev) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | toSend, err = json.Marshal(res) 55 | if err != nil { 56 | return err 57 | } 58 | } else { 59 | toSend = ev.ToJSON() 60 | } 61 | 62 | _, err := f.svc.PutRecord(&firehose.PutRecordInput{ 63 | Record: &firehose.Record{ 64 | Data: toSend, 65 | }, 66 | DeliveryStreamName: aws.String(f.cfg.DeliveryStreamName), 67 | }) 68 | 69 | return err 70 | } 71 | 72 | func (f *FirehoseSink) Close() { 73 | // No-op 74 | } 75 | -------------------------------------------------------------------------------- /pkg/sinks/teams.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "strings" 11 | 12 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 13 | ) 14 | 15 | type TeamsConfig struct { 16 | Endpoint string `yaml:"endpoint"` 17 | Layout map[string]interface{} `yaml:"layout"` 18 | Headers map[string]string `yaml:"headers"` 19 | } 20 | 21 | func NewTeamsSink(cfg *TeamsConfig) (Sink, error) { 22 | return &Teams{cfg: cfg}, nil 23 | } 24 | 25 | type Teams struct { 26 | cfg *TeamsConfig 27 | } 28 | 29 | func (w *Teams) Close() { 30 | // 
No-op 31 | } 32 | 33 | func (w *Teams) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 34 | event, err := serializeEventWithLayout(w.cfg.Layout, ev) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | var eventData map[string]interface{} 40 | json.Unmarshal([]byte(event), &eventData) 41 | output := fmt.Sprintf("Event: %s \nStatus: %s \nMetadata: %s", eventData["message"], eventData["reason"], eventData["metadata"]) 42 | 43 | reqBody, err := json.Marshal(map[string]string{ 44 | "summary": "event", 45 | "text": string([]byte(output)), 46 | }) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | req, err := http.NewRequest(http.MethodPost, w.cfg.Endpoint, bytes.NewReader(reqBody)) 52 | if err != nil { 53 | return err 54 | } 55 | req.Header.Add("Content-Type", "application/json") 56 | for k, v := range w.cfg.Headers { 57 | req.Header.Add(k, v) 58 | } 59 | 60 | resp, err := http.DefaultClient.Do(req) 61 | if err != nil { 62 | return nil 63 | } 64 | 65 | defer resp.Body.Close() 66 | body, err := io.ReadAll(resp.Body) 67 | message := string(body) 68 | 69 | if err != nil { 70 | return err 71 | } 72 | 73 | if resp.StatusCode != http.StatusOK { 74 | return fmt.Errorf("not 200: %s", message) 75 | } 76 | // see: https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using?tabs=cURL#rate-limiting-for-connectors 77 | if strings.Contains(message, "Microsoft Teams endpoint returned HTTP error 429") { 78 | return fmt.Errorf("rate limited: %s", message) 79 | } 80 | 81 | return nil 82 | } 83 | -------------------------------------------------------------------------------- /pkg/sinks/webhook.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | 11 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 12 | "github.com/rs/zerolog/log" 13 | ) 14 | 15 | type WebhookConfig struct { 16 | 
Endpoint string `yaml:"endpoint"` 17 | TLS TLS `yaml:"tls"` 18 | Layout map[string]interface{} `yaml:"layout"` 19 | Headers map[string]string `yaml:"headers"` 20 | } 21 | 22 | func NewWebhook(cfg *WebhookConfig) (Sink, error) { 23 | tlsClientConfig, err := setupTLS(&cfg.TLS) 24 | if err != nil { 25 | return nil, fmt.Errorf("failed to setup TLS: %w", err) 26 | } 27 | return &Webhook{cfg: cfg, transport: &http.Transport{ 28 | Proxy: http.ProxyFromEnvironment, 29 | TLSClientConfig: tlsClientConfig, 30 | }}, nil 31 | } 32 | 33 | type Webhook struct { 34 | cfg *WebhookConfig 35 | transport *http.Transport 36 | } 37 | 38 | func (w *Webhook) Close() { 39 | w.transport.CloseIdleConnections() 40 | } 41 | 42 | func (w *Webhook) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 43 | reqBody, err := serializeEventWithLayout(w.cfg.Layout, ev) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | req, err := http.NewRequest(http.MethodPost, w.cfg.Endpoint, bytes.NewReader(reqBody)) 49 | if err != nil { 50 | return err 51 | } 52 | req.Header.Add("Content-Type", "application/json") 53 | 54 | for k, v := range w.cfg.Headers { 55 | realValue, err := GetString(ev, v) 56 | if err != nil { 57 | log.Debug().Err(err).Msgf("parse template failed: %s", v) 58 | req.Header.Add(k, v) 59 | } else { 60 | log.Debug().Msgf("request header: {%s: %s}", k, realValue) 61 | req.Header.Add(k, realValue) 62 | } 63 | } 64 | 65 | client := http.DefaultClient 66 | client.Transport = w.transport 67 | resp, err := client.Do(req) 68 | if err != nil { 69 | return err 70 | } 71 | 72 | defer resp.Body.Close() 73 | body, err := io.ReadAll(resp.Body) 74 | if err != nil { 75 | return err 76 | } 77 | 78 | if !(resp.StatusCode >= 200 && resp.StatusCode < 300) { 79 | return errors.New("not successfull (2xx) response: " + string(body)) 80 | } 81 | 82 | return nil 83 | } 84 | -------------------------------------------------------------------------------- /pkg/sinks/sink.go: 
-------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "crypto/x509" 7 | "errors" 8 | "fmt" 9 | "os" 10 | 11 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 12 | ) 13 | 14 | // Sink is the interface that the third-party providers should implement. It should just get the event and 15 | // transform it depending on its configuration and submit it. Error handling for retries etc. should be handled inside 16 | // for now. 17 | type Sink interface { 18 | Send(ctx context.Context, ev *kube.EnhancedEvent) error 19 | Close() 20 | } 21 | 22 | // BatchSink is an extension Sink that can handle batch events. 23 | // NOTE: Currently no provider implements it nor the receivers can handle it. 24 | type BatchSink interface { 25 | Sink 26 | SendBatch([]*kube.EnhancedEvent) error 27 | } 28 | 29 | type TLS struct { 30 | InsecureSkipVerify bool `yaml:"insecureSkipVerify"` 31 | ServerName string `yaml:"serverName"` 32 | CaFile string `yaml:"caFile"` 33 | KeyFile string `yaml:"keyFile"` 34 | CertFile string `yaml:"certFile"` 35 | } 36 | 37 | func setupTLS(cfg *TLS) (*tls.Config, error) { 38 | tlsClientConfig := &tls.Config{ 39 | InsecureSkipVerify: cfg.InsecureSkipVerify, 40 | ServerName: cfg.ServerName, 41 | } 42 | 43 | if len(cfg.CaFile) > 0 { 44 | readFile, err := os.ReadFile(cfg.CaFile) 45 | if err != nil { 46 | return nil, err 47 | } 48 | 49 | tlsClientConfig.RootCAs = x509.NewCertPool() 50 | tlsClientConfig.RootCAs.AppendCertsFromPEM(readFile) 51 | } 52 | 53 | if len(cfg.KeyFile) > 0 && len(cfg.CertFile) > 0 { 54 | cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile) 55 | if err != nil { 56 | return nil, fmt.Errorf("could not read client certificate or key: %w", err) 57 | } 58 | tlsClientConfig.Certificates = append(tlsClientConfig.Certificates, cert) 59 | } 60 | if len(cfg.KeyFile) > 0 && len(cfg.CertFile) == 0 { 61 | return nil, errors.New("configured keyFile but 
forget certFile for client certificate authentication") 62 | } 63 | if len(cfg.KeyFile) == 0 && len(cfg.CertFile) > 0 { 64 | return nil, errors.New("configured certFile but forget keyFile for client certificate authentication") 65 | } 66 | return tlsClientConfig, nil 67 | } 68 | -------------------------------------------------------------------------------- /pkg/sinks/avro.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | // This provides avro encoding using the goavro package. 4 | // Encoding is a simple variation of the Single Object Encoding 5 | // defined in the avro spec: 6 | // https://avro.apache.org/docs/current/spec.html#single_object_encoding 7 | // 8 | // This schemaid is encoded in the leading 17 bytes of the payload 9 | // where the first byte is \0 and the next 16 bytes are the schemaID string 10 | 11 | import ( 12 | "encoding/hex" 13 | "fmt" 14 | 15 | goavro "github.com/linkedin/goavro/v2" 16 | ) 17 | 18 | // Avro is the config structure to enable avro 19 | // encoding on-demand for the kafka sink 20 | // 21 | // the schemaID is expected to be a 32 char string like an md5hash 22 | // 23 | // the schema must be a legit arvo schema. 
If the schema does not compile 24 | // then you'll get an error in the log at the time of kafka sink creation 25 | // 26 | // if the incoming event can't be decoded into the given avro schema 27 | // then you'll get an error message in the log and the event will not be 28 | // forwarded to kafka 29 | // 30 | 31 | type Avro struct { 32 | SchemaID string `yaml:"schemaID"` 33 | Schema string `yaml:"schema"` 34 | codec *goavro.Codec 35 | } 36 | 37 | func (a Avro) encode(textual []byte) ([]byte, error) { 38 | 39 | var err error 40 | dst, err := hex.DecodeString(a.SchemaID) 41 | if err != nil { 42 | fmt.Println(string(textual)) 43 | panic(err) 44 | } 45 | 46 | // make the header 47 | p := []byte{} 48 | // leading null byte 49 | p = append(p, byte(0)) 50 | // shemaid into the next 16 bytes 51 | p = append(p, dst...) 52 | 53 | // encode the event into avro with the schemid header 54 | avroNative, _, err := a.codec.NativeFromTextual(textual) 55 | if err != nil { 56 | return []byte{}, err 57 | } 58 | 59 | return a.codec.BinaryFromNative(p, avroNative) 60 | 61 | } 62 | 63 | // NewAvroEncoder creates an encoder which will be used 64 | // to avro encode all events prior to sending to kafka 65 | // 66 | // Its only used by the kafka sink 67 | func NewAvroEncoder(schemaID, schema string) (KafkaEncoder, error) { 68 | 69 | codec, err := goavro.NewCodecForStandardJSON(schema) 70 | if err != nil { 71 | return Avro{}, err 72 | } 73 | 74 | if len(schemaID) != 32 { 75 | return Avro{}, fmt.Errorf("Avro encoding requires a 32 character schemaID:schema id:%s:", schemaID) 76 | } 77 | 78 | return Avro{SchemaID: schemaID, codec: codec}, nil 79 | } 80 | -------------------------------------------------------------------------------- /pkg/kube/event.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "encoding/json" 5 | "strings" 6 | "time" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
10 | ) 11 | 12 | type EnhancedEvent struct { 13 | corev1.Event `json:",inline"` 14 | ClusterName string `json:"clusterName"` 15 | InvolvedObject EnhancedObjectReference `json:"involvedObject"` 16 | } 17 | 18 | // DeDot replaces all dots in the labels and annotations with underscores. This is required for example in the 19 | // elasticsearch sink. The dynamic mapping generation interprets dots in JSON keys as as path in a onject. 20 | // For reference see this logstash filter: https://www.elastic.co/guide/en/logstash/current/plugins-filters-de_dot.html 21 | func (e EnhancedEvent) DeDot() EnhancedEvent { 22 | c := e 23 | c.Labels = dedotMap(e.Labels) 24 | c.Annotations = dedotMap(e.Annotations) 25 | c.InvolvedObject.Labels = dedotMap(e.InvolvedObject.Labels) 26 | c.InvolvedObject.Annotations = dedotMap(e.InvolvedObject.Annotations) 27 | return c 28 | } 29 | 30 | func dedotMap(in map[string]string) map[string]string { 31 | if len(in) == 0 { 32 | return in 33 | } 34 | ret := make(map[string]string, len(in)) 35 | for key, value := range in { 36 | nKey := strings.ReplaceAll(key, ".", "_") 37 | ret[nKey] = value 38 | } 39 | return ret 40 | } 41 | 42 | type EnhancedObjectReference struct { 43 | corev1.ObjectReference `json:",inline"` 44 | Labels map[string]string `json:"labels,omitempty"` 45 | Annotations map[string]string `json:"annotations,omitempty"` 46 | OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty"` 47 | Deleted bool `json:"deleted"` 48 | } 49 | 50 | // ToJSON does not return an error because we are %99 confident it is JSON serializable. 51 | // TODO(makin) Is it a bad practice? It's open to discussion. 
52 | func (e *EnhancedEvent) ToJSON() []byte { 53 | b, _ := json.Marshal(e) 54 | return b 55 | } 56 | 57 | func (e *EnhancedEvent) GetTimestampMs() int64 { 58 | timestamp := e.FirstTimestamp.Time 59 | if timestamp.IsZero() { 60 | timestamp = e.EventTime.Time 61 | } 62 | 63 | return timestamp.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) 64 | } 65 | 66 | func (e *EnhancedEvent) GetTimestampISO8601() string { 67 | timestamp := e.FirstTimestamp.Time 68 | if timestamp.IsZero() { 69 | timestamp = e.EventTime.Time 70 | } 71 | 72 | layout := "2006-01-02T15:04:05.000Z" 73 | return timestamp.Format(layout) 74 | } 75 | -------------------------------------------------------------------------------- /pkg/exporter/channel_registry.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 8 | "github.com/resmoio/kubernetes-event-exporter/pkg/metrics" 9 | "github.com/resmoio/kubernetes-event-exporter/pkg/sinks" 10 | "github.com/rs/zerolog/log" 11 | ) 12 | 13 | // ChannelBasedReceiverRegistry creates two channels for each receiver. One is for receiving events and other one is 14 | // for breaking out of the infinite loop. Each message is passed to receivers 15 | // This might not be the best way to implement such feature. A ring buffer can be better 16 | // and we might need a mechanism to drop the vents 17 | // On closing, the registry sends a signal on all exit channels, and then waits for all to complete. 
18 | type ChannelBasedReceiverRegistry struct { 19 | ch map[string]chan kube.EnhancedEvent 20 | exitCh map[string]chan interface{} 21 | wg *sync.WaitGroup 22 | MetricsStore *metrics.Store 23 | } 24 | 25 | func (r *ChannelBasedReceiverRegistry) SendEvent(name string, event *kube.EnhancedEvent) { 26 | ch := r.ch[name] 27 | if ch == nil { 28 | log.Error().Str("name", name).Msg("There is no channel") 29 | } 30 | 31 | go func() { 32 | ch <- *event 33 | }() 34 | } 35 | 36 | func (r *ChannelBasedReceiverRegistry) Register(name string, receiver sinks.Sink) { 37 | if r.ch == nil { 38 | r.ch = make(map[string]chan kube.EnhancedEvent) 39 | r.exitCh = make(map[string]chan interface{}) 40 | } 41 | 42 | ch := make(chan kube.EnhancedEvent) 43 | exitCh := make(chan interface{}) 44 | 45 | r.ch[name] = ch 46 | r.exitCh[name] = exitCh 47 | 48 | if r.wg == nil { 49 | r.wg = &sync.WaitGroup{} 50 | } 51 | r.wg.Add(1) 52 | 53 | go func() { 54 | Loop: 55 | for { 56 | select { 57 | case ev := <-ch: 58 | log.Debug().Str("sink", name).Str("event", ev.Message).Msg("sending event to sink") 59 | err := receiver.Send(context.Background(), &ev) 60 | if err != nil { 61 | r.MetricsStore.SendErrors.Inc() 62 | log.Debug().Err(err).Str("sink", name).Str("event", ev.Message).Msg("Cannot send event") 63 | } 64 | case <-exitCh: 65 | log.Info().Str("sink", name).Msg("Closing the sink") 66 | break Loop 67 | } 68 | } 69 | receiver.Close() 70 | log.Info().Str("sink", name).Msg("Closed") 71 | r.wg.Done() 72 | }() 73 | } 74 | 75 | // Close signals closing to all sinks and waits for them to complete. 76 | // The wait could block indefinitely depending on the sink implementations. 
77 | func (r *ChannelBasedReceiverRegistry) Close() { 78 | // Send exit command and wait for exit of all sinks 79 | for _, ec := range r.exitCh { 80 | ec <- 1 81 | } 82 | r.wg.Wait() 83 | } 84 | -------------------------------------------------------------------------------- /pkg/sinks/eventbridge.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "github.com/aws/aws-sdk-go/aws" 7 | "github.com/aws/aws-sdk-go/aws/client" 8 | "github.com/aws/aws-sdk-go/aws/session" 9 | "github.com/aws/aws-sdk-go/service/eventbridge" 10 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 11 | "github.com/rs/zerolog/log" 12 | "time" 13 | ) 14 | 15 | type EventBridgeConfig struct { 16 | DetailType string `yaml:"detailType"` 17 | Details map[string]interface{} `yaml:"details"` 18 | Source string `yaml:"source"` 19 | EventBusName string `yaml:"eventBusName"` 20 | Region string `yaml:"region"` 21 | } 22 | 23 | type EventBridgeSink struct { 24 | cfg *EventBridgeConfig 25 | svc *eventbridge.EventBridge 26 | } 27 | 28 | func NewEventBridgeSink(cfg *EventBridgeConfig) (Sink, error) { 29 | sess, err := session.NewSession(&aws.Config{ 30 | Region: aws.String(cfg.Region), 31 | Retryer: client.DefaultRetryer{ 32 | NumMaxRetries: client.DefaultRetryerMaxNumRetries, 33 | MinRetryDelay: client.DefaultRetryerMinRetryDelay, 34 | MinThrottleDelay: client.DefaultRetryerMinThrottleDelay, 35 | MaxRetryDelay: client.DefaultRetryerMaxRetryDelay, 36 | MaxThrottleDelay: client.DefaultRetryerMaxThrottleDelay, 37 | }, 38 | }, 39 | ) 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | svc := eventbridge.New(sess) 45 | return &EventBridgeSink{ 46 | cfg: cfg, 47 | svc: svc, 48 | }, nil 49 | } 50 | 51 | func (s *EventBridgeSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 52 | log.Info().Msg("Sending event to EventBridge ") 53 | var toSend string 54 | if s.cfg.Details != nil { 55 | 
res, err := convertLayoutTemplate(s.cfg.Details, ev) 56 | if err != nil { 57 | return err 58 | } 59 | 60 | b, err := json.Marshal(res) 61 | toSend = string(b) 62 | if err != nil { 63 | return err 64 | } 65 | } else { 66 | toSend = string(ev.ToJSON()) 67 | } 68 | tym := time.Now() 69 | inputRequest := eventbridge.PutEventsRequestEntry{ 70 | Detail: &toSend, 71 | DetailType: &s.cfg.DetailType, 72 | Time: &tym, 73 | Source: &s.cfg.Source, 74 | EventBusName: &s.cfg.EventBusName, 75 | } 76 | log.Info().Str("InputEvent", inputRequest.String()).Msg("Request") 77 | 78 | req, _ := s.svc.PutEventsRequest(&eventbridge.PutEventsInput{Entries: []*eventbridge.PutEventsRequestEntry{&inputRequest}}) 79 | // TODO: Retry failed events 80 | err := req.Send() 81 | if err != nil { 82 | log.Error().Err(err).Msg("EventBridge Error") 83 | return err 84 | } 85 | return nil 86 | } 87 | 88 | func (s *EventBridgeSink) Close() { 89 | } 90 | -------------------------------------------------------------------------------- /pkg/setup/setup_test.go: -------------------------------------------------------------------------------- 1 | package setup 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func Test_ParseConfigFromBytes_ExampleConfigIsCorrect(t *testing.T) { 11 | configBytes, err := os.ReadFile("../../config.example.yaml") 12 | if err != nil { 13 | assert.NoError(t, err, "cannot read config file: "+err.Error()) 14 | return 15 | } 16 | 17 | config, err := ParseConfigFromBytes(configBytes) 18 | 19 | assert.NoError(t, err) 20 | assert.NotEmpty(t, config.LogLevel) 21 | assert.NotEmpty(t, config.LogFormat) 22 | assert.NotEmpty(t, config.Route) 23 | assert.NotEmpty(t, config.Route.Routes) 24 | assert.Equal(t, 4, len(config.Route.Routes)) 25 | assert.NotEmpty(t, config.Receivers) 26 | assert.Equal(t, 10, len(config.Receivers)) 27 | } 28 | 29 | func Test_ParseConfigFromBytes_NoErrors(t *testing.T) { 30 | configBytes := []byte(` 31 | logLevel: info 32 
| logFormat: json 33 | `) 34 | 35 | config, err := ParseConfigFromBytes(configBytes) 36 | 37 | assert.NoError(t, err) 38 | assert.Equal(t, "info", config.LogLevel) 39 | assert.Equal(t, "json", config.LogFormat) 40 | } 41 | 42 | func Test_ParseConfigFromBytes_ErrorWhenCurlyBracesNotEscaped(t *testing.T) { 43 | configBytes := []byte(` 44 | logLevel: {{info}} 45 | logFormat: json 46 | `) 47 | 48 | config, err := ParseConfigFromBytes(configBytes) 49 | 50 | expectedErrorLine := "> 2 | logLevel: {{info}}" 51 | expectedErrorSuggestion := "Need to wrap values with special characters in quotes" 52 | assert.NotNil(t, err) 53 | assert.Contains(t, err.Error(), expectedErrorLine) 54 | assert.Contains(t, err.Error(), expectedErrorSuggestion) 55 | assert.Equal(t, "", config.LogLevel) 56 | assert.Equal(t, "", config.LogFormat) 57 | } 58 | 59 | func Test_ParseConfigFromBytes_OkWhenCurlyBracesEscaped(t *testing.T) { 60 | configBytes := []byte(` 61 | logLevel: "{{info}}" 62 | logFormat: json 63 | `) 64 | 65 | config, err := ParseConfigFromBytes(configBytes) 66 | 67 | assert.Nil(t, err) 68 | assert.Equal(t, "{{info}}", config.LogLevel) 69 | assert.Equal(t, "json", config.LogFormat) 70 | } 71 | 72 | func Test_ParseConfigFromBytes_ErrorErrorNotWithCurlyBraces(t *testing.T) { 73 | configBytes := []byte(` 74 | logLevelNotYAMLErrorError 75 | logFormat: json 76 | `) 77 | 78 | config, err := ParseConfigFromBytes(configBytes) 79 | 80 | expectedErrorLine := "> 2 | logLevelNotYAMLErrorError" 81 | expectedErrorSuggestion := "Need to wrap values with special characters in quotes" 82 | assert.NotNil(t, err) 83 | assert.Contains(t, err.Error(), expectedErrorLine) 84 | assert.NotContains(t, err.Error(), expectedErrorSuggestion) 85 | assert.Equal(t, "", config.LogLevel) 86 | assert.Equal(t, "", config.LogFormat) 87 | } 88 | -------------------------------------------------------------------------------- /pkg/sinks/tmpl.go: 
-------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "text/template" 7 | 8 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 9 | "github.com/Masterminds/sprig/v3" 10 | ) 11 | 12 | func GetString(event *kube.EnhancedEvent, text string) (string, error) { 13 | tmpl, err := template.New("template").Funcs(sprig.TxtFuncMap()).Parse(text) 14 | if err != nil { 15 | return "", err 16 | } 17 | 18 | buf := new(bytes.Buffer) 19 | // TODO: Should we send event directly or more events? 20 | err = tmpl.Execute(buf, event) 21 | if err != nil { 22 | return "", err 23 | } 24 | 25 | return buf.String(), nil 26 | } 27 | 28 | func convertLayoutTemplate(layout map[string]interface{}, ev *kube.EnhancedEvent) (map[string]interface{}, error) { 29 | result := make(map[string]interface{}) 30 | 31 | for key, value := range layout { 32 | m, err := convertTemplate(value, ev) 33 | if err != nil { 34 | return nil, err 35 | } 36 | result[key] = m 37 | } 38 | return result, nil 39 | } 40 | 41 | func convertTemplate(value interface{}, ev *kube.EnhancedEvent) (interface{}, error) { 42 | switch v := value.(type) { 43 | case string: 44 | rendered, err := GetString(ev, v) 45 | if err != nil { 46 | return nil, err 47 | } 48 | 49 | return rendered, nil 50 | case map[interface{}]interface{}: 51 | strKeysMap := make(map[string]interface{}) 52 | for k, v := range v { 53 | res, err := convertTemplate(v, ev) 54 | if err != nil { 55 | return nil, err 56 | } 57 | // TODO: It's a bit dangerous 58 | strKeysMap[k.(string)] = res 59 | } 60 | return strKeysMap, nil 61 | case map[string]interface{}: 62 | strKeysMap := make(map[string]interface{}) 63 | for k, v := range v { 64 | res, err := convertTemplate(v, ev) 65 | if err != nil { 66 | return nil, err 67 | } 68 | strKeysMap[k] = res 69 | } 70 | return strKeysMap, nil 71 | case []interface{}: 72 | listConf := make([]interface{}, len(v)) 73 | for i := range v { 
74 | t, err := convertTemplate(v[i], ev) 75 | if err != nil { 76 | return nil, err 77 | } 78 | listConf[i] = t 79 | } 80 | return listConf, nil 81 | } 82 | return nil, nil 83 | } 84 | 85 | func serializeEventWithLayout(layout map[string]interface{}, ev *kube.EnhancedEvent) ([]byte, error) { 86 | var toSend []byte 87 | if layout != nil { 88 | res, err := convertLayoutTemplate(layout, ev) 89 | if err != nil { 90 | return nil, err 91 | } 92 | 93 | toSend, err = json.Marshal(res) 94 | if err != nil { 95 | return nil, err 96 | } 97 | } else { 98 | toSend = ev.ToJSON() 99 | } 100 | return toSend, nil 101 | } 102 | -------------------------------------------------------------------------------- /pkg/sinks/opsgenie.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 6 | "github.com/opsgenie/opsgenie-go-sdk-v2/alert" 7 | "github.com/opsgenie/opsgenie-go-sdk-v2/client" 8 | ) 9 | 10 | type OpsgenieConfig struct { 11 | ApiKey string `yaml:"apiKey"` 12 | URL client.ApiUrl `yaml:"URL"` 13 | Priority string `yaml:"priority"` 14 | Message string `yaml:"message"` 15 | Alias string `yaml:"alias"` 16 | Description string `yaml:"description"` 17 | Tags []string `yaml:"tags"` 18 | Details map[string]string `yaml:"details"` 19 | } 20 | 21 | type OpsgenieSink struct { 22 | cfg *OpsgenieConfig 23 | alertClient *alert.Client 24 | } 25 | 26 | func NewOpsgenieSink(config *OpsgenieConfig) (Sink, error) { 27 | if config.URL == "" { 28 | config.URL = client.API_URL 29 | } 30 | 31 | if config.Priority == "" { 32 | config.Priority = "P3" 33 | } 34 | 35 | alertClient, err := alert.NewClient(&client.Config{ 36 | ApiKey: config.ApiKey, 37 | OpsGenieAPIURL: config.URL, 38 | }) 39 | 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | return &OpsgenieSink{ 45 | cfg: config, 46 | alertClient: alertClient, 47 | }, nil 48 | } 49 | 50 | func (o *OpsgenieSink) 
Send(ctx context.Context, ev *kube.EnhancedEvent) error { 51 | request := alert.CreateAlertRequest{ 52 | Priority: alert.Priority(o.cfg.Priority), 53 | } 54 | 55 | msg, err := GetString(ev, o.cfg.Message) 56 | if err != nil { 57 | return err 58 | } 59 | request.Message = msg 60 | 61 | // Alias is optional although highly recommended to work 62 | if o.cfg.Alias != "" { 63 | alias, err := GetString(ev, o.cfg.Alias) 64 | if err != nil { 65 | return err 66 | } 67 | request.Alias = alias 68 | } 69 | 70 | description, err := GetString(ev, o.cfg.Description) 71 | if err != nil { 72 | return err 73 | } 74 | request.Description = description 75 | 76 | if o.cfg.Tags != nil { 77 | tags := make([]string, 0) 78 | for _, v := range o.cfg.Tags { 79 | tag, err := GetString(ev, v) 80 | if err != nil { 81 | return err 82 | } 83 | tags = append(tags, tag) 84 | } 85 | request.Tags = tags 86 | } 87 | 88 | if o.cfg.Details != nil { 89 | details := make(map[string]string) 90 | for k, v := range o.cfg.Details { 91 | detail, err := GetString(ev, v) 92 | if err != nil { 93 | return err 94 | } 95 | details[k] = detail 96 | } 97 | request.Details = details 98 | } 99 | 100 | _, err = o.alertClient.Create(ctx, &request) 101 | return err 102 | } 103 | 104 | func (o *OpsgenieSink) Close() { 105 | // No-op 106 | } 107 | -------------------------------------------------------------------------------- /pkg/exporter/rule.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "regexp" 5 | 6 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 7 | ) 8 | 9 | // matchString is a method to clean the code. Error handling is omitted here because these 10 | // rules are validated before use. According to regexp.MatchString, the only way it fails its 11 | // that the pattern does not compile. 
12 | func matchString(pattern, s string) bool { 13 | matched, _ := regexp.MatchString(pattern, s) 14 | return matched 15 | } 16 | 17 | // Rule is for matching an event 18 | type Rule struct { 19 | Labels map[string]string 20 | Annotations map[string]string 21 | Message string 22 | APIVersion string `yaml:"apiVersion"` 23 | Kind string 24 | Namespace string 25 | Reason string 26 | Type string 27 | MinCount int32 `yaml:"minCount"` 28 | Component string 29 | Host string 30 | Receiver string 31 | } 32 | 33 | // MatchesEvent compares the rule to an event and returns a boolean value to indicate 34 | // whether the event is compatible with the rule. All fields are compared as regular expressions 35 | // so the user must keep that in mind while writing rules. 36 | func (r *Rule) MatchesEvent(ev *kube.EnhancedEvent) bool { 37 | // These rules are just basic comparison rules, if one of them fails, it means the event does not match the rule 38 | rules := [][2]string{ 39 | {r.Message, ev.Message}, 40 | {r.APIVersion, ev.InvolvedObject.APIVersion}, 41 | {r.Kind, ev.InvolvedObject.Kind}, 42 | {r.Namespace, ev.Namespace}, 43 | {r.Reason, ev.Reason}, 44 | {r.Type, ev.Type}, 45 | {r.Component, ev.Source.Component}, 46 | {r.Host, ev.Source.Host}, 47 | } 48 | 49 | for _, v := range rules { 50 | rule := v[0] 51 | value := v[1] 52 | if rule != "" { 53 | matches := matchString(rule, value) 54 | if !matches { 55 | return false 56 | } 57 | } 58 | } 59 | 60 | // Labels are also mutually exclusive, they all need to be present 61 | if r.Labels != nil && len(r.Labels) > 0 { 62 | for k, v := range r.Labels { 63 | if val, ok := ev.InvolvedObject.Labels[k]; !ok { 64 | return false 65 | } else { 66 | matches := matchString(v, val) 67 | if !matches { 68 | return false 69 | } 70 | } 71 | } 72 | } 73 | 74 | // Annotations are also mutually exclusive, they all need to be present 75 | if r.Annotations != nil && len(r.Annotations) > 0 { 76 | for k, v := range r.Annotations { 77 | if val, ok := 
ev.InvolvedObject.Annotations[k]; !ok { 78 | return false 79 | } else { 80 | matches := matchString(v, val) 81 | if !matches { 82 | return false 83 | } 84 | } 85 | } 86 | } 87 | 88 | // If minCount is not given via a config, it's already 0 and the count is already 1 and this passes. 89 | if ev.Count < r.MinCount { 90 | return false 91 | } 92 | 93 | // If it failed every step, it must match because our matchers are limiting 94 | return true 95 | } 96 | -------------------------------------------------------------------------------- /pkg/sinks/slack.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "sort" 6 | 7 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 8 | "github.com/rs/zerolog/log" 9 | "github.com/slack-go/slack" 10 | ) 11 | 12 | type SlackConfig struct { 13 | Token string `yaml:"token"` 14 | Channel string `yaml:"channel"` 15 | Message string `yaml:"message"` 16 | Color string `yaml:"color"` 17 | Footer string `yaml:"footer"` 18 | Title string `yaml:"title"` 19 | AuthorName string `yaml:"author_name"` 20 | Fields map[string]string `yaml:"fields"` 21 | } 22 | 23 | type SlackSink struct { 24 | cfg *SlackConfig 25 | client *slack.Client 26 | } 27 | 28 | func NewSlackSink(cfg *SlackConfig) (Sink, error) { 29 | return &SlackSink{ 30 | cfg: cfg, 31 | client: slack.New(cfg.Token), 32 | }, nil 33 | } 34 | 35 | func (s *SlackSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 36 | channel, err := GetString(ev, s.cfg.Channel) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | message, err := GetString(ev, s.cfg.Message) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | options := []slack.MsgOption{slack.MsgOptionText(message, true)} 47 | if s.cfg.Fields != nil { 48 | fields := make([]slack.AttachmentField, 0) 49 | for k, v := range s.cfg.Fields { 50 | fieldText, err := GetString(ev, v) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | fields = 
append(fields, slack.AttachmentField{ 56 | Title: k, 57 | Value: fieldText, 58 | Short: false, 59 | }) 60 | } 61 | 62 | sort.SliceStable(fields, func(i, j int) bool { 63 | return fields[i].Title < fields[j].Title 64 | }) 65 | 66 | // make slack attachment 67 | slackAttachment := slack.Attachment{} 68 | slackAttachment.Fields = fields 69 | if s.cfg.AuthorName != "" { 70 | slackAttachment.AuthorName, err = GetString(ev, s.cfg.AuthorName) 71 | if err != nil { 72 | return err 73 | } 74 | } 75 | if s.cfg.Color != "" { 76 | slackAttachment.Color, err = GetString(ev, s.cfg.Color) 77 | if err != nil { 78 | return err 79 | } 80 | } 81 | if s.cfg.Title != "" { 82 | slackAttachment.Title, err = GetString(ev, s.cfg.Title) 83 | if err != nil { 84 | return err 85 | } 86 | } 87 | if s.cfg.Footer != "" { 88 | slackAttachment.Footer, err = GetString(ev, s.cfg.Footer) 89 | if err != nil { 90 | return err 91 | } 92 | } 93 | 94 | options = append(options, slack.MsgOptionAttachments(slackAttachment)) 95 | } 96 | 97 | _ch, _ts, _text, err := s.client.SendMessageContext(ctx, channel, options...) 
98 | log.Debug().Str("ch", _ch).Str("ts", _ts).Str("text", _text).Err(err).Msg("Slack Response") 99 | return err 100 | } 101 | 102 | func (s *SlackSink) Close() { 103 | // No-op 104 | } 105 | -------------------------------------------------------------------------------- /pkg/sinks/loki.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 10 | "io/ioutil" 11 | "net/http" 12 | "strconv" 13 | "time" 14 | "github.com/rs/zerolog/log" 15 | ) 16 | 17 | type promtailStream struct { 18 | Stream map[string]string `json:"stream"` 19 | Values [][]string `json:"values"` 20 | } 21 | 22 | type LokiMsg struct { 23 | Streams []promtailStream `json:"streams"` 24 | } 25 | 26 | type LokiConfig struct { 27 | Layout map[string]interface{} `yaml:"layout"` 28 | StreamLabels map[string]string `yaml:"streamLabels"` 29 | TLS TLS `yaml:"tls"` 30 | URL string `yaml:"url"` 31 | Headers map[string]string `yaml:"headers"` 32 | } 33 | 34 | type Loki struct { 35 | cfg *LokiConfig 36 | transport *http.Transport 37 | } 38 | 39 | func NewLoki(cfg *LokiConfig) (Sink, error) { 40 | tlsClientConfig, err := setupTLS(&cfg.TLS) 41 | if err != nil { 42 | return nil, fmt.Errorf("failed to setup TLS: %w", err) 43 | } 44 | return &Loki{cfg: cfg, transport: &http.Transport{ 45 | Proxy: http.ProxyFromEnvironment, 46 | TLSClientConfig: tlsClientConfig, 47 | }}, nil 48 | } 49 | 50 | func generateTimestamp() string { 51 | return strconv.FormatInt(time.Now().Unix(), 10) + "000000000" 52 | } 53 | 54 | func (l *Loki) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 55 | eventBody, err := serializeEventWithLayout(l.cfg.Layout, ev) 56 | if err != nil { 57 | return err 58 | } 59 | timestamp := generateTimestamp() 60 | a := LokiMsg{ 61 | Streams: []promtailStream{{ 62 | Stream: l.cfg.StreamLabels, 63 | Values: 
[][]string{{timestamp, string(eventBody)}}, 64 | }}, 65 | } 66 | reqBody, err := json.Marshal(a) 67 | if err != nil { 68 | return err 69 | } 70 | req, err := http.NewRequest(http.MethodPost, l.cfg.URL, bytes.NewBuffer(reqBody)) 71 | if err != nil { 72 | return err 73 | } 74 | 75 | req.Header.Set("Content-Type", "application/json") 76 | 77 | for k, v := range l.cfg.Headers { 78 | realValue, err := GetString(ev, v) 79 | if err != nil { 80 | log.Debug().Err(err).Msgf("parse template failed: %s", v) 81 | req.Header.Add(k, v) 82 | } else { 83 | log.Debug().Msgf("request header: {%s: %s}", k, realValue) 84 | req.Header.Add(k, realValue) 85 | } 86 | } 87 | 88 | client := http.DefaultClient 89 | resp, err := client.Do(req) 90 | if err != nil { 91 | return err 92 | } 93 | 94 | defer resp.Body.Close() 95 | 96 | body, err := ioutil.ReadAll(resp.Body) 97 | if err != nil { 98 | return err 99 | } 100 | 101 | if !(resp.StatusCode >= 200 && resp.StatusCode < 300) { 102 | return errors.New("not successfull (2xx) response: " + string(body)) 103 | } 104 | 105 | return nil 106 | } 107 | 108 | func (l *Loki) Close() { 109 | l.transport.CloseIdleConnections() 110 | } 111 | -------------------------------------------------------------------------------- /pkg/kube/event_test.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | corev1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "testing" 8 | ) 9 | 10 | func TestEnhancedEvent_DeDot(t *testing.T) { 11 | tests := []struct { 12 | name string 13 | in EnhancedEvent 14 | want EnhancedEvent 15 | }{ 16 | { 17 | name: "nothing", 18 | in: EnhancedEvent{ 19 | Event: corev1.Event{ 20 | Message: "foovar", 21 | ObjectMeta: metav1.ObjectMeta{ 22 | Annotations: map[string]string{"test": "bar"}, 23 | }, 24 | }, 25 | InvolvedObject: EnhancedObjectReference{ 26 | Labels: map[string]string{"faz": "var"}, 27 | }, 28 | }, 
29 | want: EnhancedEvent{ 30 | Event: corev1.Event{ 31 | Message: "foovar", 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Annotations: map[string]string{"test": "bar"}, 34 | }, 35 | }, 36 | InvolvedObject: EnhancedObjectReference{ 37 | Labels: map[string]string{"faz": "var"}, 38 | }, 39 | }, 40 | }, 41 | { 42 | name: "dedot", 43 | in: EnhancedEvent{ 44 | Event: corev1.Event{ 45 | Message: "foovar", 46 | ObjectMeta: metav1.ObjectMeta{ 47 | Annotations: map[string]string{"test.io.": "bar"}, 48 | }, 49 | }, 50 | InvolvedObject: EnhancedObjectReference{ 51 | Labels: map[string]string{"faz.net": "var"}, 52 | }, 53 | }, 54 | want: EnhancedEvent{ 55 | Event: corev1.Event{ 56 | Message: "foovar", 57 | ObjectMeta: metav1.ObjectMeta{ 58 | Annotations: map[string]string{"test_io_": "bar"}, 59 | }, 60 | }, 61 | InvolvedObject: EnhancedObjectReference{ 62 | Labels: map[string]string{"faz_net": "var"}, 63 | }, 64 | }, 65 | }, 66 | } 67 | for _, tt := range tests { 68 | t.Run(tt.name, func(t *testing.T) { 69 | got := tt.in.DeDot() 70 | assert.EqualValues(t, tt.want, got) 71 | }) 72 | } 73 | } 74 | 75 | func TestEnhancedEvent_DeDot_MustNotAlternateOriginal(t *testing.T) { 76 | expected := EnhancedEvent{ 77 | Event: corev1.Event{ 78 | Message: "foovar", 79 | ObjectMeta: metav1.ObjectMeta{ 80 | Annotations: map[string]string{"test.io": "bar"}, 81 | Labels: map[string]string{"faz.net": "var"}, 82 | }, 83 | }, 84 | InvolvedObject: EnhancedObjectReference{ 85 | Annotations: map[string]string{"test.io": "bar"}, 86 | Labels: map[string]string{"faz.net": "var"}, 87 | }, 88 | } 89 | in := EnhancedEvent{ 90 | Event: corev1.Event{ 91 | Message: "foovar", 92 | ObjectMeta: metav1.ObjectMeta{ 93 | Annotations: map[string]string{"test.io": "bar"}, 94 | Labels: map[string]string{"faz.net": "var"}, 95 | }, 96 | }, 97 | InvolvedObject: EnhancedObjectReference{ 98 | Annotations: map[string]string{"test.io": "bar"}, 99 | Labels: map[string]string{"faz.net": "var"}, 100 | }, 101 | } 102 | in.DeDot() 103 | 
assert.EqualValues(t, expected, in) 104 | } 105 | -------------------------------------------------------------------------------- /pkg/kube/objects.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | lru "github.com/hashicorp/golang-lru" 8 | "github.com/resmoio/kubernetes-event-exporter/pkg/metrics" 9 | v1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime/schema" 12 | "k8s.io/client-go/dynamic" 13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/restmapper" 15 | ) 16 | 17 | type ObjectMetadataProvider interface { 18 | GetObjectMetadata(reference *v1.ObjectReference, clientset *kubernetes.Clientset, dynClient dynamic.Interface, metricsStore *metrics.Store) (ObjectMetadata, error) 19 | } 20 | 21 | type ObjectMetadataCache struct { 22 | cache *lru.ARCCache 23 | } 24 | 25 | var _ ObjectMetadataProvider = &ObjectMetadataCache{} 26 | 27 | type ObjectMetadata struct { 28 | Annotations map[string]string 29 | Labels map[string]string 30 | OwnerReferences []metav1.OwnerReference 31 | Deleted bool 32 | } 33 | 34 | func NewObjectMetadataProvider(size int) ObjectMetadataProvider { 35 | cache, err := lru.NewARC(size) 36 | if err != nil { 37 | panic("cannot init cache: " + err.Error()) 38 | } 39 | 40 | var o ObjectMetadataProvider = &ObjectMetadataCache{ 41 | cache: cache, 42 | } 43 | 44 | return o 45 | } 46 | 47 | func (o *ObjectMetadataCache) GetObjectMetadata(reference *v1.ObjectReference, clientset *kubernetes.Clientset, dynClient dynamic.Interface, metricsStore *metrics.Store) (ObjectMetadata, error) { 48 | // ResourceVersion changes when the object is updated. 49 | // We use "UID/ResourceVersion" as cache key so that if the object is updated we get the new metadata. 
50 | cacheKey := strings.Join([]string{string(reference.UID), reference.ResourceVersion}, "/") 51 | if val, ok := o.cache.Get(cacheKey); ok { 52 | metricsStore.KubeApiReadCacheHits.Inc() 53 | return val.(ObjectMetadata), nil 54 | } 55 | 56 | var group, version string 57 | s := strings.Split(reference.APIVersion, "/") 58 | if len(s) == 1 { 59 | group = "" 60 | version = s[0] 61 | } else { 62 | group = s[0] 63 | version = s[1] 64 | } 65 | 66 | gk := schema.GroupKind{Group: group, Kind: reference.Kind} 67 | 68 | groupResources, err := restmapper.GetAPIGroupResources(clientset.Discovery()) 69 | if err != nil { 70 | return ObjectMetadata{}, err 71 | } 72 | 73 | rm := restmapper.NewDiscoveryRESTMapper(groupResources) 74 | mapping, err := rm.RESTMapping(gk, version) 75 | if err != nil { 76 | return ObjectMetadata{}, err 77 | } 78 | 79 | item, err := dynClient. 80 | Resource(mapping.Resource). 81 | Namespace(reference.Namespace). 82 | Get(context.Background(), reference.Name, metav1.GetOptions{}) 83 | 84 | metricsStore.KubeApiReadRequests.Inc() 85 | 86 | if err != nil { 87 | return ObjectMetadata{}, err 88 | } 89 | 90 | objectMetadata := ObjectMetadata{ 91 | OwnerReferences: item.GetOwnerReferences(), 92 | Labels: item.GetLabels(), 93 | Annotations: item.GetAnnotations(), 94 | } 95 | 96 | if item.GetDeletionTimestamp() != nil { 97 | objectMetadata.Deleted = true 98 | } 99 | 100 | o.cache.Add(cacheKey, objectMetadata) 101 | return objectMetadata, nil 102 | } 103 | -------------------------------------------------------------------------------- /pkg/sinks/receiver.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import "errors" 4 | 5 | // Receiver allows receiving 6 | type ReceiverConfig struct { 7 | Name string `yaml:"name"` 8 | InMemory *InMemoryConfig `yaml:"inMemory"` 9 | Webhook *WebhookConfig `yaml:"webhook"` 10 | File *FileConfig `yaml:"file"` 11 | Syslog *SyslogConfig `yaml:"syslog"` 12 | Stdout 
*StdoutConfig `yaml:"stdout"` 13 | Elasticsearch *ElasticsearchConfig `yaml:"elasticsearch"` 14 | Kinesis *KinesisConfig `yaml:"kinesis"` 15 | Firehose *FirehoseConfig `yaml:"firehose"` 16 | OpenSearch *OpenSearchConfig `yaml:"opensearch"` 17 | Opsgenie *OpsgenieConfig `yaml:"opsgenie"` 18 | Loki *LokiConfig `yaml:"loki"` 19 | SQS *SQSConfig `yaml:"sqs"` 20 | SNS *SNSConfig `yaml:"sns"` 21 | Slack *SlackConfig `yaml:"slack"` 22 | Kafka *KafkaConfig `yaml:"kafka"` 23 | Pubsub *PubsubConfig `yaml:"pubsub"` 24 | Opscenter *OpsCenterConfig `yaml:"opscenter"` 25 | Teams *TeamsConfig `yaml:"teams"` 26 | BigQuery *BigQueryConfig `yaml:"bigquery"` 27 | EventBridge *EventBridgeConfig `yaml:"eventbridge"` 28 | Pipe *PipeConfig `yaml:"pipe"` 29 | } 30 | 31 | func (r *ReceiverConfig) Validate() error { 32 | return nil 33 | } 34 | 35 | func (r *ReceiverConfig) GetSink() (Sink, error) { 36 | if r.InMemory != nil { 37 | // This reference is used for test purposes to count the events in the sink. 38 | // It should not be used in production since it will only cause memory leak and (b)OOM 39 | sink := &InMemory{Config: r.InMemory} 40 | r.InMemory.Ref = sink 41 | return sink, nil 42 | } 43 | 44 | // Sorry for this code, but its Go 45 | if r.Pipe != nil { 46 | return NewPipeSink(r.Pipe) 47 | } 48 | 49 | if r.Webhook != nil { 50 | return NewWebhook(r.Webhook) 51 | } 52 | 53 | if r.File != nil { 54 | return NewFileSink(r.File) 55 | } 56 | 57 | if r.Syslog != nil { 58 | return NewSyslogSink(r.Syslog) 59 | } 60 | 61 | if r.Stdout != nil { 62 | return NewStdoutSink(r.Stdout) 63 | } 64 | 65 | if r.Elasticsearch != nil { 66 | return NewElasticsearch(r.Elasticsearch) 67 | } 68 | 69 | if r.Kinesis != nil { 70 | return NewKinesisSink(r.Kinesis) 71 | } 72 | 73 | if r.Firehose != nil { 74 | return NewFirehoseSink(r.Firehose) 75 | } 76 | 77 | if r.OpenSearch != nil { 78 | return NewOpenSearch(r.OpenSearch) 79 | } 80 | 81 | if r.Opsgenie != nil { 82 | return NewOpsgenieSink(r.Opsgenie) 83 | } 84 | 
85 | if r.SQS != nil { 86 | return NewSQSSink(r.SQS) 87 | } 88 | 89 | if r.SNS != nil { 90 | return NewSNSSink(r.SNS) 91 | } 92 | 93 | if r.Slack != nil { 94 | return NewSlackSink(r.Slack) 95 | } 96 | 97 | if r.Kafka != nil { 98 | return NewKafkaSink(r.Kafka) 99 | } 100 | 101 | if r.Pubsub != nil { 102 | return NewPubsubSink(r.Pubsub) 103 | } 104 | 105 | if r.Opscenter != nil { 106 | return NewOpsCenterSink(r.Opscenter) 107 | } 108 | 109 | if r.Teams != nil { 110 | return NewTeamsSink(r.Teams) 111 | } 112 | 113 | if r.BigQuery != nil { 114 | return NewBigQuerySink(r.BigQuery) 115 | } 116 | 117 | if r.EventBridge != nil { 118 | return NewEventBridgeSink(r.EventBridge) 119 | } 120 | 121 | if r.Loki != nil { 122 | return NewLoki(r.Loki) 123 | } 124 | 125 | return nil, errors.New("unknown sink") 126 | } 127 | -------------------------------------------------------------------------------- /pkg/batch/writer.go: -------------------------------------------------------------------------------- 1 | package batch 2 | 3 | import ( 4 | "context" 5 | "time" 6 | ) 7 | 8 | 9 | // Writer allows to buffer some items and call the Handler function either when the buffer is full or the timeout is 10 | // reached. There will also be support for concurrency for high volume. The handler function is supposed to return an 11 | // array of booleans to indicate whether the transfer was successful or not. It can be replaced with status codes in 12 | // the future to differentiate I/O errors, rate limiting, authorization issues. 
13 | type Writer struct { 14 | cfg WriterConfig 15 | Handler Callback 16 | buffer []bufferItem 17 | len int 18 | done chan bool 19 | stopDone chan bool 20 | items chan interface{} 21 | } 22 | 23 | type bufferItem struct { 24 | v interface{} 25 | attempt int 26 | } 27 | 28 | type Callback func(ctx context.Context, items []interface{}) []bool 29 | 30 | type WriterConfig struct { 31 | BatchSize int 32 | MaxRetries int 33 | Interval time.Duration 34 | Timeout time.Duration 35 | } 36 | 37 | func NewWriter(cfg WriterConfig, cb Callback) *Writer { 38 | return &Writer{ 39 | cfg: cfg, 40 | Handler: cb, 41 | buffer: make([]bufferItem, cfg.BatchSize), 42 | } 43 | } 44 | 45 | // Indicates the start to accept the 46 | func (w *Writer) Start() { 47 | w.done = make(chan bool) 48 | w.items = make(chan interface{}) 49 | w.stopDone = make(chan bool) 50 | ticker := time.NewTicker(w.cfg.Interval) 51 | 52 | go func() { 53 | shouldGoOn := true 54 | for shouldGoOn { 55 | select { 56 | case item := <-w.items: 57 | if w.len >= w.cfg.BatchSize { 58 | w.processBuffer(context.Background()) 59 | w.len = 0 60 | } 61 | 62 | w.buffer[w.len] = bufferItem{v: item, attempt: 0} 63 | w.len++ 64 | case <-w.done: 65 | w.processBuffer(context.Background()) 66 | shouldGoOn = false 67 | w.stopDone <- true 68 | ticker.Stop() 69 | case <-ticker.C: 70 | w.processBuffer(context.Background()) 71 | } 72 | } 73 | }() 74 | } 75 | 76 | func (w *Writer) processBuffer(ctx context.Context) { 77 | if w.len == 0 { 78 | return 79 | } 80 | 81 | // Need to copy the underlying item to another slice 82 | slice := make([]interface{}, w.len) 83 | for i := 0; i < w.len; i++ { 84 | slice[i] = w.buffer[i].v 85 | } 86 | 87 | // Call the actual method 88 | responses := w.Handler(ctx, slice) 89 | 90 | // Overwriting buffer here. Since the newItemsCount will always be equal to or smaller than buffer, it's safe to 91 | // overwrite the existing items whilst traversing the items. 
92 | var newItemsCount int 93 | for idx, success := range responses { 94 | if !success { 95 | item := w.buffer[idx] 96 | if item.attempt >= w.cfg.MaxRetries { 97 | // It's dropped, sorry you asked for it 98 | continue 99 | } 100 | 101 | w.buffer[newItemsCount] = bufferItem{ 102 | v: item.v, 103 | attempt: item.attempt + 1, 104 | } 105 | 106 | newItemsCount++ 107 | } 108 | } 109 | 110 | w.len = newItemsCount 111 | // TODO(makin) an edge case, if all items fail, and the buffer is full, new item cannot be added to buffer. 112 | } 113 | 114 | // Used to signal writer to stop processing items and exit. 115 | func (w *Writer) Stop() { 116 | w.done <- true 117 | <-w.stopDone 118 | } 119 | 120 | // Submit pushes the items to the income buffer and they are placed onto the actual buffer from there. 121 | func (w *Writer) Submit(items ...interface{}) { 122 | for _, item := range items { 123 | w.items <- item 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /pkg/kube/leaderelection.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "time" 8 | 9 | "k8s.io/apimachinery/pkg/util/uuid" 10 | "k8s.io/client-go/kubernetes" 11 | "k8s.io/client-go/rest" 12 | "k8s.io/client-go/tools/leaderelection" 13 | "k8s.io/client-go/tools/leaderelection/resourcelock" 14 | ) 15 | 16 | // LeaderElectionConfig is used to enable leader election 17 | type LeaderElectionConfig struct { 18 | Enabled bool `yaml:"enabled"` 19 | LeaderElectionID string `yaml:"leaderElectionID"` 20 | } 21 | 22 | const ( 23 | inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" 24 | defaultLeaderElectionID = "kubernetes-event-exporter" 25 | defaultNamespace = "default" // this is used for local development 26 | defaultLeaseDuration = 15 * time.Second 27 | defaultRenewDeadline = 10 * time.Second 28 | defaultRetryPeriod = 2 * time.Second 29 | 
) 30 | 31 | func GetLeaseDuration() time.Duration { 32 | return defaultLeaseDuration 33 | } 34 | 35 | // NewResourceLock creates a new config map resource lock for use in a leader 36 | // election loop 37 | func newResourceLock(config *rest.Config, leaderElectionID string) (resourcelock.Interface, error) { 38 | if leaderElectionID == "" { 39 | leaderElectionID = defaultLeaderElectionID 40 | } 41 | 42 | leaderElectionNamespace, err := getInClusterNamespace() 43 | if err != nil { 44 | leaderElectionNamespace = defaultNamespace 45 | } 46 | 47 | // Leader id, needs to be unique 48 | id, err := os.Hostname() 49 | if err != nil { 50 | return nil, err 51 | } 52 | id = id + "_" + string(uuid.NewUUID()) 53 | 54 | // Construct client for leader election 55 | client, err := kubernetes.NewForConfig(config) 56 | if err != nil { 57 | return nil, err 58 | } 59 | 60 | return resourcelock.New(resourcelock.LeasesResourceLock, 61 | leaderElectionNamespace, 62 | leaderElectionID, 63 | client.CoreV1(), 64 | client.CoordinationV1(), 65 | resourcelock.ResourceLockConfig{ 66 | Identity: id, 67 | }) 68 | } 69 | 70 | func getInClusterNamespace() (string, error) { 71 | // Check whether the namespace file exists. 72 | // If not, we are not running in cluster so can't guess the namespace. 
73 | _, err := os.Stat(inClusterNamespacePath) 74 | if os.IsNotExist(err) { 75 | return "", fmt.Errorf("not running in-cluster, please specify leaderElectionIDspace") 76 | } else if err != nil { 77 | return "", fmt.Errorf("error checking namespace file: %w", err) 78 | } 79 | 80 | // Load the namespace file and return its content 81 | namespace, err := os.ReadFile(inClusterNamespacePath) 82 | if err != nil { 83 | return "", fmt.Errorf("error reading namespace file: %w", err) 84 | } 85 | return string(namespace), nil 86 | } 87 | 88 | // NewLeaderElector return a leader elector object using client-go 89 | func NewLeaderElector(leaderElectionID string, config *rest.Config, startFunc func(context.Context), stopFunc func(), newLeaderFunc func(string)) (*leaderelection.LeaderElector, error) { 90 | resourceLock, err := newResourceLock(config, leaderElectionID) 91 | if err != nil { 92 | return &leaderelection.LeaderElector{}, err 93 | } 94 | 95 | l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ 96 | Lock: resourceLock, 97 | LeaseDuration: defaultLeaseDuration, 98 | RenewDeadline: defaultRenewDeadline, 99 | RetryPeriod: defaultRetryPeriod, 100 | Callbacks: leaderelection.LeaderCallbacks{ 101 | OnStartedLeading: startFunc, 102 | OnStoppedLeading: stopFunc, 103 | OnNewLeader: newLeaderFunc, 104 | }, 105 | }) 106 | return l, err 107 | } 108 | -------------------------------------------------------------------------------- /pkg/sinks/opensearch.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "regexp" 11 | "strings" 12 | "time" 13 | 14 | opensearch "github.com/opensearch-project/opensearch-go" 15 | opensearchapi "github.com/opensearch-project/opensearch-go/opensearchapi" 16 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 17 | "github.com/rs/zerolog/log" 18 | ) 19 | 20 | type 
OpenSearchConfig struct { 21 | // Connection specific 22 | Hosts []string `yaml:"hosts"` 23 | Username string `yaml:"username"` 24 | Password string `yaml:"password"` 25 | // Indexing preferences 26 | UseEventID bool `yaml:"useEventID"` 27 | // DeDot all labels and annotations in the event. For both the event and the involvedObject 28 | DeDot bool `yaml:"deDot"` 29 | Index string `yaml:"index"` 30 | IndexFormat string `yaml:"indexFormat"` 31 | Type string `yaml:"type"` 32 | TLS TLS `yaml:"tls"` 33 | Layout map[string]interface{} `yaml:"layout"` 34 | } 35 | 36 | func NewOpenSearch(cfg *OpenSearchConfig) (*OpenSearch, error) { 37 | 38 | tlsClientConfig, err := setupTLS(&cfg.TLS) 39 | if err != nil { 40 | return nil, fmt.Errorf("failed to setup TLS: %w", err) 41 | } 42 | 43 | client, err := opensearch.NewClient(opensearch.Config{ 44 | Addresses: cfg.Hosts, 45 | Username: cfg.Username, 46 | Password: cfg.Password, 47 | Transport: &http.Transport{ 48 | TLSClientConfig: tlsClientConfig, 49 | }, 50 | }) 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | return &OpenSearch{ 56 | client: client, 57 | cfg: cfg, 58 | }, nil 59 | } 60 | 61 | type OpenSearch struct { 62 | client *opensearch.Client 63 | cfg *OpenSearchConfig 64 | } 65 | 66 | var osRegex = regexp.MustCompile(`(?s){(.*)}`) 67 | 68 | func osFormatIndexName(pattern string, when time.Time) string { 69 | m := osRegex.FindAllStringSubmatchIndex(pattern, -1) 70 | current := 0 71 | var builder strings.Builder 72 | 73 | for i := 0; i < len(m); i++ { 74 | pair := m[i] 75 | 76 | builder.WriteString(pattern[current:pair[0]]) 77 | builder.WriteString(when.Format(pattern[pair[0]+1 : pair[1]-1])) 78 | current = pair[1] 79 | } 80 | 81 | builder.WriteString(pattern[current:]) 82 | 83 | return builder.String() 84 | } 85 | 86 | func (e *OpenSearch) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 87 | var toSend []byte 88 | 89 | if e.cfg.DeDot { 90 | de := ev.DeDot() 91 | ev = &de 92 | } 93 | if e.cfg.Layout != nil 
{ 94 | res, err := convertLayoutTemplate(e.cfg.Layout, ev) 95 | if err != nil { 96 | return err 97 | } 98 | 99 | toSend, err = json.Marshal(res) 100 | if err != nil { 101 | return err 102 | } 103 | } else { 104 | toSend = ev.ToJSON() 105 | } 106 | 107 | var index string 108 | if len(e.cfg.IndexFormat) > 0 { 109 | now := time.Now() 110 | index = osFormatIndexName(e.cfg.IndexFormat, now) 111 | } else { 112 | index = e.cfg.Index 113 | } 114 | 115 | req := opensearchapi.IndexRequest{ 116 | Body: bytes.NewBuffer(toSend), 117 | Index: index, 118 | } 119 | 120 | // This should not be used for clusters with ES8.0+. 121 | if len(e.cfg.Type) > 0 { 122 | req.DocumentType = e.cfg.Type 123 | } 124 | 125 | if e.cfg.UseEventID { 126 | req.DocumentID = string(ev.UID) 127 | } 128 | 129 | resp, err := req.Do(ctx, e.client) 130 | if err != nil { 131 | return err 132 | } 133 | 134 | defer resp.Body.Close() 135 | if resp.StatusCode > 399 { 136 | rb, err := io.ReadAll(resp.Body) 137 | if err != nil { 138 | return err 139 | } 140 | log.Error().Msgf("Indexing failed: %s", string(rb)) 141 | } 142 | return nil 143 | } 144 | 145 | func (e *OpenSearch) Close() { 146 | // No-op 147 | } 148 | -------------------------------------------------------------------------------- /config.example.yaml: -------------------------------------------------------------------------------- 1 | logLevel: debug 2 | logFormat: json 3 | maxEventAgeSeconds: 10 4 | kubeQPS: 60 5 | kubeBurst: 60 6 | # namespace: my-namespace-only # Omitting it defaults to all namespaces. 7 | route: 8 | # Main route 9 | routes: 10 | # This route allows dumping all events because it has no fields to match and no drop rules. 
11 | - match: 12 | - receiver: "dump" 13 | # This starts another route, drops all the events in *test* namespaces and Normal events 14 | # for capturing critical events 15 | - match: 16 | - receiver: "alert" 17 | - receiver: "pipe" 18 | drop: 19 | - namespace: "*test*" 20 | - type: "Normal" 21 | minCount: 5 22 | apiVersion: "*beta*" 23 | # This a final route for user messages 24 | - match: 25 | - kind: "Pod|Deployment|ReplicaSet" 26 | labels: 27 | version: "dev" 28 | receiver: "slack" 29 | - match: 30 | - receiver: "firehose" 31 | drop: 32 | - type: "Normal" 33 | receivers: 34 | - name: "dump" 35 | elasticsearch: 36 | hosts: 37 | - "http://localhost:9200" 38 | indexFormat: "kube-events-{2006-01-02}" 39 | apiKey: ${ELASTIC_API_KEY} 40 | - name: "opensearch-dump" 41 | opensearch: 42 | hosts: 43 | - "http://localhost:9200" 44 | indexFormat: "kube-events-{2006-01-02}" 45 | - name: "alert" 46 | opsgenie: 47 | apiKey: "" 48 | priority: "P3" 49 | message: "Event {{ .Reason }} for {{ .InvolvedObject.Namespace }}/{{ .InvolvedObject.Name }} on K8s cluster" 50 | alias: "{{ .UID }}" 51 | description: "
{{ toPrettyJson . }}
" 52 | tags: 53 | - "event" 54 | - "{{ .Reason }}" 55 | - "{{ .InvolvedObject.Kind }}" 56 | - "{{ .InvolvedObject.Name }}" 57 | - name: "slack" 58 | slack: 59 | token: "" 60 | channel: "#mustafa-test" 61 | message: "Received a Kubernetes Event {{ .Message}}" 62 | fields: 63 | message: "{{ .Message }}" 64 | namespace: "{{ .Namespace }}" 65 | reason: "{{ .Reason }}" 66 | object: "{{ .Namespace }}" 67 | - name: "pipe" 68 | webhook: 69 | endpoint: "http://localhost:3000" 70 | headers: 71 | X-API-KEY: "123-456-OPSGENIE-789-ABC" 72 | User-Agent: "kube-event-exporter 1.0" 73 | streamName: "applicationMetric" 74 | layout: 75 | endpoint: "localhost2" 76 | eventType: "kube-event" 77 | createdAt: "{{ .GetTimestampMs }}" 78 | details: 79 | message: "{{ .Message }}" 80 | reason: "{{ .Reason }}" 81 | tip: "{{ .Type }}" 82 | count: "{{ .Count }}" 83 | kind: "{{ .InvolvedObject.Kind }}" 84 | name: "{{ .InvolvedObject.Name }}" 85 | namespace: "{{ .Namespace }}" 86 | component: "{{ .Source.Component }}" 87 | host: "{{ .Source.Host }}" 88 | labels: "{{ toJson .InvolvedObject.Labels}}" 89 | - name: "kafka" 90 | kafka: 91 | topic: "kube-event" 92 | brokers: 93 | - "localhost:9092" 94 | tls: 95 | enable: false 96 | certFile: "kafka-client.crt" 97 | keyFile: "kafka-client.key" 98 | caFile: "kafka-ca.crt" 99 | - name: "pubsub" 100 | pubsub: 101 | gcloud_project_id: "my-project" 102 | topic: "kube-event" 103 | create_topic: False 104 | - name: "eventbridge" 105 | eventbridge: 106 | detailType: "deployment" 107 | source: "cd" 108 | eventBusName: "default" 109 | region: "ap-southeast-1" 110 | details: 111 | message: "{{ .Message }}" 112 | namespace: "{{ .Namespace }}" 113 | reason: "{{ .Reason }}" 114 | object: "{{ .Namespace }}" 115 | - name: "syslog" 116 | syslog: 117 | network: "tcp" 118 | address: "127.0.0.1:11514" 119 | tag: "k8s.event" 120 | - name: "firehose" 121 | firehose: 122 | deliveryStreamName: "kubernetes-events" 123 | region: "us-east-1" 124 | deDot: true 125 | 
-------------------------------------------------------------------------------- /pkg/sinks/elasticsearch.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "regexp" 11 | "strings" 12 | "time" 13 | 14 | "github.com/elastic/go-elasticsearch/v7" 15 | "github.com/elastic/go-elasticsearch/v7/esapi" 16 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 17 | "github.com/rs/zerolog/log" 18 | ) 19 | 20 | type ElasticsearchConfig struct { 21 | // Connection specific 22 | Hosts []string `yaml:"hosts"` 23 | Username string `yaml:"username"` 24 | Password string `yaml:"password"` 25 | CloudID string `yaml:"cloudID"` 26 | APIKey string `yaml:"apiKey"` 27 | Headers map[string]string `yaml:"headers"` // Can be used to append additional key value pairs into the request headers 28 | // Indexing preferences 29 | UseEventID bool `yaml:"useEventID"` 30 | // DeDot all labels and annotations in the event. 
For both the event and the involvedObject 31 | DeDot bool `yaml:"deDot"` 32 | Index string `yaml:"index"` 33 | IndexFormat string `yaml:"indexFormat"` 34 | Type string `yaml:"type"` 35 | TLS TLS `yaml:"tls"` 36 | Layout map[string]interface{} `yaml:"layout"` 37 | } 38 | 39 | func NewElasticsearch(cfg *ElasticsearchConfig) (*Elasticsearch, error) { 40 | 41 | tlsClientConfig, err := setupTLS(&cfg.TLS) 42 | if err != nil { 43 | return nil, fmt.Errorf("failed to setup TLS: %w", err) 44 | } 45 | 46 | var header = http.Header{} 47 | if len(cfg.Headers) > 0 { 48 | for k, v := range cfg.Headers { 49 | header.Add(k, v) 50 | } 51 | } 52 | 53 | client, err := elasticsearch.NewClient(elasticsearch.Config{ 54 | Addresses: cfg.Hosts, 55 | Username: cfg.Username, 56 | Password: cfg.Password, 57 | Header: header, 58 | CloudID: cfg.CloudID, 59 | APIKey: cfg.APIKey, 60 | Transport: &http.Transport{ 61 | TLSClientConfig: tlsClientConfig, 62 | }, 63 | }) 64 | if err != nil { 65 | return nil, err 66 | } 67 | 68 | return &Elasticsearch{ 69 | client: client, 70 | cfg: cfg, 71 | }, nil 72 | } 73 | 74 | type Elasticsearch struct { 75 | client *elasticsearch.Client 76 | cfg *ElasticsearchConfig 77 | } 78 | 79 | var regex = regexp.MustCompile(`(?s){(.*)}`) 80 | 81 | func formatIndexName(pattern string, when time.Time) string { 82 | m := regex.FindAllStringSubmatchIndex(pattern, -1) 83 | current := 0 84 | var builder strings.Builder 85 | 86 | for i := 0; i < len(m); i++ { 87 | pair := m[i] 88 | 89 | builder.WriteString(pattern[current:pair[0]]) 90 | builder.WriteString(when.Format(pattern[pair[0]+1 : pair[1]-1])) 91 | current = pair[1] 92 | } 93 | 94 | builder.WriteString(pattern[current:]) 95 | 96 | return builder.String() 97 | } 98 | 99 | func (e *Elasticsearch) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 100 | var toSend []byte 101 | 102 | if e.cfg.DeDot { 103 | de := ev.DeDot() 104 | ev = &de 105 | } 106 | if e.cfg.Layout != nil { 107 | res, err := 
convertLayoutTemplate(e.cfg.Layout, ev) 108 | if err != nil { 109 | return err 110 | } 111 | 112 | toSend, err = json.Marshal(res) 113 | if err != nil { 114 | return err 115 | } 116 | } else { 117 | toSend = ev.ToJSON() 118 | } 119 | 120 | var index string 121 | if len(e.cfg.IndexFormat) > 0 { 122 | now := time.Now() 123 | index = formatIndexName(e.cfg.IndexFormat, now) 124 | } else { 125 | index = e.cfg.Index 126 | } 127 | 128 | req := esapi.IndexRequest{ 129 | Body: bytes.NewBuffer(toSend), 130 | Index: index, 131 | } 132 | 133 | // This should not be used for clusters with ES8.0+. 134 | if len(e.cfg.Type) > 0 { 135 | req.DocumentType = e.cfg.Type 136 | } 137 | 138 | if e.cfg.UseEventID { 139 | req.DocumentID = string(ev.UID) 140 | } 141 | 142 | resp, err := req.Do(ctx, e.client) 143 | if err != nil { 144 | return err 145 | } 146 | 147 | defer resp.Body.Close() 148 | if resp.StatusCode > 399 { 149 | rb, err := io.ReadAll(resp.Body) 150 | if err != nil { 151 | return err 152 | } 153 | log.Error().Msgf("Indexing failed: %s", string(rb)) 154 | } 155 | return nil 156 | } 157 | 158 | func (e *Elasticsearch) Close() { 159 | // No-op 160 | } 161 | -------------------------------------------------------------------------------- /pkg/exporter/config.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "regexp" 7 | "strconv" 8 | 9 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 10 | "github.com/resmoio/kubernetes-event-exporter/pkg/sinks" 11 | "github.com/rs/zerolog/log" 12 | "k8s.io/client-go/rest" 13 | ) 14 | 15 | const ( 16 | DefaultCacheSize = 1024 17 | ) 18 | 19 | // Config allows configuration 20 | type Config struct { 21 | // Route is the top route that the events will match 22 | // TODO: There is currently a tight coupling with route and config, but not with receiver config and sink so 23 | // TODO: I am not sure what to do here. 
24 | LogLevel string `yaml:"logLevel"` 25 | LogFormat string `yaml:"logFormat"` 26 | ThrottlePeriod int64 `yaml:"throttlePeriod"` 27 | MaxEventAgeSeconds int64 `yaml:"maxEventAgeSeconds"` 28 | ClusterName string `yaml:"clusterName,omitempty"` 29 | Namespace string `yaml:"namespace"` 30 | LeaderElection kube.LeaderElectionConfig `yaml:"leaderElection"` 31 | Route Route `yaml:"route"` 32 | Receivers []sinks.ReceiverConfig `yaml:"receivers"` 33 | KubeQPS float32 `yaml:"kubeQPS,omitempty"` 34 | KubeBurst int `yaml:"kubeBurst,omitempty"` 35 | MetricsNamePrefix string `yaml:"metricsNamePrefix,omitempty"` 36 | OmitLookup bool `yaml:"omitLookup,omitempty"` 37 | CacheSize int `yaml:"cacheSize,omitempty"` 38 | } 39 | 40 | func (c *Config) SetDefaults() { 41 | if c.CacheSize == 0 { 42 | c.CacheSize = DefaultCacheSize 43 | log.Debug().Msg("setting config.cacheSize=1024 (default)") 44 | } 45 | 46 | if c.KubeBurst == 0 { 47 | c.KubeBurst = rest.DefaultBurst 48 | log.Debug().Msg(fmt.Sprintf("setting config.kubeBurst=%d (default)", rest.DefaultBurst)) 49 | } 50 | 51 | if c.KubeQPS == 0 { 52 | c.KubeQPS = rest.DefaultQPS 53 | log.Debug().Msg(fmt.Sprintf("setting config.kubeQPS=%.2f (default)", rest.DefaultQPS)) 54 | } 55 | } 56 | 57 | func (c *Config) Validate() error { 58 | if err := c.validateDefaults(); err != nil { 59 | return err 60 | } 61 | if err := c.validateMetricsNamePrefix(); err != nil { 62 | return err 63 | } 64 | 65 | // No duplicate receivers 66 | // Receivers individually 67 | // Routers recursive 68 | return nil 69 | } 70 | 71 | func (c *Config) validateDefaults() error { 72 | if err := c.validateMaxEventAgeSeconds(); err != nil { 73 | return err 74 | } 75 | return nil 76 | } 77 | 78 | func (c *Config) validateMaxEventAgeSeconds() error { 79 | if c.ThrottlePeriod == 0 && c.MaxEventAgeSeconds == 0 { 80 | c.MaxEventAgeSeconds = 5 81 | log.Info().Msg("setting config.maxEventAgeSeconds=5 (default)") 82 | } else if c.ThrottlePeriod != 0 && c.MaxEventAgeSeconds != 0 { 83 
| log.Error().Msg("cannot set both throttlePeriod (depricated) and MaxEventAgeSeconds") 84 | return errors.New("validateMaxEventAgeSeconds failed") 85 | } else if c.ThrottlePeriod != 0 { 86 | log_value := strconv.FormatInt(c.ThrottlePeriod, 10) 87 | log.Info().Msg("config.maxEventAgeSeconds=" + log_value) 88 | log.Warn().Msg("config.throttlePeriod is depricated, consider using config.maxEventAgeSeconds instead") 89 | c.MaxEventAgeSeconds = c.ThrottlePeriod 90 | } else { 91 | log_value := strconv.FormatInt(c.MaxEventAgeSeconds, 10) 92 | log.Info().Msg("config.maxEventAgeSeconds=" + log_value) 93 | } 94 | return nil 95 | } 96 | 97 | func (c *Config) validateMetricsNamePrefix() error { 98 | if c.MetricsNamePrefix != "" { 99 | // https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels 100 | checkResult, err := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9_:]*_$", c.MetricsNamePrefix) 101 | if err != nil { 102 | return err 103 | } 104 | if checkResult { 105 | log.Info().Msg("config.metricsNamePrefix='" + c.MetricsNamePrefix + "'") 106 | } else { 107 | log.Error().Msg("config.metricsNamePrefix should match the regex: ^[a-zA-Z][a-zA-Z0-9_:]*_$") 108 | return errors.New("validateMetricsNamePrefix failed") 109 | } 110 | } else { 111 | log.Warn().Msg("metrics name prefix is empty, setting config.metricsNamePrefix='event_exporter_' is recommended") 112 | } 113 | return nil 114 | } 115 | -------------------------------------------------------------------------------- /pkg/sinks/opscenter.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | 8 | "github.com/aws/aws-sdk-go/aws" 9 | "github.com/aws/aws-sdk-go/aws/session" 10 | "github.com/aws/aws-sdk-go/service/ssm" 11 | "github.com/aws/aws-sdk-go/service/ssm/ssmiface" 12 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 13 | ) 14 | 15 | // OpsCenterConfig is the configuration of the Sink. 
16 | type OpsCenterConfig struct { 17 | Category string `yaml:"category"` 18 | Description string `yaml:"description"` 19 | Notifications []string `yaml:"notifications"` 20 | OperationalData map[string]string `yaml:"operationalData"` 21 | Priority string `yaml:"priority"` 22 | Region string `yaml:"region"` 23 | RelatedOpsItems []string `yaml:"relatedOpsItems"` 24 | Severity string `yaml:"severity"` 25 | Source string `yaml:"source"` 26 | Tags map[string]string `yaml:"tags"` 27 | Title string `yaml:"title"` 28 | } 29 | 30 | // OpsCenterSink is an AWS OpsCenter notifcation path. 31 | type OpsCenterSink struct { 32 | cfg *OpsCenterConfig 33 | svc ssmiface.SSMAPI 34 | } 35 | 36 | // NewOpsCenterSink returns a new OpsCenterSink. 37 | func NewOpsCenterSink(cfg *OpsCenterConfig) (Sink, error) { 38 | sess, err := session.NewSession(&aws.Config{ 39 | Region: aws.String(cfg.Region)}, 40 | ) 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | svc := ssm.New(sess) 46 | return &OpsCenterSink{ 47 | cfg: cfg, 48 | svc: svc, 49 | }, nil 50 | } 51 | 52 | // Send ... 
53 | func (s *OpsCenterSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 54 | oi := ssm.CreateOpsItemInput{} 55 | t, err := GetString(ev, s.cfg.Title) 56 | if err != nil { 57 | return err 58 | } 59 | oi.Title = aws.String(t) 60 | d, err := GetString(ev, s.cfg.Description) 61 | if err != nil { 62 | return err 63 | } 64 | oi.Description = aws.String(d) 65 | su, err := GetString(ev, s.cfg.Source) 66 | if err != nil { 67 | return err 68 | } 69 | oi.Source = aws.String(su) 70 | 71 | // Category is optional although highly recommended 72 | if len(s.cfg.Category) != 0 { 73 | c, err := GetString(ev, s.cfg.Category) 74 | if err != nil { 75 | return err 76 | } 77 | oi.Category = aws.String(c) 78 | } 79 | 80 | // Severity is optional although highly recommended 81 | if len(s.cfg.Severity) != 0 { 82 | se, err := GetString(ev, s.cfg.Severity) 83 | if err != nil { 84 | return err 85 | } 86 | oi.Severity = aws.String(se) 87 | } 88 | 89 | // Priority is optional although highly recommended 90 | if len(s.cfg.Priority) != 0 { 91 | p, err := GetString(ev, s.cfg.Priority) 92 | if err != nil { 93 | return err 94 | } 95 | n, err := strconv.ParseInt(p, 10, 64) 96 | if err != nil { 97 | return fmt.Errorf("Priority is a non int") 98 | } 99 | oi.Priority = aws.Int64(n) 100 | } 101 | if s.cfg.OperationalData != nil { 102 | oids := make(map[string]*ssm.OpsItemDataValue) 103 | for k, v := range s.cfg.OperationalData { 104 | dv, err := GetString(ev, v) 105 | if err != nil { 106 | return err 107 | } 108 | oids[k] = &ssm.OpsItemDataValue{Type: aws.String("SearchableString"), Value: aws.String(dv)} 109 | } 110 | oi.OperationalData = oids 111 | } 112 | if s.cfg.Tags != nil { 113 | tvs := make([]*ssm.Tag, 0) 114 | for k, v := range s.cfg.Tags { 115 | tv, err := GetString(ev, v) 116 | if err != nil { 117 | return err 118 | } 119 | tvs = append(tvs, &ssm.Tag{Key: aws.String(k), Value: aws.String(tv)}) 120 | } 121 | oi.Tags = tvs 122 | } 123 | if s.cfg.RelatedOpsItems != nil { 124 | ris 
:= make([]*ssm.RelatedOpsItem, 0) 125 | for _, v := range s.cfg.OperationalData { 126 | ri, err := GetString(ev, v) 127 | if err != nil { 128 | return err 129 | } 130 | ris = append(ris, &ssm.RelatedOpsItem{OpsItemId: aws.String(ri)}) 131 | } 132 | oi.RelatedOpsItems = ris 133 | } 134 | if s.cfg.Notifications != nil { 135 | ns := make([]*ssm.OpsItemNotification, 0) 136 | for _, v := range s.cfg.Notifications { 137 | n, err := GetString(ev, v) 138 | if err != nil { 139 | return err 140 | } 141 | ns = append(ns, &ssm.OpsItemNotification{Arn: aws.String(n)}) 142 | } 143 | oi.Notifications = ns 144 | } 145 | 146 | _, createErr := s.svc.CreateOpsItemWithContext(ctx, &oi) 147 | 148 | return createErr 149 | } 150 | 151 | // Close ... 152 | func (s *OpsCenterSink) Close() { 153 | // No-op 154 | } 155 | -------------------------------------------------------------------------------- /pkg/exporter/config_test.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/goccy/go-yaml" 8 | "github.com/rs/zerolog/log" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | "k8s.io/client-go/rest" 12 | ) 13 | 14 | func readConfig(t *testing.T, yml string) Config { 15 | var cfg Config 16 | err := yaml.Unmarshal([]byte(yml), &cfg) 17 | if err != nil { 18 | t.Fatal("Cannot parse yaml", err) 19 | } 20 | return cfg 21 | } 22 | 23 | func Test_ParseConfig(t *testing.T) { 24 | const yml = ` 25 | route: 26 | routes: 27 | - drop: 28 | - minCount: 6 29 | apiVersion: v33 30 | match: 31 | - receiver: stdout 32 | receivers: 33 | - name: stdout 34 | stdout: {} 35 | ` 36 | 37 | cfg := readConfig(t, yml) 38 | 39 | assert.Len(t, cfg.Route.Routes, 1) 40 | assert.Len(t, cfg.Route.Routes[0].Drop, 1) 41 | assert.Len(t, cfg.Route.Routes[0].Match, 1) 42 | assert.Len(t, cfg.Route.Routes[0].Drop, 1) 43 | 44 | assert.Equal(t, int32(6), 
cfg.Route.Routes[0].Drop[0].MinCount) 45 | assert.Equal(t, "v33", cfg.Route.Routes[0].Drop[0].APIVersion) 46 | assert.Equal(t, "stdout", cfg.Route.Routes[0].Match[0].Receiver) 47 | } 48 | 49 | func TestValidate_IsCheckingMaxEventAgeSeconds_WhenNotSet(t *testing.T) { 50 | config := Config{} 51 | err := config.Validate() 52 | assert.True(t, config.MaxEventAgeSeconds == 5) 53 | assert.NoError(t, err) 54 | } 55 | 56 | func TestValidate_IsCheckingMaxEventAgeSeconds_WhenThrottledPeriodSet(t *testing.T) { 57 | output := &bytes.Buffer{} 58 | log.Logger = log.Logger.Output(output) 59 | 60 | config := Config{ 61 | ThrottlePeriod: 123, 62 | } 63 | err := config.Validate() 64 | 65 | assert.True(t, config.MaxEventAgeSeconds == 123) 66 | assert.Contains(t, output.String(), "config.maxEventAgeSeconds=123") 67 | assert.Contains(t, output.String(), "config.throttlePeriod is depricated, consider using config.maxEventAgeSeconds instead") 68 | assert.NoError(t, err) 69 | } 70 | 71 | func TestValidate_IsCheckingMaxEventAgeSeconds_WhenMaxEventAgeSecondsSet(t *testing.T) { 72 | output := &bytes.Buffer{} 73 | log.Logger = log.Logger.Output(output) 74 | 75 | config := Config{ 76 | MaxEventAgeSeconds: 123, 77 | } 78 | err := config.Validate() 79 | assert.True(t, config.MaxEventAgeSeconds == 123) 80 | assert.Contains(t, output.String(), "config.maxEventAgeSeconds=123") 81 | assert.NoError(t, err) 82 | } 83 | 84 | func TestValidate_IsCheckingMaxEventAgeSeconds_WhenMaxEventAgeSecondsAndThrottledPeriodSet(t *testing.T) { 85 | output := &bytes.Buffer{} 86 | log.Logger = log.Logger.Output(output) 87 | 88 | config := Config{ 89 | ThrottlePeriod: 123, 90 | MaxEventAgeSeconds: 321, 91 | } 92 | err := config.Validate() 93 | assert.Error(t, err) 94 | assert.Contains(t, output.String(), "cannot set both throttlePeriod (depricated) and MaxEventAgeSeconds") 95 | } 96 | 97 | func TestValidate_MetricsNamePrefix_WhenEmpty(t *testing.T) { 98 | output := &bytes.Buffer{} 99 | log.Logger = 
log.Logger.Output(output) 100 | 101 | config := Config{} 102 | err := config.Validate() 103 | assert.NoError(t, err) 104 | assert.Equal(t, "", config.MetricsNamePrefix) 105 | assert.Contains(t, output.String(), "metrics name prefix is empty, setting config.metricsNamePrefix='event_exporter_' is recommended") 106 | } 107 | 108 | func TestValidate_MetricsNamePrefix_WhenValid(t *testing.T) { 109 | output := &bytes.Buffer{} 110 | log.Logger = log.Logger.Output(output) 111 | 112 | validCases := []string{ 113 | "kubernetes_event_exporter_", 114 | "test_", 115 | "test_test_", 116 | "test::test_test_", 117 | "TEST::test_test_", 118 | "test_test::1234_test_", 119 | } 120 | 121 | for _, testPrefix := range validCases { 122 | config := Config{ 123 | MetricsNamePrefix: testPrefix, 124 | } 125 | err := config.Validate() 126 | assert.NoError(t, err) 127 | assert.Equal(t, testPrefix, config.MetricsNamePrefix) 128 | assert.Contains(t, output.String(), "config.metricsNamePrefix='"+testPrefix+"'") 129 | } 130 | } 131 | 132 | func TestValidate_MetricsNamePrefix_WhenInvalid(t *testing.T) { 133 | output := &bytes.Buffer{} 134 | log.Logger = log.Logger.Output(output) 135 | 136 | invalidCases := []string{ 137 | "no_tracing_underscore", 138 | "__reserved_", 139 | "::wrong_", 140 | "13245_test_", 141 | } 142 | 143 | for _, testPrefix := range invalidCases { 144 | config := Config{ 145 | MetricsNamePrefix: testPrefix, 146 | } 147 | err := config.Validate() 148 | assert.Error(t, err) 149 | assert.Equal(t, testPrefix, config.MetricsNamePrefix) 150 | assert.Contains(t, output.String(), "config.metricsNamePrefix should match the regex: ^[a-zA-Z][a-zA-Z0-9_:]*_$") 151 | } 152 | } 153 | 154 | func TestSetDefaults(t *testing.T) { 155 | config := Config{} 156 | config.SetDefaults() 157 | require.Equal(t, DefaultCacheSize, config.CacheSize) 158 | require.Equal(t, rest.DefaultQPS, config.KubeQPS) 159 | require.Equal(t, rest.DefaultBurst, config.KubeBurst) 160 | } 161 | 
-------------------------------------------------------------------------------- /pkg/kube/watcher.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/resmoio/kubernetes-event-exporter/pkg/metrics" 8 | "github.com/rs/zerolog/log" 9 | corev1 "k8s.io/api/core/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | "k8s.io/client-go/dynamic" 12 | "k8s.io/client-go/informers" 13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/rest" 15 | "k8s.io/client-go/tools/cache" 16 | ) 17 | 18 | var startUpTime = time.Now() 19 | 20 | type EventHandler func(event *EnhancedEvent) 21 | 22 | type EventWatcher struct { 23 | wg sync.WaitGroup 24 | informer cache.SharedInformer 25 | stopper chan struct{} 26 | objectMetadataCache ObjectMetadataProvider 27 | omitLookup bool 28 | fn EventHandler 29 | maxEventAgeSeconds time.Duration 30 | metricsStore *metrics.Store 31 | dynamicClient *dynamic.DynamicClient 32 | clientset *kubernetes.Clientset 33 | } 34 | 35 | func NewEventWatcher(config *rest.Config, namespace string, MaxEventAgeSeconds int64, metricsStore *metrics.Store, fn EventHandler, omitLookup bool, cacheSize int) *EventWatcher { 36 | clientset := kubernetes.NewForConfigOrDie(config) 37 | factory := informers.NewSharedInformerFactoryWithOptions(clientset, 0, informers.WithNamespace(namespace)) 38 | informer := factory.Core().V1().Events().Informer() 39 | 40 | watcher := &EventWatcher{ 41 | informer: informer, 42 | stopper: make(chan struct{}), 43 | objectMetadataCache: NewObjectMetadataProvider(cacheSize), 44 | omitLookup: omitLookup, 45 | fn: fn, 46 | maxEventAgeSeconds: time.Second * time.Duration(MaxEventAgeSeconds), 47 | metricsStore: metricsStore, 48 | dynamicClient: dynamic.NewForConfigOrDie(config), 49 | clientset: clientset, 50 | } 51 | 52 | informer.AddEventHandler(watcher) 53 | informer.SetWatchErrorHandler(func(r *cache.Reflector, err error) { 54 | 
watcher.metricsStore.WatchErrors.Inc() 55 | }) 56 | 57 | return watcher 58 | } 59 | 60 | func (e *EventWatcher) OnAdd(obj interface{}) { 61 | event := obj.(*corev1.Event) 62 | e.onEvent(event) 63 | } 64 | 65 | func (e *EventWatcher) OnUpdate(oldObj, newObj interface{}) { 66 | // Ignore updates 67 | } 68 | 69 | // Ignore events older than the maxEventAgeSeconds 70 | func (e *EventWatcher) isEventDiscarded(event *corev1.Event) bool { 71 | timestamp := event.LastTimestamp.Time 72 | if timestamp.IsZero() { 73 | timestamp = event.EventTime.Time 74 | } 75 | eventAge := time.Since(timestamp) 76 | if eventAge > e.maxEventAgeSeconds { 77 | // Log discarded events if they were created after the watcher started 78 | // (to suppress warnings from initial synchronization) 79 | if timestamp.After(startUpTime) { 80 | log.Warn(). 81 | Str("event age", eventAge.String()). 82 | Str("event namespace", event.Namespace). 83 | Str("event name", event.Name). 84 | Msg("Event discarded as being older then maxEventAgeSeconds") 85 | e.metricsStore.EventsDiscarded.Inc() 86 | } 87 | return true 88 | } 89 | return false 90 | } 91 | 92 | func (e *EventWatcher) onEvent(event *corev1.Event) { 93 | if e.isEventDiscarded(event) { 94 | return 95 | } 96 | 97 | log.Debug(). 98 | Str("msg", event.Message). 99 | Str("namespace", event.Namespace). 100 | Str("reason", event.Reason). 101 | Str("involvedObject", event.InvolvedObject.Name).
102 | Msg("Received event") 103 | 104 | e.metricsStore.EventsProcessed.Inc() 105 | 106 | ev := &EnhancedEvent{ 107 | Event: *event.DeepCopy(), 108 | } 109 | ev.Event.ManagedFields = nil 110 | 111 | if e.omitLookup { 112 | ev.InvolvedObject.ObjectReference = *event.InvolvedObject.DeepCopy() 113 | } else { 114 | objectMetadata, err := e.objectMetadataCache.GetObjectMetadata(&event.InvolvedObject, e.clientset, e.dynamicClient, e.metricsStore) 115 | if err != nil { 116 | if errors.IsNotFound(err) { 117 | ev.InvolvedObject.Deleted = true 118 | log.Error().Err(err).Msg("Object not found, likely deleted") 119 | } else { 120 | log.Error().Err(err).Msg("Failed to get object metadata") 121 | } 122 | ev.InvolvedObject.ObjectReference = *event.InvolvedObject.DeepCopy() 123 | } else { 124 | ev.InvolvedObject.Labels = objectMetadata.Labels 125 | ev.InvolvedObject.Annotations = objectMetadata.Annotations 126 | ev.InvolvedObject.OwnerReferences = objectMetadata.OwnerReferences 127 | ev.InvolvedObject.ObjectReference = *event.InvolvedObject.DeepCopy() 128 | ev.InvolvedObject.Deleted = objectMetadata.Deleted 129 | } 130 | } 131 | 132 | e.fn(ev) 133 | } 134 | 135 | func (e *EventWatcher) OnDelete(obj interface{}) { 136 | // Ignore deletes 137 | } 138 | 139 | func (e *EventWatcher) Start() { 140 | e.wg.Add(1) 141 | go func() { 142 | defer e.wg.Done() 143 | e.informer.Run(e.stopper) 144 | }() 145 | } 146 | 147 | func (e *EventWatcher) Stop() { 148 | close(e.stopper) 149 | e.wg.Wait() 150 | } 151 | 152 | func (e *EventWatcher) setStartUpTime(time time.Time) { 153 | startUpTime = time 154 | } 155 | -------------------------------------------------------------------------------- /pkg/batch/writer_test.go: -------------------------------------------------------------------------------- 1 | package batch 2 | 3 | import ( 4 | "context" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestSimpleWriter(t *testing.T) { 11 | cfg := WriterConfig{ 12 | 
BatchSize: 10, 13 | MaxRetries: 3, 14 | Interval: time.Second * 2, 15 | } 16 | 17 | var allItems []interface{} 18 | w := NewWriter(cfg, func(ctx context.Context, items []interface{}) []bool { 19 | resp := make([]bool, len(items)) 20 | for idx := range resp { 21 | resp[idx] = true 22 | } 23 | 24 | allItems = items 25 | return resp 26 | }) 27 | 28 | w.Start() 29 | w.Submit(1, 2, 3, 4, 5, 6, 7) 30 | assert.Len(t, allItems, 0) 31 | w.Stop() 32 | 33 | assert.Len(t, allItems, 7) 34 | for i := 0; i < 7; i++ { 35 | assert.Equal(t, allItems[i], i+1) 36 | } 37 | } 38 | 39 | func TestCorrectnessManyTimes(t *testing.T) { 40 | // Surely this is not the proper way to do it but anyways 41 | for i := 0; i < 10000; i++ { 42 | TestSimpleWriter(t) 43 | } 44 | } 45 | 46 | func TestLargerThanBatchSize(t *testing.T) { 47 | cfg := WriterConfig{ 48 | BatchSize: 3, 49 | MaxRetries: 3, 50 | Interval: time.Second * 2, 51 | } 52 | 53 | allItems := make([][]interface{}, 0) 54 | w := NewWriter(cfg, func(ctx context.Context, items []interface{}) []bool { 55 | resp := make([]bool, len(items)) 56 | for idx := range resp { 57 | resp[idx] = true 58 | } 59 | 60 | allItems = append(allItems, items) 61 | return resp 62 | }) 63 | 64 | w.Start() 65 | w.Submit(1, 2, 3, 4, 5, 6, 7) 66 | w.Stop() 67 | 68 | assert.Len(t, allItems, 3) 69 | assert.Equal(t, allItems[0], []interface{}{1, 2, 3}) 70 | assert.Equal(t, allItems[1], []interface{}{4, 5, 6}) 71 | assert.Equal(t, allItems[2], []interface{}{7}) 72 | } 73 | 74 | func TestSimpleInterval(t *testing.T) { 75 | cfg := WriterConfig{ 76 | BatchSize: 5, 77 | MaxRetries: 3, 78 | Interval: time.Millisecond * 20, 79 | } 80 | 81 | allItems := make([][]interface{}, 0) 82 | w := NewWriter(cfg, func(ctx context.Context, items []interface{}) []bool { 83 | resp := make([]bool, len(items)) 84 | for idx := range resp { 85 | resp[idx] = true 86 | } 87 | 88 | allItems = append(allItems, items) 89 | return resp 90 | }) 91 | 92 | w.Start() 93 | w.Submit(1, 2) 94 | 
time.Sleep(time.Millisecond * 5) 95 | assert.Len(t, allItems, 0) 96 | 97 | time.Sleep(time.Millisecond * 50) 98 | assert.Len(t, allItems, 1) 99 | assert.Equal(t, allItems[0], []interface{}{1, 2}) 100 | 101 | w.Stop() 102 | assert.Len(t, allItems, 1) 103 | } 104 | 105 | func TestIntervalComplex(t *testing.T) { 106 | cfg := WriterConfig{ 107 | BatchSize: 5, 108 | MaxRetries: 3, 109 | Interval: time.Millisecond * 20, 110 | } 111 | 112 | allItems := make([][]interface{}, 0) 113 | w := NewWriter(cfg, func(ctx context.Context, items []interface{}) []bool { 114 | resp := make([]bool, len(items)) 115 | for idx := range resp { 116 | resp[idx] = true 117 | } 118 | 119 | allItems = append(allItems, items) 120 | return resp 121 | }) 122 | 123 | w.Start() 124 | w.Submit(1, 2) 125 | time.Sleep(time.Millisecond * 5) 126 | w.Submit(3, 4) 127 | assert.Len(t, allItems, 0) 128 | 129 | time.Sleep(time.Millisecond * 50) 130 | assert.Len(t, allItems, 1) 131 | assert.Equal(t, allItems[0], []interface{}{1, 2, 3, 4}) 132 | 133 | w.Stop() 134 | assert.Len(t, allItems, 1) 135 | } 136 | 137 | func TestIntervalComplexAfterFlush(t *testing.T) { 138 | cfg := WriterConfig{ 139 | BatchSize: 5, 140 | MaxRetries: 3, 141 | Interval: time.Millisecond * 20, 142 | } 143 | 144 | allItems := make([][]interface{}, 0) 145 | w := NewWriter(cfg, func(ctx context.Context, items []interface{}) []bool { 146 | resp := make([]bool, len(items)) 147 | for idx := range resp { 148 | resp[idx] = true 149 | } 150 | 151 | allItems = append(allItems, items) 152 | return resp 153 | }) 154 | 155 | w.Start() 156 | w.Submit(1, 2) 157 | time.Sleep(time.Millisecond * 5) 158 | w.Submit(3, 4) 159 | assert.Len(t, allItems, 0) 160 | 161 | time.Sleep(time.Millisecond * 50) 162 | assert.Len(t, allItems, 1) 163 | assert.Equal(t, allItems[0], []interface{}{1, 2, 3, 4}) 164 | 165 | w.Submit(5, 6, 7) 166 | w.Stop() 167 | 168 | assert.Len(t, allItems, 2) 169 | assert.Equal(t, allItems[1], []interface{}{5, 6, 7}) 170 | } 171 | 172 | func 
TestRetry(t *testing.T) { 173 | cfg := WriterConfig{ 174 | BatchSize: 5, 175 | MaxRetries: 3, 176 | Interval: time.Millisecond * 10, 177 | } 178 | 179 | allItems := make([][]interface{}, 0) 180 | w := NewWriter(cfg, func(ctx context.Context, items []interface{}) []bool { 181 | resp := make([]bool, len(items)) 182 | for idx := range resp { 183 | resp[idx] = items[idx] != 2 184 | } 185 | 186 | allItems = append(allItems, items) 187 | return resp 188 | }) 189 | 190 | w.Start() 191 | w.Submit(1, 2, 3) 192 | assert.Len(t, allItems, 0) 193 | 194 | time.Sleep(time.Millisecond * 200) 195 | assert.Len(t, allItems, 4) 196 | 197 | assert.Equal(t, allItems[0], []interface{}{1, 2, 3}) 198 | assert.Equal(t, allItems[1], []interface{}{2}) 199 | assert.Equal(t, allItems[2], []interface{}{2}) 200 | assert.Equal(t, allItems[3], []interface{}{2}) 201 | } 202 | -------------------------------------------------------------------------------- /pkg/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/prometheus/client_golang/prometheus" 9 | "github.com/prometheus/client_golang/prometheus/collectors" 10 | "github.com/prometheus/client_golang/prometheus/promauto" 11 | "github.com/prometheus/client_golang/prometheus/promhttp" 12 | "github.com/prometheus/exporter-toolkit/web" 13 | "github.com/resmoio/kubernetes-event-exporter/pkg/version" 14 | "github.com/rs/zerolog/log" 15 | ) 16 | 17 | type Store struct { 18 | EventsProcessed prometheus.Counter 19 | EventsDiscarded prometheus.Counter 20 | WatchErrors prometheus.Counter 21 | SendErrors prometheus.Counter 22 | BuildInfo prometheus.GaugeFunc 23 | KubeApiReadCacheHits prometheus.Counter 24 | KubeApiReadRequests prometheus.Counter 25 | } 26 | 27 | // promLogger implements promhttp.Logger 28 | type promLogger struct{} 29 | 30 | func (pl promLogger) Println(v ...interface{}) { 31 | 
log.Logger.Error().Msg(fmt.Sprint(v...)) 32 | } 33 | 34 | // promLogger implements the Logger interface 35 | func (pl promLogger) Log(v ...interface{}) error { 36 | log.Logger.Info().Msg(fmt.Sprint(v...)) 37 | return nil 38 | } 39 | 40 | func Init(addr string, tlsConf string) { 41 | // Setup the prometheus metrics machinery 42 | // Add Go module build info. 43 | prometheus.MustRegister(collectors.NewBuildInfoCollector()) 44 | 45 | promLogger := promLogger{} 46 | metricsPath := "/metrics" 47 | 48 | // Expose the registered metrics via HTTP. 49 | http.Handle(metricsPath, promhttp.HandlerFor( 50 | prometheus.DefaultGatherer, 51 | promhttp.HandlerOpts{ 52 | // Opt into OpenMetrics to support exemplars. 53 | EnableOpenMetrics: true, 54 | }, 55 | )) 56 | 57 | landingConfig := web.LandingConfig{ 58 | Name: "kubernetes-event-exporter", 59 | Description: "Export Kubernetes Events to multiple destinations with routing and filtering", 60 | Links: []web.LandingLinks{ 61 | { 62 | Address: metricsPath, 63 | Text: "Metrics", 64 | }, 65 | }, 66 | } 67 | landingPage, _ := web.NewLandingPage(landingConfig) 68 | http.Handle("/", landingPage) 69 | 70 | http.HandleFunc("/-/healthy", func(w http.ResponseWriter, r *http.Request) { 71 | w.WriteHeader(http.StatusOK) 72 | fmt.Fprintf(w, "OK") 73 | }) 74 | http.HandleFunc("/-/ready", func(w http.ResponseWriter, r *http.Request) { 75 | w.WriteHeader(http.StatusOK) 76 | fmt.Fprintf(w, "OK") 77 | }) 78 | 79 | metricsServer := http.Server{ 80 | ReadHeaderTimeout: 5 * time.Second} 81 | 82 | metricsFlags := web.FlagConfig{ 83 | WebListenAddresses: &[]string{addr}, 84 | WebSystemdSocket: new(bool), 85 | WebConfigFile: &tlsConf, 86 | } 87 | 88 | // start up the http listener to expose the metrics 89 | go web.ListenAndServe(&metricsServer, &metricsFlags, promLogger) 90 | } 91 | 92 | func NewMetricsStore(name_prefix string) *Store { 93 | return &Store{ 94 | BuildInfo: promauto.NewGaugeFunc( 95 | prometheus.GaugeOpts{ 96 | Name: name_prefix + 
"build_info", 97 | Help: "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which Kubernetes Event Exporter was built.", 98 | ConstLabels: prometheus.Labels{ 99 | "version": version.Version, 100 | "revision": version.Revision(), 101 | "goversion": version.GoVersion, 102 | "goos": version.GoOS, 103 | "goarch": version.GoArch, 104 | }, 105 | }, 106 | func() float64 { return 1 }, 107 | ), 108 | EventsProcessed: promauto.NewCounter(prometheus.CounterOpts{ 109 | Name: name_prefix + "events_sent", 110 | Help: "The total number of events processed", 111 | }), 112 | EventsDiscarded: promauto.NewCounter(prometheus.CounterOpts{ 113 | Name: name_prefix + "events_discarded", 114 | Help: "The total number of events discarded because of being older than the maxEventAgeSeconds specified", 115 | }), 116 | WatchErrors: promauto.NewCounter(prometheus.CounterOpts{ 117 | Name: name_prefix + "watch_errors", 118 | Help: "The total number of errors received from the informer", 119 | }), 120 | SendErrors: promauto.NewCounter(prometheus.CounterOpts{ 121 | Name: name_prefix + "send_event_errors", 122 | Help: "The total number of send event errors", 123 | }), 124 | KubeApiReadCacheHits: promauto.NewCounter(prometheus.CounterOpts{ 125 | Name: name_prefix + "kube_api_read_cache_hits", 126 | Help: "The total number of read requests served from cache when looking up object metadata", 127 | }), 128 | KubeApiReadRequests: promauto.NewCounter(prometheus.CounterOpts{ 129 | Name: name_prefix + "kube_api_read_cache_misses", 130 | Help: "The total number of read requests served from kube-apiserver when looking up object metadata", 131 | }), 132 | } 133 | } 134 | 135 | func DestroyMetricsStore(store *Store) { 136 | prometheus.Unregister(store.EventsProcessed) 137 | prometheus.Unregister(store.EventsDiscarded) 138 | prometheus.Unregister(store.WatchErrors) 139 | prometheus.Unregister(store.SendErrors) 140 | prometheus.Unregister(store.BuildInfo) 141 | 
prometheus.Unregister(store.KubeApiReadCacheHits) 142 | prometheus.Unregister(store.KubeApiReadRequests) 143 | store = nil 144 | } 145 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "os" 7 | "os/signal" 8 | "syscall" 9 | "time" 10 | 11 | "github.com/resmoio/kubernetes-event-exporter/pkg/exporter" 12 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 13 | "github.com/resmoio/kubernetes-event-exporter/pkg/metrics" 14 | "github.com/resmoio/kubernetes-event-exporter/pkg/setup" 15 | "github.com/rs/zerolog" 16 | "github.com/rs/zerolog/log" 17 | ) 18 | 19 | var ( 20 | conf = flag.String("conf", "config.yaml", "The config path file") 21 | addr = flag.String("metrics-address", ":2112", "The address to listen on for HTTP requests.") 22 | kubeconfig = flag.String("kubeconfig", "", "Path to the kubeconfig file to use.") 23 | tlsConf = flag.String("metrics-tls-config", "", "The TLS config file for your metrics.") 24 | ) 25 | 26 | func main() { 27 | flag.Parse() 28 | 29 | log.Info().Msg("Reading config file " + *conf) 30 | configBytes, err := os.ReadFile(*conf) 31 | if err != nil { 32 | log.Fatal().Err(err).Msg("cannot read config file") 33 | } 34 | 35 | configBytes = []byte(os.ExpandEnv(string(configBytes))) 36 | 37 | cfg, err := setup.ParseConfigFromBytes(configBytes) 38 | if err != nil { 39 | log.Fatal().Msg(err.Error()) 40 | } 41 | 42 | if cfg.LogLevel != "" { 43 | level, err := zerolog.ParseLevel(cfg.LogLevel) 44 | if err != nil { 45 | log.Fatal().Err(err).Str("level", cfg.LogLevel).Msg("Invalid log level") 46 | } 47 | log.Logger = log.Logger.Level(level) 48 | } else { 49 | log.Info().Msg("Set default log level to info. 
Use config.logLevel=[debug | info | warn | error] to overwrite.") 50 | log.Logger = log.With().Caller().Logger().Level(zerolog.InfoLevel) 51 | } 52 | 53 | if cfg.LogFormat == "json" { 54 | // Defaults to JSON already nothing to do 55 | } else if cfg.LogFormat == "" || cfg.LogFormat == "pretty" { 56 | log.Logger = log.Logger.Output(zerolog.ConsoleWriter{ 57 | Out: os.Stdout, 58 | NoColor: false, 59 | TimeFormat: time.RFC3339, 60 | }) 61 | } else { 62 | log.Fatal().Str("log_format", cfg.LogFormat).Msg("Unknown log format") 63 | } 64 | 65 | cfg.SetDefaults() 66 | 67 | log.Info().Msgf("Starting with config: %#v", cfg) 68 | 69 | if err := cfg.Validate(); err != nil { 70 | log.Fatal().Err(err).Msg("config validation failed") 71 | } 72 | 73 | kubecfg, err := kube.GetKubernetesConfig(*kubeconfig) 74 | if err != nil { 75 | log.Fatal().Err(err).Msg("cannot get kubeconfig") 76 | } 77 | kubecfg.QPS = cfg.KubeQPS 78 | kubecfg.Burst = cfg.KubeBurst 79 | 80 | metrics.Init(*addr, *tlsConf) 81 | metricsStore := metrics.NewMetricsStore(cfg.MetricsNamePrefix) 82 | 83 | engine := exporter.NewEngine(&cfg, &exporter.ChannelBasedReceiverRegistry{MetricsStore: metricsStore}) 84 | onEvent := engine.OnEvent 85 | if len(cfg.ClusterName) != 0 { 86 | onEvent = func(event *kube.EnhancedEvent) { 87 | // note that per code this value is not set anywhere on the kubernetes side 88 | // https://github.com/kubernetes/apimachinery/blob/v0.22.4/pkg/apis/meta/v1/types.go#L276 89 | event.ClusterName = cfg.ClusterName 90 | engine.OnEvent(event) 91 | } 92 | } 93 | 94 | w := kube.NewEventWatcher(kubecfg, cfg.Namespace, cfg.MaxEventAgeSeconds, metricsStore, onEvent, cfg.OmitLookup, cfg.CacheSize) 95 | 96 | ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) 97 | defer cancel() 98 | 99 | if cfg.LeaderElection.Enabled { 100 | var wasLeader bool 101 | log.Info().Msg("leader election enabled") 102 | 103 | onStoppedLeading := func(ctx context.Context) { 104 | select { 105 | 
case <-ctx.Done(): 106 | log.Info().Msg("Context was cancelled, stopping leader election loop") 107 | default: 108 | log.Info().Msg("Lost the leader lease, stopping leader election loop") 109 | } 110 | } 111 | 112 | l, err := kube.NewLeaderElector(cfg.LeaderElection.LeaderElectionID, kubecfg, 113 | // this method gets called when this instance becomes the leader 114 | func(_ context.Context) { 115 | wasLeader = true 116 | log.Info().Msg("leader election won") 117 | w.Start() 118 | }, 119 | // this method gets called when the leader election loop is closed 120 | // either due to context cancellation or due to losing the leader lease 121 | func() { 122 | onStoppedLeading(ctx) 123 | }, 124 | func(identity string) { 125 | log.Info().Msg("new leader observed: " + identity) 126 | }, 127 | ) 128 | if err != nil { 129 | log.Fatal().Err(err).Msg("create leaderelector failed") 130 | } 131 | 132 | // Run returns if either the context is canceled or client stopped holding the leader lease 133 | l.Run(ctx) 134 | 135 | // We get here either because we lost the leader lease or the context was canceled. 136 | // In either case we want to stop the event watcher and exit. 137 | // However, if we were the leader, we wait leaseDuration seconds before stopping 138 | // so that we don't lose events until the next leader is elected. The new leader 139 | // will only be elected after leaseDuration seconds. 140 | if wasLeader { 141 | log.Info().Msgf("waiting leaseDuration seconds before stopping: %s", kube.GetLeaseDuration()) 142 | time.Sleep(kube.GetLeaseDuration()) 143 | } 144 | } else { 145 | log.Info().Msg("leader election disabled") 146 | w.Start() 147 | <-ctx.Done() 148 | } 149 | 150 | log.Info().Msg("Received signal to exit. 
Stopping.") 151 | w.Stop() 152 | engine.Stop() 153 | } 154 | -------------------------------------------------------------------------------- /pkg/exporter/route_test.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 5 | "github.com/resmoio/kubernetes-event-exporter/pkg/sinks" 6 | "github.com/stretchr/testify/assert" 7 | "testing" 8 | ) 9 | 10 | // testReceiverRegistry just records the events to the registry so that tests can validate routing behavior 11 | type testReceiverRegistry struct { 12 | rcvd map[string][]*kube.EnhancedEvent 13 | } 14 | 15 | func (t *testReceiverRegistry) Register(string, sinks.Sink) { 16 | panic("Why do you call this? It's for counting imaginary events for tests only") 17 | } 18 | 19 | func (t *testReceiverRegistry) SendEvent(name string, event *kube.EnhancedEvent) { 20 | if t.rcvd == nil { 21 | t.rcvd = make(map[string][]*kube.EnhancedEvent) 22 | } 23 | 24 | if _, ok := t.rcvd[name]; !ok { 25 | t.rcvd[name] = make([]*kube.EnhancedEvent, 0) 26 | } 27 | 28 | t.rcvd[name] = append(t.rcvd[name], event) 29 | } 30 | 31 | func (t *testReceiverRegistry) Close() { 32 | // No-op 33 | } 34 | 35 | func (t *testReceiverRegistry) isEventRcvd(name string, event *kube.EnhancedEvent) bool { 36 | if val, ok := t.rcvd[name]; !ok { 37 | return false 38 | } else { 39 | for _, v := range val { 40 | if v == event { 41 | return true 42 | } 43 | } 44 | return false 45 | } 46 | } 47 | 48 | func (t *testReceiverRegistry) count(name string) int { 49 | if val, ok := t.rcvd[name]; ok { 50 | return len(val) 51 | } else { 52 | return 0 53 | } 54 | } 55 | 56 | func TestEmptyRoute(t *testing.T) { 57 | ev := kube.EnhancedEvent{} 58 | reg := testReceiverRegistry{} 59 | 60 | r := Route{} 61 | 62 | r.ProcessEvent(&ev, ®) 63 | assert.Empty(t, reg.rcvd) 64 | } 65 | 66 | func TestBasicRoute(t *testing.T) { 67 | ev := kube.EnhancedEvent{} 68 | 
ev.Namespace = "kube-system" 69 | reg := testReceiverRegistry{} 70 | 71 | r := Route{ 72 | Match: []Rule{{ 73 | Namespace: "kube-system", 74 | Receiver: "osman", 75 | }}, 76 | } 77 | 78 | r.ProcessEvent(&ev, ®) 79 | assert.True(t, reg.isEventRcvd("osman", &ev)) 80 | } 81 | 82 | func TestDropRule(t *testing.T) { 83 | ev := kube.EnhancedEvent{} 84 | ev.Namespace = "kube-system" 85 | reg := testReceiverRegistry{} 86 | 87 | r := Route{ 88 | Drop: []Rule{{ 89 | Namespace: "kube-system", 90 | }}, 91 | Match: []Rule{{ 92 | Receiver: "osman", 93 | }}, 94 | } 95 | 96 | r.ProcessEvent(&ev, ®) 97 | assert.False(t, reg.isEventRcvd("osman", &ev)) 98 | assert.Zero(t, reg.count("osman")) 99 | } 100 | 101 | func TestSingleLevelMultipleMatchRoute(t *testing.T) { 102 | ev := kube.EnhancedEvent{} 103 | ev.Namespace = "kube-system" 104 | reg := testReceiverRegistry{} 105 | 106 | r := Route{ 107 | Match: []Rule{{ 108 | Namespace: "kube-system", 109 | Receiver: "osman", 110 | }, { 111 | Receiver: "any", 112 | }}, 113 | } 114 | 115 | r.ProcessEvent(&ev, ®) 116 | assert.True(t, reg.isEventRcvd("osman", &ev)) 117 | assert.True(t, reg.isEventRcvd("any", &ev)) 118 | } 119 | 120 | func TestSubRoute(t *testing.T) { 121 | ev := kube.EnhancedEvent{} 122 | ev.Namespace = "kube-system" 123 | reg := testReceiverRegistry{} 124 | 125 | r := Route{ 126 | Match: []Rule{{ 127 | Namespace: "kube-system", 128 | }}, 129 | Routes: []Route{{ 130 | Match: []Rule{{ 131 | Receiver: "osman", 132 | }}, 133 | }}, 134 | } 135 | 136 | r.ProcessEvent(&ev, ®) 137 | 138 | assert.True(t, reg.isEventRcvd("osman", &ev)) 139 | } 140 | 141 | func TestSubSubRoute(t *testing.T) { 142 | ev := kube.EnhancedEvent{} 143 | ev.Namespace = "kube-system" 144 | reg := testReceiverRegistry{} 145 | 146 | r := Route{ 147 | Match: []Rule{{ 148 | Namespace: "kube-*", 149 | }}, 150 | Routes: []Route{{ 151 | Match: []Rule{{ 152 | Receiver: "osman", 153 | }}, 154 | Routes: []Route{{ 155 | Match: []Rule{{ 156 | Receiver: "any", 157 | }}, 158 | 
}}, 159 | }}, 160 | } 161 | 162 | r.ProcessEvent(&ev, ®) 163 | 164 | assert.True(t, reg.isEventRcvd("osman", &ev)) 165 | assert.True(t, reg.isEventRcvd("any", &ev)) 166 | } 167 | 168 | func TestSubSubRouteWithDrop(t *testing.T) { 169 | ev := kube.EnhancedEvent{} 170 | ev.Namespace = "kube-system" 171 | reg := testReceiverRegistry{} 172 | 173 | r := Route{ 174 | Match: []Rule{{ 175 | Namespace: "kube-*", 176 | }}, 177 | Routes: []Route{{ 178 | Match: []Rule{{ 179 | Receiver: "osman", 180 | }}, 181 | Routes: []Route{{ 182 | Drop: []Rule{{ 183 | Namespace: "kube-system", 184 | }}, 185 | Match: []Rule{{ 186 | Receiver: "any", 187 | }}, 188 | }}, 189 | }}, 190 | } 191 | 192 | r.ProcessEvent(&ev, ®) 193 | 194 | assert.True(t, reg.isEventRcvd("osman", &ev)) 195 | assert.False(t, reg.isEventRcvd("any", &ev)) 196 | } 197 | 198 | // Test for issue: https://github.com/resmoio/kubernetes-event-exporter/issues/51 199 | func Test_GHIssue51(t *testing.T) { 200 | ev1 := kube.EnhancedEvent{} 201 | ev1.Type = "Warning" 202 | ev1.Reason = "FailedCreatePodContainer" 203 | 204 | ev2 := kube.EnhancedEvent{} 205 | ev2.Type = "Warning" 206 | ev2.Reason = "FailedCreate" 207 | 208 | reg := testReceiverRegistry{} 209 | 210 | r := Route{ 211 | Drop: []Rule{{ 212 | Type: "Normal", 213 | }}, 214 | Match: []Rule{{ 215 | Reason: "FailedCreatePodContainer", 216 | Receiver: "elastic", 217 | }}, 218 | } 219 | 220 | r.ProcessEvent(&ev1, ®) 221 | r.ProcessEvent(&ev2, ®) 222 | 223 | assert.True(t, reg.isEventRcvd("elastic", &ev1)) 224 | assert.False(t, reg.isEventRcvd("elastic", &ev2)) 225 | } 226 | -------------------------------------------------------------------------------- /pkg/sinks/opscenter_test.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "testing" 7 | "time" 8 | 9 | "github.com/aws/aws-sdk-go/aws" 10 | "github.com/aws/aws-sdk-go/aws/request" 11 | "github.com/aws/aws-sdk-go/service/ssm" 
12 | "github.com/aws/aws-sdk-go/service/ssm/ssmiface" 13 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 14 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | ) 16 | 17 | type mockedCreateOps struct { 18 | ssmiface.SSMAPI 19 | Resp ssm.CreateOpsItemOutput 20 | Input ssm.CreateOpsItemInput 21 | } 22 | 23 | func newMockedCreateOps(id string) *mockedCreateOps { 24 | return &mockedCreateOps{ 25 | Resp: ssm.CreateOpsItemOutput{OpsItemId: aws.String(id)}, 26 | } 27 | 28 | } 29 | 30 | func (m *mockedCreateOps) CreateOpsItemWithContext(ctx aws.Context, in *ssm.CreateOpsItemInput, o ...request.Option) (*ssm.CreateOpsItemOutput, error) { 31 | m.Input = *in 32 | return &m.Resp, nil 33 | } 34 | 35 | func (m *mockedCreateOps) GetInput() ssm.CreateOpsItemInput { 36 | return m.Input 37 | } 38 | 39 | func makeNotifications(input ...string) []*ssm.OpsItemNotification { 40 | ns := make([]*ssm.OpsItemNotification, 0) 41 | for _, v := range input { 42 | ns = append(ns, &ssm.OpsItemNotification{Arn: aws.String(v)}) 43 | } 44 | return ns 45 | } 46 | 47 | func makeRelatedOpsItems(input ...string) []*ssm.RelatedOpsItem { 48 | ris := make([]*ssm.RelatedOpsItem, 0) 49 | for _, v := range input { 50 | ris = append(ris, &ssm.RelatedOpsItem{OpsItemId: aws.String(v)}) 51 | } 52 | return ris 53 | } 54 | 55 | func makeTags(input map[string]string) []*ssm.Tag { 56 | tvs := make([]*ssm.Tag, 0) 57 | for k, v := range input { 58 | tvs = append(tvs, &ssm.Tag{Key: aws.String(k), Value: aws.String(v)}) 59 | } 60 | return tvs 61 | } 62 | func makeOperationalData(input map[string]string) map[string]*ssm.OpsItemDataValue { 63 | oids := make(map[string]*ssm.OpsItemDataValue) 64 | for k, v := range input { 65 | oids[k] = &ssm.OpsItemDataValue{Type: aws.String("SearchableString"), Value: aws.String(v)} 66 | } 67 | return oids 68 | } 69 | 70 | func TestOpsCenterSink_Send(t *testing.T) { 71 | m := newMockedCreateOps("id123456") 72 | ev := &kube.EnhancedEvent{} 73 | ev.Namespace = "default" 74 | 
ev.Reason = "my reason" 75 | ev.Type = "Warning" 76 | ev.InvolvedObject.Kind = "Pod" 77 | ev.InvolvedObject.Name = "nginx-server-123abc-456def" 78 | ev.InvolvedObject.Namespace = "prod" 79 | ev.Message = "Successfully pulled image \"nginx:latest\"" 80 | ev.FirstTimestamp = v1.Time{Time: time.Now()} 81 | type fields struct { 82 | cfg *OpsCenterConfig 83 | svc ssmiface.SSMAPI 84 | } 85 | type args struct { 86 | ctx context.Context 87 | ev *kube.EnhancedEvent 88 | } 89 | tests := []struct { 90 | name string 91 | fields fields 92 | args args 93 | wantErr bool 94 | wantInput ssm.CreateOpsItemInput 95 | }{ 96 | {"Simple Create", fields{ 97 | &OpsCenterConfig{ 98 | Title: "{{ .Message }}", 99 | Category: "{{ .Reason }}", 100 | Description: "Event {{ .Reason }} for {{ .InvolvedObject.Namespace }}/{{ .InvolvedObject.Name }} on K8s cluster", 101 | Notifications: []string{"sns1", "sns2"}, 102 | OperationalData: map[string]string{"Reason": "{{ .Reason }}"}, 103 | Priority: "6", 104 | Region: "us-east1", 105 | RelatedOpsItems: []string{"ops1", "ops2"}, 106 | Severity: "6", 107 | Source: "production", 108 | Tags: map[string]string{"ENV": "{{ .InvolvedObject.Namespace }}"}, 109 | }, 110 | m, 111 | }, args{context.Background(), ev}, false, 112 | ssm.CreateOpsItemInput{ 113 | Category: aws.String("my reason"), 114 | Description: aws.String("Event my reason for prod/nginx-server-123abc-456def on K8s cluster"), 115 | Notifications: makeNotifications("sns1", "sns2"), 116 | OperationalData: makeOperationalData(map[string]string{"Reason": "my reason"}), 117 | Priority: aws.Int64(6), 118 | RelatedOpsItems: makeRelatedOpsItems("my reason"), 119 | Severity: aws.String("6"), 120 | Source: aws.String("production"), 121 | Tags: makeTags(map[string]string{"ENV": "prod"}), 122 | Title: aws.String("Successfully pulled image \"nginx:latest\""), 123 | }, 124 | }, 125 | {"Invalid Priority: Want err", fields{ 126 | &OpsCenterConfig{ 127 | Title: "{{ .Message }}", 128 | Category: "{{ .Reason }}", 129 
| Description: "Event {{ .Reason }} for {{ .InvolvedObject.Namespace }}/{{ .InvolvedObject.Name }} on K8s cluster", 130 | Notifications: []string{"sns1", "sns2"}, 131 | OperationalData: map[string]string{"Reason": "{{ .Reason }}"}, 132 | Priority: "asdf", 133 | Region: "us-east1", 134 | RelatedOpsItems: []string{"ops1", "ops2"}, 135 | Severity: "6", 136 | Source: "production", 137 | Tags: map[string]string{"ENV": "{{ .InvolvedObject.Namespace }}"}, 138 | }, 139 | m, 140 | }, args{context.Background(), ev}, true, 141 | ssm.CreateOpsItemInput{}, 142 | }, 143 | } 144 | for _, tt := range tests { 145 | t.Run(tt.name, func(t *testing.T) { 146 | s := &OpsCenterSink{ 147 | cfg: tt.fields.cfg, 148 | svc: tt.fields.svc, 149 | } 150 | if err := s.Send(tt.args.ctx, tt.args.ev); (err != nil) != tt.wantErr { 151 | t.Errorf("OpsCenterSink.Send() error = %v, wantErr %v", err, tt.wantErr) 152 | } 153 | if !reflect.DeepEqual(m.Input, tt.wantInput) && tt.wantErr != true { 154 | t.Errorf("OpsCenterSink.Send() \nReturned:\n%v, \nWanted:\n %v", m.Input, tt.wantInput) 155 | } 156 | }) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/resmoio/kubernetes-event-exporter 2 | 3 | go 1.20 4 | 5 | require ( 6 | cloud.google.com/go/bigquery v1.44.0 7 | cloud.google.com/go/pubsub v1.28.0 8 | github.com/Masterminds/sprig/v3 v3.2.3 9 | github.com/Shopify/sarama v1.37.2 10 | github.com/aws/aws-sdk-go v1.44.162 11 | github.com/elastic/go-elasticsearch/v7 v7.17.7 12 | github.com/goccy/go-yaml v1.11.0 13 | github.com/hashicorp/golang-lru v0.5.3 14 | github.com/linkedin/goavro/v2 v2.12.0 15 | github.com/opensearch-project/opensearch-go v1.1.0 16 | github.com/opsgenie/opsgenie-go-sdk-v2 v1.2.14 17 | github.com/prometheus/client_golang v1.16.0 18 | github.com/prometheus/exporter-toolkit v0.10.0 19 | github.com/rs/zerolog v1.28.0 20 | 
github.com/slack-go/slack v0.12.0 21 | github.com/stretchr/testify v1.8.1 22 | google.golang.org/api v0.107.0 23 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 24 | k8s.io/api v0.26.7 25 | k8s.io/apimachinery v0.26.7 26 | k8s.io/client-go v0.26.7 27 | ) 28 | 29 | require ( 30 | github.com/xdg-go/pbkdf2 v1.0.0 // indirect 31 | github.com/xdg-go/stringprep v1.0.4 // indirect 32 | ) 33 | 34 | require ( 35 | cloud.google.com/go v0.107.0 // indirect 36 | cloud.google.com/go/compute v1.15.1 // indirect 37 | cloud.google.com/go/compute/metadata v0.2.3 // indirect 38 | cloud.google.com/go/iam v0.9.0 // indirect 39 | github.com/Masterminds/goutils v1.1.1 // indirect 40 | github.com/Masterminds/semver/v3 v3.2.0 // indirect 41 | github.com/beorn7/perks v1.0.1 // indirect 42 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 43 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 44 | github.com/davecgh/go-spew v1.1.1 // indirect 45 | github.com/eapache/go-resiliency v1.3.0 // indirect 46 | github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect 47 | github.com/eapache/queue v1.1.0 // indirect 48 | github.com/emicklei/go-restful/v3 v3.10.1 // indirect 49 | github.com/fatih/color v1.15.0 // indirect 50 | github.com/go-kit/log v0.2.1 // indirect 51 | github.com/go-logfmt/logfmt v0.5.1 // indirect 52 | github.com/go-logr/logr v1.2.3 // indirect 53 | github.com/go-openapi/jsonpointer v0.19.5 // indirect 54 | github.com/go-openapi/jsonreference v0.20.0 // indirect 55 | github.com/go-openapi/swag v0.22.3 // indirect 56 | github.com/gogo/protobuf v1.3.2 // indirect 57 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 58 | github.com/golang/protobuf v1.5.3 // indirect 59 | github.com/golang/snappy v0.0.4 // indirect 60 | github.com/google/gnostic v0.6.9 // indirect 61 | github.com/google/go-cmp v0.5.9 // indirect 62 | github.com/google/gofuzz v1.2.0 // indirect 63 | github.com/google/uuid v1.3.0 // indirect 64 | 
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect 65 | github.com/googleapis/gax-go/v2 v2.7.0 // indirect 66 | github.com/gorilla/websocket v1.5.0 // indirect 67 | github.com/hashicorp/errwrap v1.1.0 // indirect 68 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 69 | github.com/hashicorp/go-multierror v1.1.1 // indirect 70 | github.com/hashicorp/go-retryablehttp v0.7.1 // indirect 71 | github.com/hashicorp/go-uuid v1.0.3 // indirect 72 | github.com/huandu/xstrings v1.4.0 // indirect 73 | github.com/imdario/mergo v0.3.13 // indirect 74 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 75 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 76 | github.com/jcmturner/gofork v1.7.6 // indirect 77 | github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect 78 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 79 | github.com/jmespath/go-jmespath v0.4.0 // indirect 80 | github.com/josharian/intern v1.0.0 // indirect 81 | github.com/jpillora/backoff v1.0.0 // indirect 82 | github.com/json-iterator/go v1.1.12 // indirect 83 | github.com/klauspost/compress v1.15.13 // indirect 84 | github.com/mailru/easyjson v0.7.7 // indirect 85 | github.com/mattn/go-colorable v0.1.13 // indirect 86 | github.com/mattn/go-isatty v0.0.17 // indirect 87 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 88 | github.com/mitchellh/copystructure v1.2.0 // indirect 89 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 90 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 91 | github.com/modern-go/reflect2 v1.0.2 // indirect 92 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 93 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect 94 | github.com/pierrec/lz4/v4 v4.1.17 // indirect 95 | github.com/pkg/errors v0.9.1 // indirect 96 | github.com/pmezard/go-difflib v1.0.0 // indirect 97 | github.com/prometheus/client_model v0.4.0 // indirect 98 | github.com/prometheus/common 
v0.44.0 // indirect 99 | github.com/prometheus/procfs v0.10.1 // indirect 100 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 101 | github.com/shopspring/decimal v1.2.0 // indirect 102 | github.com/sirupsen/logrus v1.9.0 // indirect 103 | github.com/spf13/cast v1.3.1 // indirect 104 | github.com/spf13/pflag v1.0.5 // indirect 105 | github.com/xdg-go/scram v1.1.2 106 | go.opencensus.io v0.24.0 // indirect 107 | golang.org/x/crypto v0.17.0 // indirect 108 | golang.org/x/net v0.17.0 // indirect 109 | golang.org/x/oauth2 v0.8.0 // indirect 110 | golang.org/x/sync v0.2.0 // indirect 111 | golang.org/x/sys v0.15.0 // indirect 112 | golang.org/x/term v0.15.0 // indirect 113 | golang.org/x/text v0.14.0 // indirect 114 | golang.org/x/time v0.3.0 // indirect 115 | golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect 116 | google.golang.org/appengine v1.6.7 // indirect 117 | google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect 118 | google.golang.org/grpc v1.53.0 // indirect 119 | google.golang.org/protobuf v1.30.0 // indirect 120 | gopkg.in/inf.v0 v0.9.1 // indirect 121 | gopkg.in/yaml.v2 v2.4.0 // indirect 122 | gopkg.in/yaml.v3 v3.0.1 // indirect 123 | k8s.io/klog/v2 v2.80.1 // indirect 124 | k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 // indirect 125 | k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect 126 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 127 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 128 | sigs.k8s.io/yaml v1.3.0 // indirect 129 | ) 130 | -------------------------------------------------------------------------------- /pkg/sinks/bigquery.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "bufio" 5 | "cloud.google.com/go/bigquery" 6 | "context" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "github.com/resmoio/kubernetes-event-exporter/pkg/batch" 11 | 
// bigQuerySanitizeKey returns a copy of key usable as a BigQuery column name:
// every rune that is not a letter or digit becomes '_', and a leading '_' is
// prepended when the first rune is not a letter (BigQuery identifiers must
// start with a letter or underscore).
//
// Fixes over the original: an empty key no longer panics on key[0] (it yields
// "_"), the first character is decoded as a rune rather than a single byte
// (correct for multibyte UTF-8), and the result is built in a rune slice
// instead of quadratic string concatenation.
func bigQuerySanitizeKey(key string) string {
	if key == "" {
		return "_"
	}
	out := make([]rune, 0, len(key)+1)
	first := true
	for _, ch := range key {
		if first {
			first = false
			if !unicode.IsLetter(ch) {
				out = append(out, '_')
			}
		}
		if unicode.IsLetter(ch) || unicode.IsDigit(ch) {
			out = append(out, ch)
		} else {
			out = append(out, '_')
		}
	}
	return string(out)
}
52 | func bigQuerySanitizeKeys(x map[string]interface{}) map[string]interface{} { 53 | y := make(map[string]interface{}) 54 | for key, value := range x { 55 | if mapValue, ok := value.(map[string]interface{}); ok { 56 | y[bigQuerySanitizeKey(key)] = bigQuerySanitizeKeys(mapValue) 57 | } else { 58 | y[bigQuerySanitizeKey(key)] = value 59 | } 60 | } 61 | return y 62 | } 63 | 64 | func bigQueryWriteBatchToJsonFile(items []interface{}, path string) error { 65 | file, err := os.Create(path) 66 | if err != nil { 67 | return err 68 | } 69 | defer file.Close() 70 | 71 | writer := bufio.NewWriter(file) 72 | for i := 0; i < len(items); i++ { 73 | event := items[i].(*kube.EnhancedEvent) 74 | var mapStruct map[string]interface{} 75 | json.Unmarshal(event.ToJSON(), &mapStruct) 76 | jsonBytes, _ := json.Marshal(bigQuerySanitizeKeys(bigQueryDropNils(mapStruct))) 77 | fmt.Fprintln(writer, string(jsonBytes)) 78 | } 79 | return writer.Flush() 80 | } 81 | 82 | func bigQueryCreateDataset(cfg *BigQueryConfig) error { 83 | ctx := context.Background() 84 | 85 | client, err := bigquery.NewClient(ctx, cfg.Project, option.WithCredentialsFile(cfg.CredentialsPath)) 86 | if err != nil { 87 | return fmt.Errorf("bigquery.NewClient: %v", err) 88 | } 89 | defer client.Close() 90 | 91 | meta := &bigquery.DatasetMetadata{Location: cfg.Location} 92 | if err := client.Dataset(cfg.Dataset).Create(ctx, meta); err != nil { 93 | return err 94 | } 95 | return nil 96 | } 97 | 98 | func bigQueryImportJsonFromFile(path string, cfg *BigQueryConfig) error { 99 | ctx := context.Background() 100 | client, err := bigquery.NewClient(ctx, cfg.Project, option.WithCredentialsFile(cfg.CredentialsPath)) 101 | if err != nil { 102 | return fmt.Errorf("bigquery.NewClient: %v", err) 103 | } 104 | defer client.Close() 105 | 106 | f, err := os.Open(path) 107 | if err != nil { 108 | return err 109 | } 110 | defer f.Close() 111 | 112 | fi, err := f.Stat() 113 | if err != nil { 114 | return err 115 | } 116 | 117 | source := 
bigquery.NewReaderSource(f) 118 | source.SourceFormat = bigquery.JSON 119 | source.AutoDetect = true 120 | 121 | loader := client.Dataset(cfg.Dataset).Table(cfg.Table).LoaderFrom(source) 122 | loader.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"} 123 | 124 | log.Info().Msgf("BigQuery batch uploading %.3f KBs...", float64(fi.Size())/1e3) 125 | job, err := loader.Run(ctx) 126 | if err != nil { 127 | return err 128 | } 129 | status, err := job.Wait(ctx) 130 | if err != nil { 131 | return err 132 | } 133 | log.Info().Msgf("BigQuery batch uploading done.") 134 | if err := status.Err(); err != nil { 135 | return err 136 | } 137 | return nil 138 | } 139 | 140 | type BigQueryConfig struct { 141 | // BigQuery table config 142 | Location string `yaml:"location"` 143 | Project string `yaml:"project"` 144 | Dataset string `yaml:"dataset"` 145 | Table string `yaml:"table"` 146 | 147 | // Path to a JSON file that contains your service account key. 148 | CredentialsPath string `yaml:"credentials_path"` 149 | 150 | // Batching config 151 | BatchSize int `yaml:"batch_size"` 152 | MaxRetries int `yaml:"max_retries"` 153 | IntervalSeconds int `yaml:"interval_seconds"` 154 | TimeoutSeconds int `yaml:"timeout_seconds"` 155 | } 156 | 157 | func NewBigQuerySink(cfg *BigQueryConfig) (*BigQuerySink, error) { 158 | if cfg.Location == "" { 159 | cfg.Location = "US" 160 | } 161 | if cfg.Project == "" { 162 | return nil, errors.New("bigquery.project config option must be non-empty") 163 | } 164 | if cfg.Dataset == "" { 165 | return nil, errors.New("bigquery.dataset config option must be non-empty") 166 | } 167 | if cfg.Table == "" { 168 | return nil, errors.New("bigquery.table config option must be non-empty") 169 | } 170 | 171 | if cfg.BatchSize == 0 { 172 | cfg.BatchSize = 1000 173 | } 174 | if cfg.MaxRetries == 0 { 175 | cfg.MaxRetries = 3 176 | } 177 | if cfg.IntervalSeconds == 0 { 178 | cfg.IntervalSeconds = 10 179 | } 180 | if cfg.TimeoutSeconds == 0 { 181 | cfg.TimeoutSeconds = 
60 182 | } 183 | 184 | rand.Seed(time.Now().UTC().UnixNano()) 185 | handleBatch := func(ctx context.Context, items []interface{}) []bool { 186 | res := make([]bool, len(items)) 187 | for i := 0; i < len(items); i++ { 188 | res[i] = true 189 | } 190 | path := fmt.Sprintf("/tmp/bq_batch-%d-%04x.json", time.Now().UTC().Unix(), rand.Uint64()%65535) 191 | if err := bigQueryWriteBatchToJsonFile(items, path); err != nil { 192 | log.Error().Msgf("Failed to write JSON file: %v", err) 193 | } 194 | if err := bigQueryImportJsonFromFile(path, cfg); err != nil { 195 | log.Error().Msgf("BigQuerySink load failed: %v", err) 196 | } else { 197 | // The batch file is intentionally not deleted in case of failure allowing to manually uplaod it later and debug issues. 198 | if err := os.Remove(path); err != nil { 199 | log.Error().Msgf("Failed to delete file %v: %v", path, err) 200 | } 201 | } 202 | return res 203 | } 204 | 205 | if err := bigQueryCreateDataset(cfg); err != nil { 206 | log.Error().Msgf("BigQuerySink create dataset failed: %v", err) 207 | } 208 | 209 | batchWriter := batch.NewWriter( 210 | batch.WriterConfig{ 211 | BatchSize: cfg.BatchSize, 212 | MaxRetries: cfg.MaxRetries, 213 | Interval: time.Duration(cfg.IntervalSeconds) * time.Second, 214 | Timeout: time.Duration(cfg.TimeoutSeconds) * time.Second, 215 | }, 216 | handleBatch, 217 | ) 218 | batchWriter.Start() 219 | 220 | return &BigQuerySink{batchWriter: batchWriter}, nil 221 | } 222 | 223 | type BigQuerySink struct { 224 | batchWriter *batch.Writer 225 | } 226 | 227 | func (e *BigQuerySink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 228 | e.batchWriter.Submit(ev) 229 | return nil 230 | } 231 | 232 | func (e *BigQuerySink) Close() { 233 | e.batchWriter.Stop() 234 | } 235 | -------------------------------------------------------------------------------- /pkg/sinks/kafka.go: -------------------------------------------------------------------------------- 1 | package sinks 2 | 3 | import ( 4 | "context" 5 
| "crypto/sha256" 6 | "crypto/sha512" 7 | "crypto/tls" 8 | "crypto/x509" 9 | "encoding/json" 10 | "fmt" 11 | "os" 12 | 13 | "github.com/Shopify/sarama" 14 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 15 | "github.com/rs/zerolog/log" 16 | 17 | "github.com/xdg-go/scram" 18 | ) 19 | 20 | // KafkaConfig is the Kafka producer configuration 21 | type KafkaConfig struct { 22 | Topic string `yaml:"topic"` 23 | Brokers []string `yaml:"brokers"` 24 | Layout map[string]interface{} `yaml:"layout"` 25 | ClientId string `yaml:"clientId"` 26 | CompressionCodec string `yaml:"compressionCodec" default:"none"` 27 | Version string `yaml:"version"` 28 | TLS struct { 29 | Enable bool `yaml:"enable"` 30 | CaFile string `yaml:"caFile"` 31 | CertFile string `yaml:"certFile"` 32 | KeyFile string `yaml:"keyFile"` 33 | InsecureSkipVerify bool `yaml:"insecureSkipVerify"` 34 | } `yaml:"tls"` 35 | SASL struct { 36 | Enable bool `yaml:"enable"` 37 | Username string `yaml:"username"` 38 | Password string `yaml:"password"` 39 | Mechanism string `yaml:"mechanism" default:"plain"` 40 | } `yaml:"sasl"` 41 | KafkaEncode Avro `yaml:"avro"` 42 | } 43 | 44 | // KafkaEncoder is an interface type for adding an 45 | // encoder to the kafka data pipeline 46 | type KafkaEncoder interface { 47 | encode([]byte) ([]byte, error) 48 | } 49 | 50 | // KafkaSink is a sink that sends events to a Kafka topic 51 | type KafkaSink struct { 52 | producer sarama.SyncProducer 53 | cfg *KafkaConfig 54 | encoder KafkaEncoder 55 | } 56 | 57 | var CompressionCodecs = map[string]sarama.CompressionCodec{ 58 | "none": sarama.CompressionNone, 59 | "snappy": sarama.CompressionSnappy, 60 | "gzip": sarama.CompressionGZIP, 61 | "lz4": sarama.CompressionLZ4, 62 | "zstd": sarama.CompressionZSTD, 63 | } 64 | 65 | func NewKafkaSink(cfg *KafkaConfig) (Sink, error) { 66 | var avro KafkaEncoder 67 | producer, err := createSaramaProducer(cfg) 68 | if err != nil { 69 | return nil, err 70 | } 71 | 72 | log.Info().Msgf("kafka: 
Producer initialized for topic: %s, brokers: %s", cfg.Topic, cfg.Brokers) 73 | if len(cfg.KafkaEncode.SchemaID) > 0 { 74 | var err error 75 | avro, err = NewAvroEncoder(cfg.KafkaEncode.SchemaID, cfg.KafkaEncode.Schema) 76 | if err != nil { 77 | return nil, err 78 | } 79 | log.Info().Msgf("kafka: Producer using avro encoding with schemaid: %s", cfg.KafkaEncode.SchemaID) 80 | } 81 | 82 | return &KafkaSink{ 83 | producer: producer, 84 | cfg: cfg, 85 | encoder: avro, 86 | }, nil 87 | } 88 | 89 | // Send an event to Kafka synchronously 90 | func (k *KafkaSink) Send(ctx context.Context, ev *kube.EnhancedEvent) error { 91 | var toSend []byte 92 | 93 | if k.cfg.Layout != nil { 94 | res, err := convertLayoutTemplate(k.cfg.Layout, ev) 95 | if err != nil { 96 | return err 97 | } 98 | 99 | toSend, err = json.Marshal(res) 100 | if err != nil { 101 | return err 102 | } 103 | } else if len(k.cfg.KafkaEncode.SchemaID) > 0 { 104 | var err error 105 | toSend, err = k.encoder.encode(ev.ToJSON()) 106 | if err != nil { 107 | return err 108 | } 109 | } else { 110 | toSend = ev.ToJSON() 111 | } 112 | 113 | _, _, err := k.producer.SendMessage(&sarama.ProducerMessage{ 114 | Topic: k.cfg.Topic, 115 | Key: sarama.StringEncoder(string(ev.UID)), 116 | Value: sarama.ByteEncoder(toSend), 117 | }) 118 | 119 | return err 120 | } 121 | 122 | // Close the Kafka producer 123 | func (k *KafkaSink) Close() { 124 | log.Info().Msgf("kafka: Closing producer...") 125 | 126 | if err := k.producer.Close(); err != nil { 127 | log.Error().Err(err).Msg("Failed to shut down the Kafka producer cleanly") 128 | } else { 129 | log.Info().Msg("kafka: Closed producer") 130 | } 131 | } 132 | 133 | func createSaramaProducer(cfg *KafkaConfig) (sarama.SyncProducer, error) { 134 | // Default Sarama config 135 | saramaConfig := sarama.NewConfig() 136 | if cfg.Version != "" { 137 | version, err := sarama.ParseKafkaVersion(cfg.Version) 138 | if err != nil { 139 | return nil, err 140 | } 141 | saramaConfig.Version = version 
142 | } else { 143 | saramaConfig.Version = sarama.MaxVersion 144 | } 145 | saramaConfig.Metadata.Full = true 146 | saramaConfig.ClientID = cfg.ClientId 147 | 148 | // Necessary for SyncProducer 149 | saramaConfig.Producer.Return.Successes = true 150 | saramaConfig.Producer.Return.Errors = true 151 | if _, ok := CompressionCodecs[cfg.CompressionCodec]; ok { 152 | saramaConfig.Producer.Compression = CompressionCodecs[cfg.CompressionCodec] 153 | } 154 | 155 | // TLS Client auth override 156 | if cfg.TLS.Enable { 157 | 158 | caCert, err := os.ReadFile(cfg.TLS.CaFile) 159 | if err != nil { 160 | return nil, fmt.Errorf("error loading ca file: %w", err) 161 | } 162 | 163 | caCertPool := x509.NewCertPool() 164 | caCertPool.AppendCertsFromPEM(caCert) 165 | 166 | saramaConfig.Net.TLS.Enable = true 167 | saramaConfig.Net.TLS.Config = &tls.Config{ 168 | RootCAs: caCertPool, 169 | InsecureSkipVerify: cfg.TLS.InsecureSkipVerify, 170 | } 171 | 172 | if cfg.TLS.CertFile != "" && cfg.TLS.KeyFile != "" { 173 | cert, err := tls.LoadX509KeyPair(cfg.TLS.CertFile, cfg.TLS.KeyFile) 174 | if err != nil { 175 | return nil, err 176 | } 177 | 178 | saramaConfig.Net.TLS.Config.Certificates = []tls.Certificate{cert} 179 | } 180 | } 181 | 182 | // SASL Client auth 183 | if cfg.SASL.Enable { 184 | saramaConfig.Net.SASL.Enable = true 185 | saramaConfig.Net.SASL.User = cfg.SASL.Username 186 | saramaConfig.Net.SASL.Password = cfg.SASL.Password 187 | if cfg.SASL.Mechanism == "sha512" { 188 | saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} } 189 | saramaConfig.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 190 | } else if cfg.SASL.Mechanism == "sha256" { 191 | saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} } 192 | saramaConfig.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256 193 | } else if cfg.SASL.Mechanism == "plain" || cfg.SASL.Mechanism == 
"" { 194 | saramaConfig.Net.SASL.Mechanism = sarama.SASLTypePlaintext 195 | } else { 196 | return nil, fmt.Errorf("invalid scram sha mechanism: %s: can be one of 'sha256', 'sha512' or 'plain'", cfg.SASL.Mechanism) 197 | } 198 | } 199 | 200 | // TODO: Find a generic way to override all other configs 201 | 202 | // Build producer 203 | producer, err := sarama.NewSyncProducer(cfg.Brokers, saramaConfig) 204 | if err != nil { 205 | return nil, err 206 | } 207 | 208 | return producer, nil 209 | } 210 | 211 | var ( 212 | SHA256 scram.HashGeneratorFcn = sha256.New 213 | SHA512 scram.HashGeneratorFcn = sha512.New 214 | ) 215 | 216 | type XDGSCRAMClient struct { 217 | *scram.Client 218 | *scram.ClientConversation 219 | scram.HashGeneratorFcn 220 | } 221 | 222 | func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { 223 | x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) 224 | if err != nil { 225 | return err 226 | } 227 | x.ClientConversation = x.Client.NewConversation() 228 | return nil 229 | } 230 | 231 | func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { 232 | response, err = x.ClientConversation.Step(challenge) 233 | return 234 | } 235 | 236 | func (x *XDGSCRAMClient) Done() bool { 237 | return x.ClientConversation.Done() 238 | } 239 | -------------------------------------------------------------------------------- /pkg/exporter/rule_test.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "github.com/resmoio/kubernetes-event-exporter/pkg/kube" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestEmptyRule(t *testing.T) { 10 | ev := &kube.EnhancedEvent{} 11 | var r Rule 12 | 13 | assert.True(t, r.MatchesEvent(ev)) 14 | } 15 | 16 | func TestBasicRule(t *testing.T) { 17 | ev := &kube.EnhancedEvent{} 18 | ev.Namespace = "kube-system" 19 | r := Rule{ 20 | Namespace: "kube-system", 21 | } 22 | 23 | 
assert.True(t, r.MatchesEvent(ev)) 24 | } 25 | 26 | func TestBasicNoMatchRule(t *testing.T) { 27 | ev := &kube.EnhancedEvent{} 28 | ev.Namespace = "default" 29 | r := Rule{ 30 | Namespace: "kube-system", 31 | } 32 | 33 | assert.False(t, r.MatchesEvent(ev)) 34 | } 35 | 36 | func TestBasicRegexRule(t *testing.T) { 37 | ev1 := &kube.EnhancedEvent{} 38 | ev1.Namespace = "kube-system" 39 | 40 | ev2 := &kube.EnhancedEvent{} 41 | ev2.Namespace = "kube-public" 42 | 43 | ev3 := &kube.EnhancedEvent{} 44 | ev3.Namespace = "default" 45 | 46 | r := Rule{ 47 | Namespace: "kube-*", 48 | } 49 | 50 | assert.True(t, r.MatchesEvent(ev1)) 51 | assert.True(t, r.MatchesEvent(ev2)) 52 | assert.False(t, r.MatchesEvent(ev3)) 53 | } 54 | 55 | func TestLabelRegexRule(t *testing.T) { 56 | ev := &kube.EnhancedEvent{} 57 | ev.InvolvedObject.Labels = map[string]string{ 58 | "version": "alpha-123", 59 | } 60 | 61 | r := Rule{ 62 | Labels: map[string]string{ 63 | "version": "alpha", 64 | }, 65 | } 66 | 67 | assert.True(t, r.MatchesEvent(ev)) 68 | } 69 | 70 | func TestOneLabelMatchesRule(t *testing.T) { 71 | ev := &kube.EnhancedEvent{} 72 | ev.InvolvedObject.Labels = map[string]string{ 73 | "env": "prod", 74 | } 75 | 76 | r := Rule{ 77 | Labels: map[string]string{ 78 | "env": "prod", 79 | }, 80 | } 81 | 82 | assert.True(t, r.MatchesEvent(ev)) 83 | } 84 | 85 | func TestOneLabelDoesNotMatchRule(t *testing.T) { 86 | ev := &kube.EnhancedEvent{} 87 | ev.InvolvedObject.Labels = map[string]string{ 88 | "env": "lab", 89 | } 90 | 91 | r := Rule{ 92 | Labels: map[string]string{ 93 | "env": "prod", 94 | }, 95 | } 96 | 97 | assert.False(t, r.MatchesEvent(ev)) 98 | } 99 | 100 | func TestTwoLabelMatchesRule(t *testing.T) { 101 | ev := &kube.EnhancedEvent{} 102 | ev.InvolvedObject.Labels = map[string]string{ 103 | "env": "prod", 104 | "version": "beta", 105 | } 106 | 107 | r := Rule{ 108 | Labels: map[string]string{ 109 | "env": "prod", 110 | "version": "beta", 111 | }, 112 | } 113 | 114 | assert.True(t, 
r.MatchesEvent(ev)) 115 | } 116 | 117 | func TestTwoLabelRequiredRule(t *testing.T) { 118 | ev := &kube.EnhancedEvent{} 119 | ev.InvolvedObject.Labels = map[string]string{ 120 | "env": "prod", 121 | "version": "alpha", 122 | } 123 | 124 | r := Rule{ 125 | Labels: map[string]string{ 126 | "env": "prod", 127 | "version": "beta", 128 | }, 129 | } 130 | 131 | assert.False(t, r.MatchesEvent(ev)) 132 | } 133 | 134 | func TestTwoLabelRequiredOneMissingRule(t *testing.T) { 135 | ev := &kube.EnhancedEvent{} 136 | ev.InvolvedObject.Labels = map[string]string{ 137 | "age": "very-old", 138 | "version": "beta", 139 | } 140 | 141 | r := Rule{ 142 | Labels: map[string]string{ 143 | "env": "prod", 144 | "version": "beta", 145 | }, 146 | } 147 | 148 | assert.False(t, r.MatchesEvent(ev)) 149 | } 150 | 151 | func TestOneAnnotationMatchesRule(t *testing.T) { 152 | ev := &kube.EnhancedEvent{} 153 | ev.InvolvedObject.Annotations = map[string]string{ 154 | "name": "source", 155 | "service": "event-exporter", 156 | } 157 | 158 | r := Rule{ 159 | Annotations: map[string]string{ 160 | "name": "sou*", 161 | }, 162 | } 163 | assert.True(t, r.MatchesEvent(ev)) 164 | } 165 | 166 | func TestOneAnnotationDoesNotMatchRule(t *testing.T) { 167 | ev := &kube.EnhancedEvent{} 168 | ev.InvolvedObject.Annotations = map[string]string{ 169 | "name": "source", 170 | } 171 | 172 | r := Rule{ 173 | Annotations: map[string]string{ 174 | "name": "test*", 175 | }, 176 | } 177 | 178 | assert.False(t, r.MatchesEvent(ev)) 179 | } 180 | 181 | func TestTwoAnnotationsMatchesRule(t *testing.T) { 182 | ev := &kube.EnhancedEvent{} 183 | ev.InvolvedObject.Annotations = map[string]string{ 184 | "name": "source", 185 | "service": "event-exporter", 186 | } 187 | 188 | r := Rule{ 189 | Annotations: map[string]string{ 190 | "name": "sou.*", 191 | "service": "event*", 192 | }, 193 | } 194 | 195 | assert.True(t, r.MatchesEvent(ev)) 196 | } 197 | 198 | func TestTwoAnnotationsRequiredOneMissingRule(t *testing.T) { 199 | ev := 
&kube.EnhancedEvent{} 200 | ev.InvolvedObject.Annotations = map[string]string{ 201 | "service": "event-exporter", 202 | } 203 | 204 | r := Rule{ 205 | Annotations: map[string]string{ 206 | "name": "sou*", 207 | "service": "event*", 208 | }, 209 | } 210 | 211 | assert.False(t, r.MatchesEvent(ev)) 212 | } 213 | 214 | func TestComplexRuleNoMatch(t *testing.T) { 215 | ev := &kube.EnhancedEvent{} 216 | ev.InvolvedObject.Labels = map[string]string{ 217 | "env": "prod", 218 | "version": "alpha", 219 | } 220 | 221 | r := Rule{ 222 | Namespace: "kube-system", 223 | Type: "Warning", 224 | Labels: map[string]string{ 225 | "env": "prod", 226 | "version": "alpha", 227 | }, 228 | } 229 | 230 | assert.False(t, r.MatchesEvent(ev)) 231 | } 232 | 233 | func TestComplexRuleMatches(t *testing.T) { 234 | ev := &kube.EnhancedEvent{} 235 | ev.Namespace = "kube-system" 236 | ev.InvolvedObject.Kind = "Pod" 237 | ev.InvolvedObject.Labels = map[string]string{ 238 | "env": "prod", 239 | "version": "alpha", 240 | } 241 | ev.InvolvedObject.Annotations = map[string]string{ 242 | "service": "event-exporter", 243 | } 244 | 245 | r := Rule{ 246 | Namespace: "kube-system", 247 | Kind: "Pod", 248 | Labels: map[string]string{ 249 | "env": "prod", 250 | "version": "alpha", 251 | }, 252 | Annotations: map[string]string{ 253 | "service": "event*", 254 | }, 255 | } 256 | 257 | assert.True(t, r.MatchesEvent(ev)) 258 | } 259 | 260 | func TestComplexRuleAnnotationsNoMatch(t *testing.T) { 261 | ev := &kube.EnhancedEvent{} 262 | ev.Namespace = "kube-system" 263 | ev.InvolvedObject.Kind = "Pod" 264 | ev.InvolvedObject.Labels = map[string]string{ 265 | "env": "prod", 266 | "version": "alpha", 267 | } 268 | ev.InvolvedObject.Annotations = map[string]string{ 269 | "service": "event*", 270 | } 271 | 272 | r := Rule{ 273 | Namespace: "kube-system", 274 | Kind: "Pod", 275 | Labels: map[string]string{ 276 | "env": "prod", 277 | "version": "alpha", 278 | }, 279 | Annotations: map[string]string{ 280 | "name": "test*", 
281 | }, 282 | } 283 | 284 | assert.False(t, r.MatchesEvent(ev)) 285 | } 286 | 287 | func TestComplexRuleMatchesRegexp(t *testing.T) { 288 | ev := &kube.EnhancedEvent{} 289 | ev.Namespace = "kube-system" 290 | ev.InvolvedObject.Kind = "Pod" 291 | ev.InvolvedObject.Labels = map[string]string{ 292 | "env": "prod", 293 | "version": "alpha", 294 | } 295 | 296 | r := Rule{ 297 | Namespace: "kube*", 298 | Kind: "Po*", 299 | Labels: map[string]string{ 300 | "env": "prod", 301 | "version": "alpha|beta", 302 | }, 303 | } 304 | 305 | assert.True(t, r.MatchesEvent(ev)) 306 | } 307 | 308 | func TestComplexRuleNoMatchRegexp(t *testing.T) { 309 | ev := &kube.EnhancedEvent{} 310 | ev.Namespace = "kube-system" 311 | ev.Type = "Pod" 312 | ev.InvolvedObject.Labels = map[string]string{ 313 | "env": "prod", 314 | "version": "alpha", 315 | } 316 | 317 | r := Rule{ 318 | Namespace: "kube*", 319 | Type: "Deployment|ReplicaSet", 320 | Labels: map[string]string{ 321 | "env": "prod", 322 | "version": "alpha|beta", 323 | }, 324 | } 325 | 326 | assert.False(t, r.MatchesEvent(ev)) 327 | } 328 | 329 | func TestMessageRegexp(t *testing.T) { 330 | ev := &kube.EnhancedEvent{} 331 | ev.Namespace = "default" 332 | ev.Type = "Pod" 333 | ev.Message = "Successfully pulled image \"nginx:latest\"" 334 | 335 | r := Rule{ 336 | Type: "Pod", 337 | Message: "pulled.*nginx.*", 338 | } 339 | 340 | assert.True(t, r.MatchesEvent(ev)) 341 | } 342 | 343 | func TestCount(t *testing.T) { 344 | ev := &kube.EnhancedEvent{} 345 | ev.Namespace = "default" 346 | ev.Type = "Pod" 347 | ev.Message = "Successfully pulled image \"nginx:latest\"" 348 | ev.Count = 5 349 | 350 | r := Rule{ 351 | Type: "Pod", 352 | Message: "pulled.*nginx.*", 353 | MinCount: 30, 354 | } 355 | 356 | assert.False(t, r.MatchesEvent(ev)) 357 | } 358 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 
2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /pkg/kube/watcher_test.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | "time" 7 | 8 | lru "github.com/hashicorp/golang-lru" 9 | "github.com/prometheus/client_golang/prometheus/testutil" 10 | "github.com/resmoio/kubernetes-event-exporter/pkg/metrics" 11 | "github.com/rs/zerolog/log" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | corev1 "k8s.io/api/core/v1" 15 | "k8s.io/apimachinery/pkg/api/errors" 16 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 17 | "k8s.io/apimachinery/pkg/runtime/schema" 18 | "k8s.io/apimachinery/pkg/types" 19 | "k8s.io/client-go/dynamic" 20 | "k8s.io/client-go/kubernetes" 21 | ) 22 | 23 | type mockObjectMetadataProvider struct { 24 | cache *lru.ARCCache 25 | objDeleted bool 26 | } 27 | 28 | func newMockObjectMetadataProvider() ObjectMetadataProvider { 29 | cache, err := lru.NewARC(1024) 30 | if err != nil { 31 | panic("cannot init cache: " + err.Error()) 32 | } 33 | 34 | 
cache.Add("test", ObjectMetadata{ 35 | Annotations: map[string]string{"test": "test"}, 36 | Labels: map[string]string{"test": "test"}, 37 | OwnerReferences: []metav1.OwnerReference{ 38 | { 39 | APIVersion: "testAPI", 40 | Kind: "testKind", 41 | Name: "testOwner", 42 | UID: "testOwner", 43 | }, 44 | }, 45 | }) 46 | 47 | var o ObjectMetadataProvider = &mockObjectMetadataProvider{ 48 | cache: cache, 49 | objDeleted: false, 50 | } 51 | 52 | return o 53 | } 54 | 55 | func (o *mockObjectMetadataProvider) GetObjectMetadata(reference *corev1.ObjectReference, clientset *kubernetes.Clientset, dynClient dynamic.Interface, metricsStore *metrics.Store) (ObjectMetadata, error) { 56 | if o.objDeleted { 57 | return ObjectMetadata{}, errors.NewNotFound(schema.GroupResource{}, "") 58 | } 59 | 60 | val, _ := o.cache.Get("test") 61 | return val.(ObjectMetadata), nil 62 | } 63 | 64 | var _ ObjectMetadataProvider = &mockObjectMetadataProvider{} 65 | 66 | func newMockEventWatcher(MaxEventAgeSeconds int64, metricsStore *metrics.Store) *EventWatcher { 67 | watcher := &EventWatcher{ 68 | objectMetadataCache: newMockObjectMetadataProvider(), 69 | maxEventAgeSeconds: time.Second * time.Duration(MaxEventAgeSeconds), 70 | fn: func(event *EnhancedEvent) {}, 71 | metricsStore: metricsStore, 72 | } 73 | return watcher 74 | } 75 | 76 | func TestEventWatcher_EventAge_whenEventCreatedBeforeStartup(t *testing.T) { 77 | // should not discard events as old as 300s=5m 78 | var MaxEventAgeSeconds int64 = 300 79 | metricsStore := metrics.NewMetricsStore("test_") 80 | ew := newMockEventWatcher(MaxEventAgeSeconds, metricsStore) 81 | output := &bytes.Buffer{} 82 | log.Logger = log.Logger.Output(output) 83 | 84 | // event is 3m before startup time -> expect silently dropped 85 | startup := time.Now().Add(-10 * time.Minute) 86 | ew.setStartUpTime(startup) 87 | event1 := corev1.Event{ 88 | LastTimestamp: metav1.Time{Time: startup.Add(-3 * time.Minute)}, 89 | } 90 | 91 | // event is 3m before startup time ->
expect silently dropped 92 | assert.True(t, ew.isEventDiscarded(&event1)) 93 | assert.NotContains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 94 | ew.onEvent(&event1) 95 | assert.NotContains(t, output.String(), "Received event") 96 | assert.Equal(t, float64(0), testutil.ToFloat64(metricsStore.EventsProcessed)) 97 | 98 | event2 := corev1.Event{ 99 | EventTime: metav1.MicroTime{Time: startup.Add(-3 * time.Minute)}, 100 | } 101 | 102 | assert.True(t, ew.isEventDiscarded(&event2)) 103 | assert.NotContains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 104 | ew.onEvent(&event2) 105 | assert.NotContains(t, output.String(), "Received event") 106 | assert.Equal(t, float64(0), testutil.ToFloat64(metricsStore.EventsProcessed)) 107 | 108 | // event is 3m before startup time -> expect silently dropped 109 | event3 := corev1.Event{ 110 | LastTimestamp: metav1.Time{Time: startup.Add(-3 * time.Minute)}, 111 | EventTime: metav1.MicroTime{Time: startup.Add(-3 * time.Minute)}, 112 | } 113 | 114 | assert.True(t, ew.isEventDiscarded(&event3)) 115 | assert.NotContains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 116 | ew.onEvent(&event3) 117 | assert.NotContains(t, output.String(), "Received event") 118 | assert.Equal(t, float64(0), testutil.ToFloat64(metricsStore.EventsProcessed)) 119 | 120 | metrics.DestroyMetricsStore(metricsStore) 121 | } 122 | 123 | func TestEventWatcher_EventAge_whenEventCreatedAfterStartupAndBeforeMaxAge(t *testing.T) { 124 | // should not discard events as old as 300s=5m 125 | var MaxEventAgeSeconds int64 = 300 126 | metricsStore := metrics.NewMetricsStore("test_") 127 | ew := newMockEventWatcher(MaxEventAgeSeconds, metricsStore) 128 | output := &bytes.Buffer{} 129 | log.Logger = log.Logger.Output(output) 130 | 131 | startup := time.Now().Add(-10 * time.Minute) 132 | ew.setStartUpTime(startup) 133 | // event is 8m after startup time (2m in max age) -> expect
processed 134 | event1 := corev1.Event{ 135 | InvolvedObject: corev1.ObjectReference{ 136 | UID: "test", 137 | Name: "test-1", 138 | }, 139 | LastTimestamp: metav1.Time{Time: startup.Add(8 * time.Minute)}, 140 | } 141 | 142 | assert.False(t, ew.isEventDiscarded(&event1)) 143 | assert.NotContains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 144 | ew.onEvent(&event1) 145 | assert.Contains(t, output.String(), "test-1") 146 | assert.Contains(t, output.String(), "Received event") 147 | assert.Equal(t, float64(1), testutil.ToFloat64(metricsStore.EventsProcessed)) 148 | 149 | // event is 8m after stratup time (2m in max age) -> expect processed 150 | event2 := corev1.Event{ 151 | InvolvedObject: corev1.ObjectReference{ 152 | UID: "test", 153 | Name: "test-2", 154 | }, 155 | EventTime: metav1.MicroTime{Time: startup.Add(8 * time.Minute)}, 156 | } 157 | 158 | assert.False(t, ew.isEventDiscarded(&event2)) 159 | assert.NotContains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 160 | ew.onEvent(&event2) 161 | assert.Contains(t, output.String(), "test-2") 162 | assert.Contains(t, output.String(), "Received event") 163 | assert.Equal(t, float64(2), testutil.ToFloat64(metricsStore.EventsProcessed)) 164 | 165 | // event is 8m after stratup time (2m in max age) -> expect processed 166 | event3 := corev1.Event{ 167 | InvolvedObject: corev1.ObjectReference{ 168 | UID: "test", 169 | Name: "test-3", 170 | }, 171 | LastTimestamp: metav1.Time{Time: startup.Add(8 * time.Minute)}, 172 | EventTime: metav1.MicroTime{Time: startup.Add(8 * time.Minute)}, 173 | } 174 | 175 | assert.False(t, ew.isEventDiscarded(&event3)) 176 | assert.NotContains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 177 | ew.onEvent(&event3) 178 | assert.Contains(t, output.String(), "test-3") 179 | assert.Contains(t, output.String(), "Received event") 180 | assert.Equal(t, float64(3), 
testutil.ToFloat64(metricsStore.EventsProcessed)) 181 | 182 | metrics.DestroyMetricsStore(metricsStore) 183 | } 184 | 185 | func TestEventWatcher_EventAge_whenEventCreatedAfterStartupAndAfterMaxAge(t *testing.T) { 186 | // should not discard events as old as 300s=5m 187 | var MaxEventAgeSeconds int64 = 300 188 | metricsStore := metrics.NewMetricsStore("test_") 189 | ew := newMockEventWatcher(MaxEventAgeSeconds, metricsStore) 190 | output := &bytes.Buffer{} 191 | log.Logger = log.Logger.Output(output) 192 | 193 | // event is 3m after stratup time (and 2m after max age) -> expect dropped with warn 194 | startup := time.Now().Add(-10 * time.Minute) 195 | ew.setStartUpTime(startup) 196 | event1 := corev1.Event{ 197 | ObjectMeta: metav1.ObjectMeta{Name: "event1"}, 198 | LastTimestamp: metav1.Time{Time: startup.Add(3 * time.Minute)}, 199 | } 200 | assert.True(t, ew.isEventDiscarded(&event1)) 201 | assert.Contains(t, output.String(), "event1") 202 | assert.Contains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 203 | ew.onEvent(&event1) 204 | assert.NotContains(t, output.String(), "Received event") 205 | assert.Equal(t, float64(0), testutil.ToFloat64(metricsStore.EventsProcessed)) 206 | 207 | // event is 3m after stratup time (and 2m after max age) -> expect dropped with warn 208 | event2 := corev1.Event{ 209 | ObjectMeta: metav1.ObjectMeta{Name: "event2"}, 210 | EventTime: metav1.MicroTime{Time: startup.Add(3 * time.Minute)}, 211 | } 212 | 213 | assert.True(t, ew.isEventDiscarded(&event2)) 214 | assert.Contains(t, output.String(), "event2") 215 | assert.Contains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 216 | ew.onEvent(&event2) 217 | assert.NotContains(t, output.String(), "Received event") 218 | assert.Equal(t, float64(0), testutil.ToFloat64(metricsStore.EventsProcessed)) 219 | 220 | // event is 3m after stratup time (and 2m after max age) -> expect dropped with warn 221 | event3 := corev1.Event{ 222 | 
ObjectMeta: metav1.ObjectMeta{Name: "event3"}, 223 | LastTimestamp: metav1.Time{Time: startup.Add(3 * time.Minute)}, 224 | EventTime: metav1.MicroTime{Time: startup.Add(3 * time.Minute)}, 225 | } 226 | 227 | assert.True(t, ew.isEventDiscarded(&event3)) 228 | assert.Contains(t, output.String(), "event3") 229 | assert.Contains(t, output.String(), "Event discarded as being older then maxEventAgeSeconds") 230 | ew.onEvent(&event3) 231 | assert.NotContains(t, output.String(), "Received event") 232 | assert.Equal(t, float64(0), testutil.ToFloat64(metricsStore.EventsProcessed)) 233 | 234 | metrics.DestroyMetricsStore(metricsStore) 235 | } 236 | 237 | func TestOnEvent_WithObjectMetadata(t *testing.T) { 238 | metricsStore := metrics.NewMetricsStore("test_") 239 | defer metrics.DestroyMetricsStore(metricsStore) 240 | ew := newMockEventWatcher(300, metricsStore) 241 | 242 | event := EnhancedEvent{} 243 | ew.fn = func(e *EnhancedEvent) { 244 | event = *e 245 | } 246 | 247 | startup := time.Now().Add(-10 * time.Minute) 248 | ew.setStartUpTime(startup) 249 | event1 := corev1.Event{ 250 | ObjectMeta: metav1.ObjectMeta{Name: "event1"}, 251 | LastTimestamp: metav1.Time{Time: startup.Add(8 * time.Minute)}, 252 | InvolvedObject: corev1.ObjectReference{ 253 | UID: "test", 254 | Name: "test-1", 255 | }, 256 | } 257 | ew.onEvent(&event1) 258 | 259 | require.Equal(t, types.UID("test"), event.InvolvedObject.UID) 260 | require.Equal(t, "test-1", event.InvolvedObject.Name) 261 | require.Equal(t, map[string]string{"test": "test"}, event.InvolvedObject.Annotations) 262 | require.Equal(t, map[string]string{"test": "test"}, event.InvolvedObject.Labels) 263 | require.Equal(t, []metav1.OwnerReference{ 264 | { 265 | APIVersion: "testAPI", 266 | Kind: "testKind", 267 | Name: "testOwner", 268 | UID: "testOwner", 269 | }, 270 | }, event.InvolvedObject.OwnerReferences) 271 | } 272 | 273 | func TestOnEvent_DeletedObjects(t *testing.T) { 274 | metricsStore := metrics.NewMetricsStore("test_") 275 | defer 
metrics.DestroyMetricsStore(metricsStore) 276 | ew := newMockEventWatcher(300, metricsStore) 277 | ew.objectMetadataCache.(*mockObjectMetadataProvider).objDeleted = true 278 | 279 | event := EnhancedEvent{} 280 | ew.fn = func(e *EnhancedEvent) { 281 | event = *e 282 | } 283 | 284 | startup := time.Now().Add(-10 * time.Minute) 285 | ew.setStartUpTime(startup) 286 | event1 := corev1.Event{ 287 | ObjectMeta: metav1.ObjectMeta{Name: "event1"}, 288 | LastTimestamp: metav1.Time{Time: startup.Add(8 * time.Minute)}, 289 | InvolvedObject: corev1.ObjectReference{ 290 | UID: "test", 291 | Name: "test-1", 292 | }, 293 | } 294 | 295 | ew.onEvent(&event1) 296 | 297 | require.Equal(t, types.UID("test"), event.InvolvedObject.UID) 298 | require.Equal(t, "test-1", event.InvolvedObject.Name) 299 | require.Equal(t, true, event.InvolvedObject.Deleted) 300 | require.Equal(t, map[string]string(nil), event.InvolvedObject.Annotations) 301 | require.Equal(t, map[string]string(nil), event.InvolvedObject.Labels) 302 | require.Equal(t, []metav1.OwnerReference(nil), event.InvolvedObject.OwnerReferences) 303 | } 304 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kubernetes-event-exporter 2 | 3 | > **Note**: This is an active fork of [Opsgenie Kubernetes Event Exporter](https://github.com/opsgenie/kubernetes-event-exporter) 4 | since that is not maintained since November 2021. Development is sponsored by [Resmo](https://www.resmo.com). 5 | 6 | > This tool is presented at [KubeCon 2019 San Diego](https://kccncna19.sched.com/event/6aa61eca397e4ff2bdbb2845e5aebb81). 7 | 8 | This tool allows exporting the often missed Kubernetes events to various outputs so that they can be used for 9 | observability or alerting purposes. You won't believe what you are missing. 
10 | 11 | ## Deployment 12 | 13 | Head on to `deploy/` folder and apply the YAMLs in the given filename order. Do not forget to modify the 14 | `deploy/01-config.yaml` file to your configuration needs. The additional information for configuration is as follows: 15 | 16 | ### Kustomize 17 | 18 | Deploy with Kustomize by Git ref (i.e., commit sha, tag, or branch). 19 | 20 | Create namespace before deployment: 21 | 22 | ```console 23 | kubectl create namespace monitoring 24 | ``` 25 | 26 | ```yaml 27 | apiVersion: kustomize.config.k8s.io/v1beta1 28 | kind: Kustomization 29 | resources: 30 | - https://github.com/resmoio/kubernetes-event-exporter?ref=master 31 | ``` 32 | 33 | ### Helm 34 | 35 | Please use [Bitnami Chart](https://github.com/bitnami/charts/tree/main/bitnami/kubernetes-event-exporter/) which is 36 | comprehensive. 37 | 38 | ## Configuration 39 | 40 | Configuration is done via a YAML file; when run in Kubernetes, it is provided through a ConfigMap. The tool watches all the events and 41 | the user has the option to filter out some events, according to their properties. Critical events can be routed to alerting 42 | tools such as Opsgenie, or all events can be dumped to an Elasticsearch instance. You can use namespaces, labels on the 43 | related object to route some Pod related events to owners via Slack. The final routing is a tree which allows 44 | flexibility. It generally looks like the following: 45 | 46 | ```yaml 47 | route: 48 | # Main route 49 | routes: 50 | # This route allows dumping all events because it has no fields to match and no drop rules.
51 | - match: 52 | - receiver: dump 53 | # This starts another route, drops all the events in *test* namespaces and Normal events 54 | # for capturing critical events 55 | - drop: 56 | - namespace: "*test*" 57 | - type: "Normal" 58 | match: 59 | - receiver: "critical-events-queue" 60 | # This is a final route for user messages 61 | - match: 62 | - kind: "Pod|Deployment|ReplicaSet" 63 | labels: 64 | version: "dev" 65 | receiver: "slack" 66 | receivers: 67 | # See below for configuring the receivers 68 | ``` 69 | 70 | * A `match` rule is exclusive, all conditions must be matched to the event. 71 | * During processing a route, `drop` rules are executed first to filter out events. 72 | * The `match` rules in a route are independent of each other. If an event matches a rule, it goes down its subtree. 73 | * If all the `match` rules are matched, the event is passed to the `receiver`. 74 | * A route can have many sub-routes, forming a tree. 75 | * Routing starts from the root route. 76 | 77 | ## Using Secrets 78 | 79 | In your config file, you can refer to environment variables as `${API_KEY}`, therefore you can use ConfigMap or Secrets 80 | to keep the config file clean of secrets. 81 | 82 | ## Troubleshoot "Events Discarded" warning: 83 | 84 | - If there are `client-side throttling` warnings in the event-exporter log: 85 | Adjust the following values in configuration: 86 | ``` 87 | kubeQPS: 100 88 | kubeBurst: 500 89 | ``` 90 | > `Burst` to roughly match your events per minute 91 | > `QPS` to be 1/5 of the burst 92 | - If there is no request throttling, but events are still dropped: 93 | Consider increasing the events cut-off age 94 | ``` 95 | maxEventAgeSeconds: 60 96 | ``` 97 | 98 | ### Opsgenie 99 | 100 | [Opsgenie](https://www.opsgenie.com) is an alerting and on-call management tool. kubernetes-event-exporter can push 101 | events to Opsgenie so that you can notify the on-call when something critical happens.
Alerting should be precise and 102 | actionable, so you should carefully design what kind of alerts you would like in Opsgenie. A good starting point might 103 | be filtering out Normal type of events, while some additional filtering can help. Below is an example configuration. 104 | 105 | ```yaml 106 | # ... 107 | receivers: 108 | - name: "alerts" 109 | opsgenie: 110 | apiKey: xxx 111 | priority: "P3" 112 | message: "Event {{ .Reason }} for {{ .InvolvedObject.Namespace }}/{{ .InvolvedObject.Name }} on K8s cluster" 113 | alias: "{{ .UID }}" 114 | description: "
{{ toPrettyJson . }}
" 115 | tags: 116 | - "event" 117 | - "{{ .Reason }}" 118 | - "{{ .InvolvedObject.Kind }}" 119 | - "{{ .InvolvedObject.Name }}" 120 | ``` 121 | 122 | ### Webhooks/HTTP 123 | 124 | Webhooks are the easiest way of integrating this tool to external systems. It allows templating & custom headers which 125 | allows you to push events to many possible sources out there. See [Customizing Payload] for more information. 126 | 127 | ```yaml 128 | # ... 129 | receivers: 130 | - name: "alerts" 131 | webhook: 132 | endpoint: "https://my-super-secret-service.com" 133 | headers: 134 | X-API-KEY: "123" 135 | User-Agent: kube-event-exporter 1.0 136 | layout: # Optional 137 | ``` 138 | 139 | ### Elasticsearch 140 | 141 | [Elasticsearch](https://www.elastic.co/) is a full-text, distributed search engine which can also do powerful 142 | aggregations. You may decide to push all events to Elasticsearch and do some interesting queries over time to find out 143 | which images are pulled, how often pod schedules happen etc. You 144 | can [watch the presentation](https://static.sched.com/hosted_files/kccncna19/d0/Exporting%20K8s%20Events.pdf) 145 | in Kubecon to see what else you can do with aggregation and reporting. 146 | 147 | ```yaml 148 | # ... 149 | receivers: 150 | - name: "dump" 151 | elasticsearch: 152 | hosts: 153 | - http://localhost:9200 154 | index: kube-events 155 | # Ca be used optionally for time based indices, accepts Go time formatting directives 156 | indexFormat: "kube-events-{2006-01-02}" 157 | username: # optional 158 | password: # optional 159 | cloudID: # optional 160 | apiKey: # optional 161 | headers: # optional,Can be used to append the additional key/value pairs into the request headers 162 | # If set to true, it allows updating the same document in ES (might be useful handling count) 163 | useEventID: true|false 164 | # Type should be only used for clusters Version 6 and lower. 
165 | # type: kube-event 166 | # If set to true, all dots in labels and annotation keys are replaced by underscores. Defaults to false 167 | deDot: true|false 168 | layout: # Optional 169 | tls: # optional, advanced options for tls 170 | insecureSkipVerify: true|false # optional, if set to true, the tls cert won't be verified 171 | serverName: # optional, the domain, the certificate was issued for, in case it doesn't match the hostname used for the connection 172 | caFile: # optional, path to the CA file of the trusted authority the cert was signed with 173 | ``` 174 | ### OpenSearch 175 | 176 | [OpenSearch](https://opensearch.org/) is a community-driven, open source search and analytics suite derived from Apache 2.0 licensed Elasticsearch 7.10.2 & Kibana 7.10.2. 177 | OpenSearch enables people to easily ingest, secure, search, aggregate, view, and analyze data. These capabilities are popular for use cases such as application search, log analytics, and more. 178 | You may decide to push all events to OpenSearch and do some interesting queries over time to find out 179 | which images are pulled, how often pod schedules happen etc. 180 | 181 | ```yaml 182 | # ... 183 | receivers: 184 | - name: "dump" 185 | opensearch: 186 | hosts: 187 | - http://localhost:9200 188 | index: kube-events 189 | # Can be used optionally for time based indices, accepts Go time formatting directives 190 | indexFormat: "kube-events-{2006-01-02}" 191 | username: # optional 192 | password: # optional 193 | # If set to true, it allows updating the same document in ES (might be useful handling count) 194 | useEventID: true|false 195 | # Type should be only used for clusters Version 6 and lower.
Defaults false 198 | deDot: true|false 199 | layout: # Optional 200 | tls: # optional, advanced options for tls 201 | insecureSkipVerify: true|false # optional, if set to true, the tls cert won't be verified 202 | serverName: # optional, the domain, the certificate was issued for, in case it doesn't match the hostname used for the connection 203 | caFile: # optional, path to the CA file of the trusted authority the cert was signed with 204 | ``` 205 | 206 | ### Slack 207 | 208 | Slack is a cloud-based instant messaging platform where many people use it for integrations and getting notified by 209 | software such as Jira, Opsgenie, Google Calendar etc. and even some implement ChatOps on it. This tool also allows 210 | exporting events to Slack channels or direct messages to persons. If your objects in Kubernetes, such as Pods, 211 | Deployments have real owners, you can opt-in to notify them via important events by using the labels of the objects. If 212 | a Pod sandbox changes and it's restarted, or it cannot find the Docker image, you can immediately notify the owner. 213 | 214 | ```yaml 215 | # ... 216 | receivers: 217 | - name: "slack" 218 | slack: 219 | token: YOUR-API-TOKEN-HERE 220 | channel: "@{{ .InvolvedObject.Labels.owner }}" 221 | message: "{{ .Message }}" 222 | color: # optional 223 | title: # optional 224 | author_name: # optional 225 | footer: # optional 226 | fields: 227 | namespace: "{{ .Namespace }}" 228 | reason: "{{ .Reason }}" 229 | object: "{{ .Namespace }}" 230 | 231 | ``` 232 | 233 | ### Kinesis 234 | 235 | Kinesis is an AWS service allows to collect high throughput messages and allow it to be used in stream processing. 236 | 237 | ```yaml 238 | # ... 239 | receivers: 240 | - name: "kinesis" 241 | kinesis: 242 | streamName: "events-pipeline" 243 | region: us-west-2 244 | layout: # Optional 245 | ``` 246 | 247 | ### Firehose 248 | 249 | Firehose is an AWS service providing high throughput message collection for use in stream processing. 
250 | 251 | ```yaml 252 | # ... 253 | receivers: 254 | - name: "firehose" 255 | firehose: 256 | deliveryStreamName: "events-pipeline" 257 | region: us-west-2 258 | layout: # Optional 259 | ``` 260 | ### SNS 261 | 262 | SNS is an AWS service for highly durable pub/sub messaging system. 263 | 264 | ```yaml 265 | # ... 266 | receivers: 267 | - name: "sns" 268 | sns: 269 | topicARN: "arn:aws:sns:us-east-1:1234567890123456:mytopic" 270 | region: "us-west-2" 271 | layout: # Optional 272 | ``` 273 | 274 | ### SQS 275 | 276 | SQS is an AWS service for message queuing that allows high throughput messaging. 277 | 278 | ```yaml 279 | # ... 280 | receivers: 281 | - name: "sqs" 282 | sqs: 283 | queueName: "/tmp/dump" 284 | region: us-west-2 285 | layout: # Optional 286 | ``` 287 | 288 | ### File 289 | 290 | For some debugging purposes, you might want to push the events to files. Or you can already have a logging tool that can 291 | ingest these files and it might be a good idea to just use plain old school files as an integration point. 292 | 293 | ```yaml 294 | # ... 295 | receivers: 296 | - name: "file" 297 | file: 298 | path: "/tmp/dump" 299 | layout: # Optional 300 | ``` 301 | 302 | ### Stdout 303 | 304 | Standard out is also another file in Linux. `logLevel` refers to the application logging severity - available levels 305 | `trace`, `debug`, `info`, `warn`, `error`, `fatal` and `panic`. When not specified, default level is set to `info`. You 306 | can use the following configuration as an example. 307 | 308 | ```yaml 309 | logLevel: error 310 | logFormat: json 311 | maxEventAgeSeconds: 5 312 | route: 313 | routes: 314 | - match: 315 | - receiver: "dump" 316 | receivers: 317 | - name: "dump" 318 | stdout: 319 | deDot: true|false 320 | ``` 321 | 322 | ### Kafka 323 | 324 | Kafka is a popular tool used for real-time data pipelines. You can combine it with other tools for further analysis. 
325 | 326 | ```yaml 327 | receivers: 328 | - name: "kafka" 329 | kafka: 330 | clientId: "kubernetes" 331 | topic: "kube-event" 332 | brokers: 333 | - "localhost:9092" 334 | compressionCodec: "snappy" 335 | tls: 336 | enable: true 337 | certFile: "kafka-client.crt" 338 | keyFile: "kafka-client.key" 339 | caFile: "kafka-ca.crt" 340 | sasl: 341 | enable: true 342 | username: "kube-event-producer" 343 | password: "kube-event-producer-password" 344 | mechanism: "sha512" 345 | layout: #optional 346 | kind: "{{ .InvolvedObject.Kind }}" 347 | namespace: "{{ .InvolvedObject.Namespace }}" 348 | name: "{{ .InvolvedObject.Name }}" 349 | reason: "{{ .Reason }}" 350 | message: "{{ .Message }}" 351 | type: "{{ .Type }}" 352 | createdAt: "{{ .GetTimestampISO8601 }}" 353 | ``` 354 | 355 | ### OpsCenter 356 | 357 | [OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) provides a central location 358 | where operations engineers and IT professionals can view, investigate, and resolve operational work items (OpsItems) 359 | related to AWS resources. OpsCenter is designed to reduce mean time to resolution for issues impacting AWS resources. 360 | This Systems Manager capability aggregates and standardizes OpsItems across services while providing contextual 361 | investigation data about each OpsItem, related OpsItems, and related resources. OpsCenter also provides Systems Manager 362 | Automation documents (runbooks) that you can use to quickly resolve issues. You can specify searchable, custom data for 363 | each OpsItem. You can also view automatically-generated summary reports about OpsItems by status and source. 364 | 365 | ```yaml 366 | # ... 
367 | receivers: 368 | - name: "alerts" 369 | opscenter: 370 | title: "{{ .Message }}" 371 | category: "{{ .Reason }}" # Optional 372 | description: "Event {{ .Reason }} for {{ .InvolvedObject.Namespace }}/{{ .InvolvedObject.Name }} on K8s cluster" 373 | notifications: # Optional: SNS ARN 374 | - "sns1" 375 | - "sns2" 376 | operationalData: # Optional 377 | - Reason: "{{ .Reason }}" 378 | priority: "6" # Optional 379 | region: "us-east1" 380 | relatedOpsItems: # Optional: OpsItems ARN 381 | - "ops1" 382 | - "ops2" 383 | severity: "6" # Optional 384 | source: "production" 385 | tags: # Optional 386 | - ENV: "{{ .InvolvedObject.Namespace }}" 387 | ``` 388 | 389 | ### Customizing Payload 390 | 391 | Some receivers allow customizing the payload. This can be useful to integrate with external systems that require the 392 | data to be in a specific format. It is designed to reduce the need for code writing. It allows mapping an event using Go 393 | templates, with [sprig](https://github.com/Masterminds/sprig) library additions. It supports a recursive map definition, so that 394 | you can create virtually any kind of JSON to be pushed to a webhook, a Kinesis stream, SQS queue etc. 395 | 396 | ```yaml 397 | # ...
398 | receivers: 399 | - name: pipe 400 | kinesis: 401 | region: us-west-2 402 | streamName: event-pipeline 403 | layout: 404 | region: "us-west-2" 405 | eventType: "kubernetes-event" 406 | createdAt: "{{ .GetTimestampMs }}" 407 | details: 408 | message: "{{ .Message }}" 409 | reason: "{{ .Reason }}" 410 | type: "{{ .Type }}" 411 | count: "{{ .Count }}" 412 | kind: "{{ .InvolvedObject.Kind }}" 413 | name: "{{ .InvolvedObject.Name }}" 414 | namespace: "{{ .Namespace }}" 415 | component: "{{ .Source.Component }}" 416 | host: "{{ .Source.Host }}" 417 | labels: "{{ toJson .InvolvedObject.Labels}}" 418 | ``` 419 | 420 | ### Pubsub 421 | 422 | Pub/Sub is a fully-managed real-time messaging service that allows you to send and receive messages between independent 423 | applications. 424 | 425 | ```yaml 426 | receivers: 427 | - name: "pubsub" 428 | pubsub: 429 | gcloud_project_id: "my-project" 430 | topic: "kube-event" 431 | create_topic: False 432 | ``` 433 | 434 | ### Teams 435 | 436 | Microsoft Teams is your hub for teamwork in Office 365. All your team conversations, files, meetings, and apps live 437 | together in a single shared workspace, and you can take it with you on your favorite mobile device. 438 | 439 | ```yaml 440 | # ... 441 | receivers: 442 | - name: "ms_teams" 443 | teams: 444 | endpoint: "https://outlook.office.com/webhook/..." 445 | layout: # Optional 446 | ``` 447 | 448 | ### Syslog 449 | 450 | Syslog sink support enables to write k8s-events to syslog daemon server over tcp/udp. This can also be consumed by 451 | rsyslog. 452 | 453 | ```yaml 454 | # ... 
455 | receivers: 456 | - name: "syslog" 457 | syslog: 458 | network: "tcp" 459 | address: "127.0.0.1:11514" 460 | tag: "k8s.event" 461 | 462 | ``` 463 | 464 | # BigQuery 465 | 466 | Google's query thing 467 | 468 | ```yaml 469 | receivers: 470 | - name: "my-big-query" 471 | bigquery: 472 | location: 473 | project: 474 | dataset: 475 | table: 476 | credentials_path: 477 | batch_size: 478 | max_retries: 479 | interval_seconds: 480 | timeout_seconds: 481 | ``` 482 | 483 | # Pipe 484 | 485 | pipe output directly into some file descriptor 486 | 487 | ```yaml 488 | receivers: 489 | - name: "my_pipe" 490 | pipe: 491 | path: "/dev/stdout" 492 | deDot: true|false 493 | ``` 494 | 495 | # AWS EventBridge 496 | 497 | ```yaml 498 | receivers: 499 | - name: "eventbridge" 500 | eventbridge: 501 | detailType: "deployment" 502 | source: "cd" 503 | eventBusName: "default" 504 | region: "ap-southeast-1" 505 | details: 506 | message: "{{ .Message }}" 507 | namespace: "{{ .Namespace }}" 508 | reason: "{{ .Reason }}" 509 | object: "{{ .Namespace }}" 510 | 511 | ``` 512 | 513 | # Loki 514 | 515 | ```yaml 516 | receivers: 517 | - name: "loki" 518 | loki: 519 | headers: # optional 520 | X-Scope-OrgID: tennantID 521 | streamLabels: 522 | foo: bar 523 | url: http://127.0.0.1:3100/loki/api/v1/push 524 | ``` 525 | --------------------------------------------------------------------------------