├── util ├── testdata │ ├── jsonparse_eve_empty.json │ ├── jsonparse_eve_nulls.json │ ├── jsonparse_eve_broken1.json │ └── jsonparse_eve.json ├── hostnamer.go ├── submitter.go ├── add_fields_preprocess.go ├── add_fields_preprocess_test.go ├── hostnamer_rdns_test.go ├── hostnamer_rdns.go ├── submitter_dummy.go ├── performance_stats_encoder.go ├── alertifier_providers.go ├── performance_stats_encoder_test.go ├── submitter_test.go ├── consumer.go └── util_test.go ├── .gitignore ├── cmd └── fever │ ├── main.go │ └── cmds │ ├── version.go │ ├── makeman.go │ ├── testdata │ └── alertify_input.json │ ├── alertify_test.go │ ├── root.go │ └── bloom.go ├── scripts └── makelpush ├── mgmt ├── server.go ├── state.go ├── mgmt.proto ├── endpointconfig.go ├── server_test.go └── mgmtserver.go ├── input ├── input.go ├── input_socket_test.go ├── input_stdin.go ├── input_socket.go └── input_redis_test.go ├── fever.service ├── protomgmtc.sh ├── db ├── slurper.go ├── slurper_dummy.go ├── slurper_ejdb.go ├── slurper_postgres_test.go └── sql.go ├── types ├── flow_event_test.go ├── entry.go └── eve_test.go ├── .github └── workflows │ └── go.yml ├── protoc.sh ├── processing ├── void_handler.go ├── handler.go ├── flow_notifier.go ├── context_shipper_amqp.go ├── rdns_handler_test.go ├── context_collector_test.go ├── heartbeat_injector_test.go ├── forward_handler.go ├── context_shipper_amqp_test.go ├── rdns_handler.go ├── dns_aggregator_test.go ├── flow_extractor.go ├── flow_profiler.go ├── flow_profiler_test.go ├── heartbeat_injector.go ├── event_profiler.go ├── handler_dispatcher_test.go ├── handler_dispatcher.go ├── ip_handler.go ├── flow_extractor_test.go ├── flow_aggregator.go └── context_collector.go ├── doc ├── flow-agg.md └── database.md ├── LICENSE ├── thirdparty └── google │ └── protobuf │ ├── empty.proto │ └── timestamp.proto ├── go.mod ├── fever.yaml └── CHANGELOG.md /util/testdata/jsonparse_eve_empty.json: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.eve.json 2 | *.eve.json.gz 3 | .vs 4 | tmp/** -------------------------------------------------------------------------------- /cmd/fever/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, 2018, DCSO GmbH 5 | 6 | import cmd "github.com/DCSO/fever/cmd/fever/cmds" 7 | 8 | func main() { 9 | cmd.Execute() 10 | } 11 | -------------------------------------------------------------------------------- /scripts/makelpush: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env lua 2 | 3 | for line in io.lines() do 4 | io.stdout:write("LPUSH suricata \"") 5 | escapedstr = string.gsub(line, "\"", "\\\"") 6 | io.stdout:write(escapedstr) 7 | io.stdout:write("\"\r\n") 8 | end 9 | -------------------------------------------------------------------------------- /mgmt/server.go: -------------------------------------------------------------------------------- 1 | package mgmt 2 | 3 | // Server ... 4 | type Server interface { 5 | // ListenAndServe is expected to create a listener and to block until a 6 | // shutdown is invoked. 7 | ListenAndServe() error 8 | Stop() 9 | } 10 | -------------------------------------------------------------------------------- /util/hostnamer.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // HostNamer is an interface specifying a component that provides 4 | // cached hostnames for IP addresses passed as strings. 
5 | type HostNamer interface { 6 | GetHostname(ipAddr string) ([]string, error) 7 | Flush() 8 | } 9 | -------------------------------------------------------------------------------- /util/testdata/jsonparse_eve_nulls.json: -------------------------------------------------------------------------------- 1 | {"timestamp":"2017-03-06T06:54:10.839668+0000","flow_id":null,"in_iface":"enp2s0f1","event_type":"fileinfo","vlan":null,"src_ip":null,"src_port":null,"dest_ip":null,"dest_port":null,"http":{"hostname":"api.icndb.com","url":null,"state":"CLOSED","md5":null}} 2 | -------------------------------------------------------------------------------- /mgmt/state.go: -------------------------------------------------------------------------------- 1 | package mgmt 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2021, DCSO GmbH 5 | 6 | import "github.com/DCSO/fever/processing" 7 | 8 | // State contains references to components to be affected by RPC calls. 9 | type State struct { 10 | BloomHandler *processing.BloomHandler 11 | } 12 | -------------------------------------------------------------------------------- /input/input.go: -------------------------------------------------------------------------------- 1 | package input 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | // Input is an interface describing the behaviour for a component to 7 | // handle events parsed from EVE input. 
8 | type Input interface { 9 | GetName() string 10 | Run() 11 | SetVerbose(bool) 12 | Stop(chan bool) 13 | } 14 | -------------------------------------------------------------------------------- /fever.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=fast, extensible, versatile event router 3 | Documentation=https://github.com/DCSO/fever 4 | After=network.target 5 | 6 | [Service] 7 | SyslogIdentifier=fever 8 | EnvironmentFile=-/etc/default/fever 9 | ExecStart=/usr/bin/fever run $FEVER_ARGS 10 | ExecStop=/usr/bin/pkill fever 11 | Restart=on-failure 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /util/submitter.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | // StatsSubmitter is an interface for an entity that sends JSON data to an endpoint 7 | type StatsSubmitter interface { 8 | Submit(rawData []byte, key string, contentType string) 9 | SubmitWithHeaders(rawData []byte, key string, contentType string, myHeaders map[string]string) 10 | UseCompression() 11 | Finish() 12 | } 13 | -------------------------------------------------------------------------------- /protomgmtc.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | # this path has to contain protobuf's well-known types 4 | WELL_KNOWN_TYPES="thirdparty" 5 | # this is the mgmt project's root path 6 | MGMT_PATH=mgmt 7 | 8 | find ${MGMT_PATH} -name "*.pb.go" -delete 9 | 10 | protoc \ 11 | --proto_path="${WELL_KNOWN_TYPES}" \ 12 | --proto_path="${MGMT_PATH}" \ 13 | --go_out=plugins=grpc:${GOPATH}/src \ 14 | ${MGMT_PATH}/mgmt.proto 15 | -------------------------------------------------------------------------------- /cmd/fever/cmds/version.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | const ( 10 | version = "1.4.0" 11 | ) 12 | 13 | // versionCmd represents the version command 14 | var versionCmd = &cobra.Command{ 15 | Use: "version", 16 | Short: "Show FEVER version", 17 | Run: func(cmd *cobra.Command, args []string) { 18 | fmt.Println(version) 19 | }, 20 | } 21 | 22 | func init() { 23 | rootCmd.AddCommand(versionCmd) 24 | } 25 | -------------------------------------------------------------------------------- /db/slurper.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "context" 8 | "github.com/DCSO/fever/types" 9 | ) 10 | 11 | // Slurper is an interface for a worker that can be started (Run()) with a given 12 | // channel delivering Entries, storing them in an associated data store. 13 | // Finish() can be used to finalize any state. 
14 | // TODO implement proper start/stop (atm 'hard' stop by exit()ing) 15 | type Slurper interface { 16 | Run(context.Context, chan types.Entry) 17 | Finish() 18 | } 19 | -------------------------------------------------------------------------------- /db/slurper_dummy.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "context" 8 | 9 | "github.com/DCSO/fever/types" 10 | ) 11 | 12 | // DummySlurper is a slurper that just consumes entries with no action. 13 | type DummySlurper struct{} 14 | 15 | // Run starts a DummySlurper. 16 | func (s *DummySlurper) Run(_ctx context.Context, eventchan chan types.Entry) { 17 | go func() { 18 | for range eventchan { 19 | } 20 | }() 21 | } 22 | 23 | // Finish is a null operation in the DummySlurper implementation. 24 | func (s *DummySlurper) Finish() { 25 | } 26 | -------------------------------------------------------------------------------- /types/flow_event_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, 2018, DCSO GmbH 5 | 6 | import ( 7 | "bytes" 8 | "net" 9 | "testing" 10 | ) 11 | 12 | func TestIPParsing(t *testing.T) { 13 | ipv4 := "8.8.8.8" 14 | ipv6 := "2001:0db8:85a3:0000:0000:8a2e:0370:7334" 15 | parsedIPv4, err := parseIP(ipv4) 16 | if err != nil || !bytes.Equal(parsedIPv4, net.ParseIP(ipv4).To4()) { 17 | t.Fatal("Conversion failed!") 18 | } 19 | parsedIPv6, err := parseIP(ipv6) 20 | if err != nil || !bytes.Equal(parsedIPv6, net.ParseIP(ipv6)) { 21 | t.Fatal("Conversion failed!") 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /util/testdata/jsonparse_eve_broken1.json: -------------------------------------------------------------------------------- 1 | 
{"timestamp":"2017-03-06T06:54:06.047429+0000","flow_id":4711,"in_iface":"enp2s0f1","event_type":"dns","vlan":61,"src_ip":"10.0.0.10","src_port":53,"dest_ip":"10.0.0.11","dest_port":51323,"proto":"UDP","dns":{"type":"answer","id":1,"rcode":"NOERROR","rrname":"test.test.local","rrtype":"A","ttl":2365,"rdata":"10.0.0.12"}} 2 | {"timestamp":"2017-03-06T06:54:10 3 | {"timestamp":"2017-03-06T06:54:14.002504+0000","flow_id":2134,"in_iface":"enp2s0f1","event_type":"http","vlan":72,"src_ip":"10.0.0.10","src_port":24092,"dest_ip":"10.0.0.11","dest_port":80,"proto":"TCP","tx_id":0,"http":{"hostname":"foobar","url":"\/scripts\/wpnbr.dll","http_content_type":"text\/xml","http_method":"POST","protocol":"HTTP\/1.1","status":200,"length":347}} 4 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Go build 3 | 4 | on: 5 | - push 6 | - pull_request 7 | 8 | jobs: 9 | build: 10 | name: "Go build" 11 | permissions: 12 | contents: read 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | 17 | - name: Set up Go 18 | uses: actions/setup-go@v2 19 | with: 20 | go-version: 1.21 21 | 22 | - name: Install non-Go deps 23 | run: | 24 | sudo apt update 25 | sudo apt -yq install redis-server 26 | 27 | - name: Get and build deps 28 | run: go get -v -t ./... 29 | 30 | - name: Build executable 31 | run: go build -v -o fever cmd/fever/main.go 32 | 33 | - name: Run tests 34 | run: go test -v ./... 
35 | -------------------------------------------------------------------------------- /cmd/fever/cmds/makeman.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "github.com/spf13/cobra" 6 | "github.com/spf13/cobra/doc" 7 | ) 8 | 9 | // mmanCmd represents the makeman command 10 | var mmanCmd = &cobra.Command{ 11 | Use: "makeman [options]", 12 | Short: "Create man pages", 13 | Run: func(cmd *cobra.Command, args []string) { 14 | targetDir, err := cmd.Flags().GetString("dir") 15 | if err != nil { 16 | log.Fatal(err) 17 | } 18 | header := &doc.GenManHeader{} 19 | err = doc.GenManTree(rootCmd, header, targetDir) 20 | if err != nil { 21 | log.Fatal(err) 22 | } 23 | for _, v := range rootCmd.Commands() { 24 | err = doc.GenManTree(v, header, targetDir) 25 | if err != nil { 26 | log.Fatal(err) 27 | } 28 | } 29 | }, 30 | } 31 | 32 | func init() { 33 | rootCmd.AddCommand(mmanCmd) 34 | mmanCmd.Flags().StringP("dir", "d", ".", "target directory for man pages") 35 | } 36 | -------------------------------------------------------------------------------- /protoc.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | # this path has to contain protobuf's well-known types 4 | WELL_KNOWN_TYPES="thirdparty" 5 | # this is the stenosis project's root path 6 | STENOSIS_PATH=stenosis 7 | 8 | find ${STENOSIS_PATH} -name "*.pb.go" -delete 9 | 10 | protoc \ 11 | --proto_path="${WELL_KNOWN_TYPES}" \ 12 | --proto_path="${STENOSIS_PATH}" \ 13 | --proto_path="${GOPATH}/src" \ 14 | --go_out=:${GOPATH}/src \ 15 | ${STENOSIS_PATH}/api/hateoas.proto 16 | 17 | protoc \ 18 | --proto_path="${WELL_KNOWN_TYPES}" \ 19 | --proto_path="${STENOSIS_PATH}" \ 20 | --proto_path="${GOPATH}/src" \ 21 | --go_out=:${GOPATH}/src \ 22 | ${STENOSIS_PATH}/task/*.proto 23 | 24 | protoc \ 25 | --proto_path="${WELL_KNOWN_TYPES}" \ 26 | --proto_path="${STENOSIS_PATH}" \ 27 | --proto_path="${GOPATH}/src" \ 28 | --go_out=plugins=grpc:${GOPATH}/src \ 29 | ${STENOSIS_PATH}/api/stenosisservicequery.proto 30 | -------------------------------------------------------------------------------- /mgmt/mgmt.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package mgmt; 4 | 5 | import "google/protobuf/empty.proto"; 6 | 7 | option go_package = "github.com/DCSO/fever/mgmt"; 8 | 9 | message MgmtBloomInfoResponse { 10 | bool has_bloom = 1; 11 | uint64 capacity = 2; 12 | uint64 elements = 3; 13 | uint64 bits = 4; 14 | uint64 hashfuncs = 5; 15 | double fpprob = 6; 16 | } 17 | 18 | message MgmtAliveRequest { 19 | string alive = 1; 20 | } 21 | 22 | message MgmtAliveResponse { 23 | string echo = 1; 24 | } 25 | 26 | message MgmtBloomAddRequest { 27 | string ioc = 1; 28 | } 29 | 30 | message MgmtBloomAddResponse { 31 | uint64 added = 1; 32 | } 33 | 34 | service MgmtService { 35 | rpc Alive(MgmtAliveRequest) returns (MgmtAliveResponse); 36 | rpc BloomInfo(google.protobuf.Empty) returns (MgmtBloomInfoResponse); 37 | rpc BloomAdd(stream MgmtBloomAddRequest) returns (MgmtBloomAddResponse); 38 | rpc BloomSave(google.protobuf.Empty) returns 
(google.protobuf.Empty); 39 | rpc BloomReload(google.protobuf.Empty) returns (google.protobuf.Empty); 40 | } -------------------------------------------------------------------------------- /processing/void_handler.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2019, DCSO GmbH 5 | 6 | import ( 7 | "github.com/DCSO/fever/types" 8 | 9 | log "github.com/sirupsen/logrus" 10 | ) 11 | 12 | // VoidHandler is a handler that does nothing. 13 | type VoidHandler struct { 14 | Logger *log.Entry 15 | } 16 | 17 | // MakeVoidHandler creates a new forwarding handler 18 | func MakeVoidHandler() *VoidHandler { 19 | fh := &VoidHandler{ 20 | Logger: log.WithFields(log.Fields{ 21 | "domain": "forward", 22 | }), 23 | } 24 | return fh 25 | } 26 | 27 | // Consume processes an Entry and discards it 28 | func (fh *VoidHandler) Consume(e *types.Entry) error { 29 | _ = e 30 | return nil 31 | } 32 | 33 | // GetName returns the name of the handler 34 | func (fh *VoidHandler) GetName() string { 35 | return "Void forwarding handler" 36 | } 37 | 38 | // GetEventTypes returns a slice of event type strings that this handler 39 | // should be applied to 40 | func (fh *VoidHandler) GetEventTypes() []string { 41 | return []string{"*"} 42 | } 43 | -------------------------------------------------------------------------------- /processing/handler.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "github.com/DCSO/fever/types" 8 | "github.com/DCSO/fever/util" 9 | ) 10 | 11 | // Handler is an interface describing the behaviour for a component to 12 | // handle events parsed from EVE input. 
13 | type Handler interface { 14 | GetEventTypes() []string 15 | GetName() string 16 | Consume(*types.Entry) error 17 | } 18 | 19 | // ConcurrentHandler is an interface describing the behaviour for a component to 20 | // handle events parsed from EVE input, while concurrently performing other 21 | // actions, such as collecting, integrating and/or forwarding data. 22 | type ConcurrentHandler interface { 23 | Handler 24 | Run() 25 | Stop(chan bool) 26 | } 27 | 28 | // StatsGeneratingHandler is an interface describing a Handler which also 29 | // periodically outputs performance statistics using the provided 30 | // PerformanceStatsEncoder. 31 | type StatsGeneratingHandler interface { 32 | Handler 33 | SubmitStats(*util.PerformanceStatsEncoder) 34 | } 35 | -------------------------------------------------------------------------------- /processing/flow_notifier.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2020, DCSO GmbH 5 | 6 | import ( 7 | "github.com/DCSO/fever/types" 8 | ) 9 | 10 | // FlowNotifier is a handler that just passes flow events on a 11 | // given channel once encountered. 12 | type FlowNotifier struct { 13 | FlowNotifyChan chan types.Entry 14 | } 15 | 16 | // MakeFlowNotifier creates a new FlowNotifier. 17 | func MakeFlowNotifier(outChan chan types.Entry) *FlowNotifier { 18 | notifier := &FlowNotifier{ 19 | FlowNotifyChan: outChan, 20 | } 21 | return notifier 22 | } 23 | 24 | // Consume processes an Entry, emitting an Entry on the output 25 | // channel 26 | func (n *FlowNotifier) Consume(e *types.Entry) error { 27 | n.FlowNotifyChan <- *e 28 | return nil 29 | } 30 | 31 | // GetName returns the name of the handler 32 | func (n *FlowNotifier) GetName() string { 33 | return "Flow notifier" 34 | } 35 | 36 | // GetEventTypes returns a slice of event type strings that this handler 37 | // should be applied to -- flow in this case. 
38 | func (n *FlowNotifier) GetEventTypes() []string { 39 | return []string{"flow"} 40 | } 41 | -------------------------------------------------------------------------------- /db/slurper_ejdb.go: -------------------------------------------------------------------------------- 1 | //go:build ignore 2 | // +build ignore 3 | 4 | package db 5 | 6 | import ( 7 | "github.com/mkilling/goejdb" 8 | log "github.com/sirupsen/logrus" 9 | ) 10 | 11 | // EJDBSlurper is a Slurper that stores events in an EJDB database. 12 | type EJDBSlurper struct { 13 | db *goejdb.Ejdb 14 | } 15 | 16 | // Run starts an EJDBSlurper. 17 | func (s *EJDBSlurper) Run(eventchan chan Entry) { 18 | var err error 19 | i := 0 20 | s.db, err = goejdb.Open("eventsdb", goejdb.JBOWRITER|goejdb.JBOCREAT) 21 | if err != nil { 22 | log.Warn(err) 23 | } 24 | coll, _ := s.db.CreateColl("events", nil) 25 | coll.SetIndex("timestamp", goejdb.JBIDXSTR) 26 | coll.SetIndex("event_type", goejdb.JBIDXSTR) 27 | coll.SetIndex("dns.rrname", goejdb.JBIDXSTR) 28 | coll.SetIndex("alert.payload_printable", goejdb.JBIDXSTR) 29 | go func() { 30 | coll.BeginTransaction() 31 | for d := range eventchan { 32 | if i%5000 == 0 { 33 | coll.CommitTransaction() 34 | coll.BeginTransaction() 35 | } 36 | coll.SaveJson(d.JSONLine) 37 | i++ 38 | } 39 | }() 40 | } 41 | 42 | // Finish closes the associated EJDB database.. 
43 | func (s *EJDBSlurper) Finish() { 44 | s.db.Close() 45 | } 46 | -------------------------------------------------------------------------------- /types/entry.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, 2018, DCSO GmbH 5 | 6 | // DNSAnswer is a single DNS answer as observed by Suricata 7 | type DNSAnswer struct { 8 | DNSRRName string 9 | DNSRRType string 10 | DNSRCode string 11 | DNSRData string 12 | DNSType string 13 | } 14 | 15 | // Entry is a collection of data that needs to be parsed FAST from the entry 16 | type Entry struct { 17 | SrcIP string 18 | SrcHosts []string 19 | SrcPort int64 20 | DestIP string 21 | DestHosts []string 22 | DestPort int64 23 | Timestamp string 24 | EventType string 25 | Proto string 26 | HTTPHost string 27 | HTTPUrl string 28 | HTTPMethod string 29 | JSONLine string 30 | DNSVersion int64 31 | DNSRRName string 32 | DNSRRType string 33 | DNSRCode string 34 | DNSRData string 35 | DNSType string 36 | DNSAnswers []DNSAnswer 37 | TLSSNI string 38 | BytesToClient int64 39 | BytesToServer int64 40 | PktsToClient int64 41 | PktsToServer int64 42 | FlowID string 43 | Iface string 44 | AppProto string 45 | TLSFingerprint string 46 | } 47 | -------------------------------------------------------------------------------- /util/add_fields_preprocess.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | 6 | log "github.com/sirupsen/logrus" 7 | ) 8 | 9 | // PreprocessAddedFields preprocesses the added fields to be able to only use 10 | // fast string operations to add them to JSON text later. This code 11 | // progressively builds a JSON snippet by adding JSON key-value pairs for each 12 | // added field, e.g. `, "foo":"bar"`. 
13 | func PreprocessAddedFields(fields map[string]string) (string, error) { 14 | 	j := "" 15 | 	for k, v := range fields { 16 | 		// Escape the fields to make sure we do not mess up the JSON when 17 | 		// encountering weird symbols in field names or values. 18 | 		kval, err := EscapeJSON(k) 19 | 		if err != nil { 20 | 			// Report the key here -- it is the key that failed to escape. 21 | 			log.Warningf("cannot escape key: %s", k) 22 | 			return "", err 23 | 		} 24 | 		vval, err := EscapeJSON(v) 25 | 		if err != nil { 26 | 			log.Warningf("cannot escape value: %s", v) 27 | 			return "", err 28 | 		} 29 | 		j += fmt.Sprintf(",%s:%s", kval, vval) 30 | 	} 31 | 	// We finish the list of key-value pairs with a final brace: 32 | 	// `, "foo":"bar"}`. This string can now just replace the final brace in a 33 | 	// given JSON string. If there were no added fields, we just leave the 34 | 	// output at the final brace. 35 | 	j += "}" 36 | 	return j, nil 37 | } 38 | -------------------------------------------------------------------------------- /util/add_fields_preprocess_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import "testing" 4 | 5 | func TestPreprocessAddedFields(t *testing.T) { 6 | 	type args struct { 7 | 		fields map[string]string 8 | 	} 9 | 	tests := []struct { 10 | 		name    string 11 | 		args    args 12 | 		want    []string 13 | 		wantErr bool 14 | 	}{ 15 | 		{ 16 | 			name: "empty fieldset", 17 | 			args: args{ 18 | 				fields: map[string]string{}, 19 | 			}, 20 | 			want: []string{ 21 | 				"}", 22 | 			}, 23 | 		}, 24 | 		{ 25 | 			name: "fieldset present", 26 | 			args: args{ 27 | 				fields: map[string]string{ 28 | 					"foo": "bar", 29 | 					"baz": "quux", 30 | 				}, 31 | 			}, 32 | 			want: []string{ 33 | 				`,"foo":"bar","baz":"quux"}`, 34 | 				`,"baz":"quux","foo":"bar"}`, 35 | 			}, 36 | 		}, 37 | 	} 38 | 	for _, tt := range tests { 39 | 		t.Run(tt.name, func(t *testing.T) { 40 | 			got, err := PreprocessAddedFields(tt.args.fields) 41 | 			if (err != nil) != tt.wantErr { 42 | 				t.Errorf("PreprocessAddedFields() error = %v, wantErr %v", err, tt.wantErr) 43 | 				return 44 | 			} 45 | 			found := false
46 | for _, w := range tt.want { 47 | if got == w { 48 | found = true 49 | break 50 | } 51 | } 52 | if !found { 53 | t.Errorf("PreprocessAddedFields() = %v, want %v", got, tt.want) 54 | } 55 | }) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /util/hostnamer_rdns_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | log "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func _TestHostNamerQuad8(t *testing.T, ip string) { 11 | hn := NewHostNamerRDNS(5*time.Second, 5*time.Second) 12 | v, err := hn.GetHostname(ip) 13 | if err != nil { 14 | log.Info(err) 15 | t.Skip() 16 | } 17 | if len(v) == 0 { 18 | t.Fatal("no response") 19 | } else { 20 | log.Infof("got response %v", v) 21 | } 22 | v, err = hn.GetHostname(ip) 23 | if err != nil { 24 | t.Fatal(err) 25 | } 26 | if len(v) == 0 { 27 | t.Fatal("no response") 28 | } else { 29 | log.Infof("got response %v", v) 30 | } 31 | time.Sleep(6 * time.Second) 32 | v, err = hn.GetHostname(ip) 33 | if err != nil { 34 | t.Fatal(err) 35 | } 36 | if len(v) == 0 { 37 | t.Fatal("no response") 38 | } else { 39 | log.Infof("got response %v", v) 40 | } 41 | } 42 | 43 | func TestHostNamerQuad8v4(t *testing.T) { 44 | _TestHostNamerQuad8(t, "8.8.8.8") 45 | } 46 | 47 | func TestHostNamerQuad8v6(t *testing.T) { 48 | _TestHostNamerQuad8(t, "2001:4860:4860::8888") 49 | } 50 | 51 | func TestHostNamerInvalid(t *testing.T) { 52 | hn := NewHostNamerRDNS(5*time.Second, 5*time.Second) 53 | _, err := hn.GetHostname("8.") 54 | if err == nil { 55 | t.Fatal("missed error") 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /util/testdata/jsonparse_eve.json: -------------------------------------------------------------------------------- 1 | 
{"timestamp":"2017-03-06T06:54:06.047429+0000","flow_id":4711,"in_iface":"enp2s0f1","event_type":"dns","vlan":61,"src_ip":"10.0.0.10","src_port":53,"dest_ip":"10.0.0.11","dest_port":51323,"proto":"UDP","dns":{"type":"answer","id":1,"rcode":"NOERROR","rrname":"test.test.local","rrtype":"A","ttl":2365,"rdata":"10.0.0.12"}} 2 | {"timestamp":"2017-03-06T06:54:10.839668+0000","flow_id":2323,"in_iface":"enp2s0f1","event_type":"fileinfo","vlan":91,"src_ip":"10.0.0.10","src_port":80,"dest_ip":"10.0.0.11","dest_port":52914,"proto":"TCP","http":{"hostname":"api.icndb.com","url":"\/jokes\/random?firstName=Chuck&lastName=Norris&limitTo=[nerdy]","http_user_agent":"Ruby","http_content_type":"application\/json","http_method":"GET","protocol":"HTTP\/1.1","status":200,"length":178},"app_proto":"http","fileinfo":{"filename":"\/jokes\/random","magic":"ASCII text, with no line terminators","state":"CLOSED","md5":"8d81d793b28b098e8623d47bae23cf44","stored":false,"size":176,"tx_id":0}} 3 | {"timestamp":"2017-03-06T06:54:14.002504+0000","flow_id":2134,"in_iface":"enp2s0f1","event_type":"http","vlan":72,"src_ip":"10.0.0.10","src_port":24092,"dest_ip":"10.0.0.11","dest_port":80,"proto":"TCP","tx_id":0,"http":{"hostname":"foobar","url":"\/scripts\/wpnbr.dll","http_content_type":"text\/xml","http_method":"POST","protocol":"HTTP\/1.1","status":200,"length":347}} 4 | -------------------------------------------------------------------------------- /util/hostnamer_rdns.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "net" 5 | "strings" 6 | "sync" 7 | "time" 8 | 9 | "github.com/patrickmn/go-cache" 10 | ) 11 | 12 | // HostNamerRDNS is a component that provides cached hostnames for IP 13 | // addresses passed as strings, determined via reverse DNS lookups. 
14 | type HostNamerRDNS struct { 15 | cache *cache.Cache 16 | lock sync.Mutex 17 | } 18 | 19 | // NewHostNamerRDNS returns a new HostNamer with the given default expiration time. 20 | // Data entries will be purged after each cleanupInterval. 21 | func NewHostNamerRDNS(defaultExpiration, cleanupInterval time.Duration) *HostNamerRDNS { 22 | return &HostNamerRDNS{ 23 | cache: cache.New(defaultExpiration, cleanupInterval), 24 | } 25 | } 26 | 27 | // GetHostname returns a list of host names for a given IP address. 28 | func (n *HostNamerRDNS) GetHostname(ipAddr string) ([]string, error) { 29 | n.lock.Lock() 30 | defer n.lock.Unlock() 31 | 32 | val, found := n.cache.Get(ipAddr) 33 | if found { 34 | return val.([]string), nil 35 | } 36 | hns, err := net.LookupAddr(ipAddr) 37 | if err != nil { 38 | return nil, err 39 | } 40 | for i, hn := range hns { 41 | hns[i] = strings.TrimRight(hn, ".") 42 | } 43 | n.cache.Set(ipAddr, hns, cache.DefaultExpiration) 44 | val = hns 45 | return val.([]string), nil 46 | } 47 | 48 | // Flush clears the cache of a HostNamerRDNS. 
49 | func (n *HostNamerRDNS) Flush() { 50 | n.cache.Flush() 51 | } 52 | -------------------------------------------------------------------------------- /doc/flow-agg.md: -------------------------------------------------------------------------------- 1 | ## Aggregated flow metadata JSON example 2 | 3 | ```json 4 | { 5 | "sensor-id": "foobar", 6 | "time-start": "2017-03-13T17:36:53.205850748+01:00", 7 | "time-end": "2017-03-13T17:36:58.205967348+01:00", 8 | "tuples": { 9 | "172.22.0.214_172.18.8.116_993": { 10 | "count": 1, 11 | "total_bytes_toclient": 86895, 12 | "total_bytes_toserver": 17880 13 | }, 14 | "172.22.0.214_172.18.8.145_2222": { 15 | "count": 2, 16 | "total_bytes_toclient": 36326, 17 | "total_bytes_toserver": 4332 18 | }, 19 | "172.22.0.214_198.232.125.113_80": { 20 | "count": 3, 21 | "total_bytes_toclient": 23242, 22 | "total_bytes_toserver": 1223 23 | }, 24 | "172.22.0.214_198.232.125.123_80": { 25 | "count": 1, 26 | "total_bytes_toclient": 1026322, 27 | "total_bytes_toserver": 51232 28 | } 29 | }, 30 | "proxy-map": { 31 | "23.37.43.27": { 32 | "ss.symcd.com": 1 33 | } 34 | } 35 | } 36 | ``` 37 | The `tuples` keys represent routes in which sourceIP/destIP/destPort (concatenated using `_`) map to the number of flow events observed in the reported time period. In the `proxy-map` dict, the keys are destination IP addresses which have had observed HTTP requests on ports 8000-8999, 80 or 3128 (i.e. typical proxy ports). The associated values are the number of times that these requests were made with certain HTTP Host headers. 38 | 39 | Using the `-n` parameter, the reporting frequency can be tuned. Longer intervals (e.g. hours) will reduce load on the consuming endpoint, but may also lead to larger payloads in the JSON outlined above. 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017, 2018, 2019, DCSO Deutsche Cyber-Sicherheitsorganisation GmbH 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of the DCSO Deutsche Cyber-Sicherheitsorganisation GmbH 15 | nor the names of its contributors may be used to endorse or promote products 16 | derived from this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | -------------------------------------------------------------------------------- /input/input_socket_test.go: -------------------------------------------------------------------------------- 1 | package input 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "encoding/json" 8 | "fmt" 9 | "math/rand" 10 | "net" 11 | "os" 12 | "path/filepath" 13 | "testing" 14 | 15 | "github.com/DCSO/fever/types" 16 | 17 | log "github.com/sirupsen/logrus" 18 | ) 19 | 20 | func TestSocketInput(t *testing.T) { 21 | dir, err := os.MkdirTemp("", "test") 22 | if err != nil { 23 | t.Fatal(err) 24 | } 25 | defer os.RemoveAll(dir) 26 | tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) 27 | 28 | evChan := make(chan types.Entry) 29 | events := make([]string, 1000) 30 | 31 | is, err := MakeSocketInput(tmpfn, evChan, false) 32 | if err != nil { 33 | t.Fatal(err) 34 | } 35 | is.Run() 36 | 37 | submitDone := make(chan bool) 38 | collectDone := make(chan bool) 39 | 40 | go func() { 41 | c, err := net.Dial("unix", tmpfn) 42 | if err != nil { 43 | log.Println(err) 44 | } 45 | for i := 0; i < 1000; i++ { 46 | events[i] = makeEveEvent([]string{"http", "dns", "foo"}[rand.Intn(3)], i) 47 | c.Write([]byte(events[i])) 48 | c.Write([]byte("\n")) 49 | } 50 | c.Close() 51 | close(submitDone) 52 | }() 53 | 54 | coll := make([]types.Entry, 0) 55 | go func() { 56 | for i := 0; i < 1000; i++ { 57 | e := <-evChan 58 | coll = append(coll, e) 59 | } 60 | close(collectDone) 61 | }() 62 | 63 | <-submitDone 64 | <-collectDone 65 | ch := make(chan bool) 66 | is.Stop(ch) 67 | <-ch 68 | 69 | if len(coll) != 1000 { 70 | t.Fatalf("unexpected number of items read from socket: %d != 1000", 71 | len(coll)) 72 | } 73 | for i := 0; i < 1000; i++ { 74 | var checkEvent types.EveEvent 75 | json.Unmarshal([]byte(events[i]), &checkEvent) 76 | if coll[i].EventType != checkEvent.EventType { 77 | t.Fatalf("wrong event type for test event %d: %s != %s", i, 78 | coll[i].EventType, 
checkEvent.EventType) 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /types/eve_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2019, DCSO GmbH 5 | 6 | import ( 7 | "encoding/json" 8 | "strings" 9 | "testing" 10 | "time" 11 | ) 12 | 13 | func TestEVERoundtripTimestamp(t *testing.T) { 14 | timeCmp, _ := time.Parse(time.RFC3339, "2019-08-06 13:30:01.690233 +0200 CEST") 15 | ee := EveEvent{ 16 | Timestamp: &SuriTime{ 17 | Time: timeCmp, 18 | }, 19 | EventType: "http", 20 | SrcIP: "1.2.3.4", 21 | SrcPort: 2222, 22 | DestIP: "3.4.5.6", 23 | DestPort: 80, 24 | Proto: "tcp", 25 | FlowID: 642, 26 | HTTP: &HTTPEvent{ 27 | Hostname: "test", 28 | URL: "/", 29 | }, 30 | } 31 | 32 | out, err := json.Marshal(ee) 33 | if err != nil { 34 | t.Error(err) 35 | } 36 | 37 | var inEVE EveEvent 38 | err = json.Unmarshal(out, &inEVE) 39 | if err != nil { 40 | t.Error(err) 41 | } 42 | 43 | if !inEVE.Timestamp.Time.Equal(ee.Timestamp.Time) { 44 | t.Fatalf("timestamp round-trip failed: %v <-> %v", inEVE.Timestamp, ee.Timestamp) 45 | } 46 | } 47 | 48 | func TestEVEStringFlowIDRoundtrip(t *testing.T) { 49 | timeCmp, _ := time.Parse(time.RFC3339, "2019-08-06 13:30:01.690233 +0200 CEST") 50 | ee := EveOutEvent{ 51 | Timestamp: &SuriTime{ 52 | Time: timeCmp, 53 | }, 54 | EventType: "http", 55 | SrcIP: "1.2.3.4", 56 | SrcPort: 2222, 57 | DestIP: "3.4.5.6", 58 | DestPort: 80, 59 | Proto: "tcp", 60 | FlowID: 649, 61 | HTTP: &HTTPEvent{ 62 | Hostname: "test", 63 | URL: "/", 64 | }, 65 | } 66 | 67 | out, err := json.Marshal(ee) 68 | if err != nil { 69 | t.Error(err) 70 | } 71 | 72 | var inEVE EveOutEvent 73 | err = json.Unmarshal(out, &inEVE) 74 | if err != nil { 75 | t.Error(err) 76 | } 77 | 78 | if !strings.Contains(string(out), `"flow_id":"649"`) { 79 | t.Fatalf("flow ID missing") 80 | } 81 | 82 | if inEVE.FlowID != ee.FlowID { 83 | 
t.Fatalf("round-trip failed: %v <-> %v", inEVE.FlowID, ee.FlowID) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /util/submitter_dummy.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2018, DCSO GmbH 5 | 6 | import ( 7 | "unicode" 8 | 9 | log "github.com/sirupsen/logrus" 10 | ) 11 | 12 | // DummySubmitter is a StatsSubmitter that just logs submissions without 13 | // sending them over the network. 14 | type DummySubmitter struct { 15 | Logger *log.Entry 16 | SensorID string 17 | } 18 | 19 | func isASCIIPrintable(s string) bool { 20 | for _, r := range s { 21 | if r > unicode.MaxASCII || !unicode.IsPrint(r) { 22 | return false 23 | } 24 | } 25 | return true 26 | } 27 | 28 | // MakeDummySubmitter creates a new submitter just logging to the default log 29 | // target. 30 | func MakeDummySubmitter() (*DummySubmitter, error) { 31 | mySubmitter := &DummySubmitter{ 32 | Logger: log.WithFields(log.Fields{ 33 | "domain": "submitter", 34 | "submitter": "dummy", 35 | }), 36 | } 37 | sensorID, err := GetSensorID() 38 | if err != nil { 39 | return nil, err 40 | } 41 | mySubmitter.SensorID = sensorID 42 | return mySubmitter, nil 43 | } 44 | 45 | // UseCompression enables gzip compression of submitted payloads (not 46 | // applicable in this implementation). 47 | func (s *DummySubmitter) UseCompression() { 48 | // pass 49 | } 50 | 51 | // Submit logs the rawData payload. 52 | func (s *DummySubmitter) Submit(rawData []byte, key string, contentType string) { 53 | s.SubmitWithHeaders(rawData, key, contentType, nil) 54 | } 55 | 56 | // SubmitWithHeaders logs rawData payload, adding some extra key-value pairs to 57 | // the header. 
58 | func (s *DummySubmitter) SubmitWithHeaders(rawData []byte, key string, contentType string, myHeaders map[string]string) { 59 | bytestring := string(rawData) 60 | if isASCIIPrintable(bytestring) { 61 | s.Logger.Info(bytestring) 62 | } else { 63 | s.Logger.Infof("%s (%s) - submitting non-printable byte array of length %d", key, contentType, len(rawData)) 64 | } 65 | } 66 | 67 | // Finish is a no-op in this implementation. 68 | func (s *DummySubmitter) Finish() { 69 | // pass 70 | } 71 | -------------------------------------------------------------------------------- /input/input_stdin.go: -------------------------------------------------------------------------------- 1 | package input 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2020, 2023, DCSO GmbH 5 | 6 | import ( 7 | "bufio" 8 | "net" 9 | "os" 10 | 11 | "github.com/DCSO/fever/types" 12 | "github.com/DCSO/fever/util" 13 | 14 | log "github.com/sirupsen/logrus" 15 | ) 16 | 17 | // StdinInput is an Input reading JSON EVE input from standard input. 18 | type StdinInput struct { 19 | EventChan chan types.Entry 20 | Verbose bool 21 | Running bool 22 | InputListener net.Listener 23 | StopChan chan bool 24 | StoppedChan chan bool 25 | } 26 | 27 | // GetName returns a printable name for the input 28 | func (si *StdinInput) GetName() string { 29 | return "Stdin input" 30 | } 31 | 32 | func (si *StdinInput) handleStdinStream() { 33 | scanner := bufio.NewScanner(os.Stdin) 34 | for scanner.Scan() { 35 | json := scanner.Bytes() 36 | e, err := util.ParseJSON(json) 37 | if err != nil { 38 | log.Error(err, string(json[:])) 39 | continue 40 | } 41 | si.EventChan <- e 42 | } 43 | close(si.EventChan) 44 | } 45 | 46 | // MakeStdinInput returns a new StdinInput reading from stdin and writing 47 | // parsed events to outChan. 
48 | func MakeStdinInput(outChan chan types.Entry) *StdinInput { 49 | si := &StdinInput{ 50 | EventChan: outChan, 51 | Verbose: false, 52 | StopChan: make(chan bool), 53 | } 54 | return si 55 | } 56 | 57 | // Run starts the StdinInput 58 | func (si *StdinInput) Run() { 59 | if !si.Running { 60 | si.Running = true 61 | si.StopChan = make(chan bool) 62 | go si.handleStdinStream() 63 | } 64 | } 65 | 66 | // Stop causes the StdinInput to stop reading from stdin and close all 67 | // associated channels, including the passed notification channel. 68 | func (si *StdinInput) Stop(stoppedChan chan bool) { 69 | if si.Running { 70 | si.StoppedChan = stoppedChan 71 | si.Running = false 72 | close(stoppedChan) 73 | } 74 | } 75 | 76 | // SetVerbose sets the input's verbosity level 77 | func (si *StdinInput) SetVerbose(verbose bool) { 78 | si.Verbose = verbose 79 | } 80 | -------------------------------------------------------------------------------- /mgmt/endpointconfig.go: -------------------------------------------------------------------------------- 1 | package mgmt 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2021, DCSO GmbH 5 | 6 | import ( 7 | "crypto/tls" 8 | fmt "fmt" 9 | 10 | "github.com/spf13/viper" 11 | "google.golang.org/grpc" 12 | ) 13 | 14 | // EndpointConfig ... 15 | type EndpointConfig struct { 16 | ListenerAddress string 17 | ServerAddress string 18 | Network string 19 | Params map[string]interface{} 20 | TLSConfig *tls.Config 21 | TLSDisable bool 22 | Disable bool 23 | } 24 | 25 | // GRPCEndpointConfig ... 
26 | type GRPCEndpointConfig struct { 27 | EndpointConfig 28 | ServerOptions []grpc.ServerOption 29 | DialOptions []grpc.DialOption 30 | } 31 | 32 | // EndpointConfigFromViper creates a new GRPCEndpointConfig from the relevant 33 | // Viper configs 34 | func EndpointConfigFromViper() GRPCEndpointConfig { 35 | var mgmtCfg GRPCEndpointConfig 36 | host := viper.GetString("mgmt.host") 37 | network := viper.GetString("mgmt.network") 38 | socket := viper.GetString("mgmt.socket") 39 | 40 | if host != "" { 41 | mgmtCfg = GRPCEndpointConfig{ 42 | EndpointConfig: EndpointConfig{ 43 | Network: network, 44 | ListenerAddress: host, 45 | ServerAddress: host, 46 | TLSDisable: true, // XXX we may choose to support TLS eventually 47 | }, 48 | DialOptions: []grpc.DialOption{grpc.WithInsecure()}, 49 | } 50 | } else { 51 | mgmtCfg = GRPCEndpointConfig{ 52 | EndpointConfig: EndpointConfig{ 53 | Network: "unix", 54 | ListenerAddress: socket, 55 | }, 56 | DialOptions: []grpc.DialOption{grpc.WithInsecure()}, 57 | } 58 | } 59 | return mgmtCfg 60 | } 61 | 62 | // DialString returns a string from the given config that is suitable to be 63 | // passed into a grpc.Dial() function. 
64 | func (e GRPCEndpointConfig) DialString() string { 65 | if e.EndpointConfig.Network == "unix" { 66 | return fmt.Sprintf("%s:%s", e.EndpointConfig.Network, e.EndpointConfig.ListenerAddress) 67 | } 68 | return fmt.Sprintf("dns:///%s", e.EndpointConfig.ListenerAddress) 69 | } 70 | -------------------------------------------------------------------------------- /processing/context_shipper_amqp.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2019, DCSO GmbH 5 | 6 | import ( 7 | "encoding/json" 8 | "time" 9 | 10 | "github.com/DCSO/fever/util" 11 | 12 | log "github.com/sirupsen/logrus" 13 | ) 14 | 15 | const ( 16 | // ContextQueueLength is the length of the queue buffering incoming context 17 | // bundles to balance out potential transmission delays. 18 | ContextQueueLength = 100 19 | ) 20 | 21 | // ContextChunk represents a collection of events for transmission via AMQP. 22 | type ContextChunk struct { 23 | Timestamp time.Time `json:"timestamp"` 24 | SensorID string `json:"sensor_id"` 25 | Events []interface{} `json:"events"` 26 | } 27 | 28 | // ContextShipperAMQP is a ContextShipper that sends incoming context bundles to 29 | // an AMQP exchange. 30 | type ContextShipperAMQP struct { 31 | Submitter util.StatsSubmitter 32 | InChan chan Context 33 | SensorID string 34 | } 35 | 36 | // Start initiates the concurrent handling of incoming context bundles in the 37 | // Shipper's input channel. It will stop automatically once this channel is 38 | // closed. 
39 | func (cs *ContextShipperAMQP) Start(s util.StatsSubmitter) (chan<- Context, error) { 40 | var err error 41 | cs.Submitter = s 42 | cs.InChan = make(chan Context, ContextQueueLength) 43 | cs.SensorID, err = util.GetSensorID() 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | go func() { 49 | for ctx := range cs.InChan { 50 | out := make([]interface{}, 0) 51 | for _, ctxItem := range ctx { 52 | var myItem interface{} 53 | err := json.Unmarshal([]byte(ctxItem), &myItem) 54 | if err != nil { 55 | log.Warnf("could not marshal event JSON: %s", string(ctxItem)) 56 | continue 57 | } 58 | out = append(out, myItem) 59 | } 60 | chunk := ContextChunk{ 61 | Timestamp: time.Now(), 62 | SensorID: cs.SensorID, 63 | Events: out, 64 | } 65 | json, err := json.Marshal(chunk) 66 | if err != nil { 67 | log.Warn(err) 68 | continue 69 | } 70 | s.Submit(json, "context", "application/json") 71 | } 72 | }() 73 | 74 | return cs.InChan, nil 75 | } 76 | -------------------------------------------------------------------------------- /cmd/fever/cmds/testdata/alertify_input.json: -------------------------------------------------------------------------------- 1 | {"timestamp":"2016-01-02T19:08:01.310448+0000","flow_id":386974943918954,"pcap_cnt":7,"event_type":"http","src_ip":"1.1.1.1","src_port":10305,"dest_ip":"8.8.8.8","dest_port":80,"proto":"TCP","tx_id":0,"http":{"hostname":"evader.example.com","url":"/compressed/eicar.txt/ce%3Agzip,gzip;gzip;gzip","http_content_type":"application/octet-stream","http_method":"GET","protocol":"HTTP/1.1","status":200,"length":106}} 2 | 
{"timestamp":"2013-07-04T19:47:51.592983+0000","flow_id":694559854542603,"pcap_cnt":4,"event_type":"dns","src_ip":"192.168.42.150","src_port":55597,"dest_ip":"192.168.42.129","dest_port":53,"proto":"UDP","dns":{"version":2,"type":"answer","id":25783,"flags":"8180","qr":true,"rd":true,"ra":true,"rrname":"static.programme-tv.net","rrtype":"AAAA","rcode":"NOERROR","answers":[{"rrname":"static.programme-tv.net","rrtype":"CNAME","ttl":630,"rdata":"programme-tv.net.edgesuite.net"},{"rrname":"programme-tv.net.edgesuite.net","rrtype":"CNAME","ttl":20432,"rdata":"a1859.g.akamai.net"}],"grouped":{"CNAME":["programme-tv.net.edgesuite.net","a1859.g.akamai.net"]},"authorities":[{"rrname":"g.akamai.net","rrtype":"SOA","ttl":1000}]}} 3 | {"timestamp":"2019-05-15T08:11:18.955582+0000","flow_id":1416337568450228,"pcap_cnt":11,"event_type":"tls","src_ip":"2a03:b0c0:0002:00d0:0000:0000:0bd3:4001","src_port":48106,"dest_ip":"2606:2800:0220:0001:0248:1893:25c8:1946","dest_port":443,"proto":"TCP","tls":{"subject":"C=US, ST=California, L=Los Angeles, O=Internet Corporation for Assigned Names and Numbers, OU=Technology, CN=www.example.org","issuerdn":"C=US, O=DigiCert Inc, CN=DigiCert SHA2 Secure Server CA","serial":"0F:D0:78:DD:48:F1:A2:BD:4D:0F:2B:A9:6B:60:38:FE","fingerprint":"7b:b6:98:38:69:70:36:3d:29:19:cc:57:72:84:69:84:ff:d4:a8:89","sni":"example.com","version":"TLS 1.2","notbefore":"2018-11-28T00:00:00","notafter":"2020-12-02T12:00:00","ja3":{"hash":"1fe4c7a3544eb27afec2adfb3a3dbf60","string":"771,49196-49200-159-52393-52392-52394-49195-49199-158-49188-49192-107-49187-49191-103-49162-49172-57-49161-49171-51-157-156-61-60-53-47-255,0-11-10-13172-16-22-23-13,29-23-25-24,0-1-2"},"ja3s":{"hash":"5d79edf64e03689ff559a54e9d9487bc","string":"771,49199,65281-0-11-16-23"}}} -------------------------------------------------------------------------------- /doc/database.md: -------------------------------------------------------------------------------- 1 | ## Database schema 2 | 3 | Events 
are stored in a JSONB column tagged with a timestamp. Indexes will be created on this timestamp, the source/destination IP/port values (composite), and the event type. Another full-text (trigram) index will be built for event type-specific plain-text fields that are concatenated using a `|`. The keyword-based full-text matches are intended to serve as the main means of access to 'interesting' events, and can be further refined by IP/port/type/... constraints, which are also indexed. All further queries on JSON fields **will be unindexed**, so care should be taken to reduce the search space as much as possible using indexed queries. 4 | 5 | A separate database must be used and the connecting user must be able to `CREATE` and `DROP` tables in the public schema. 6 | 7 | ```sql 8 | -- Initial table 9 | CREATE UNLOGGED TABLE IF NOT EXISTS "events-YY-MM-DD-HHMM" 10 | (ts timestamp without time zone default now(), 11 | payload jsonb); 12 | GRANT ALL PRIVILEGES ON TABLE "events-YY-MM-DD-HHMM" to sensor; 13 | 14 | -- Deferred 15 | CREATE INDEX ON "events-YY-MM-DD-HHMM" (ts); 16 | CREATE INDEX ON "events-YY-MM-DD-HHMM" (((payload->>'src_ip')::INET), ((payload->>'src_port')::INT)); 17 | CREATE INDEX ON "events-YY-MM-DD-HHMM" (((payload->>'dest_ip')::INET), ((payload->>'dest_port')::INT)); 18 | CREATE INDEX ON "events-YY-MM-DD-HHMM" ((payload->>'event_type')); 19 | CREATE INDEX ON "events-YY-MM-DD-HHMM" using GIN (trigram_string(payload) gin_trgm_ops) 20 | ``` 21 | `trigram_string(payload jsonb)` is a PL/PgSQL function that extracts and concatenates relevant data for indexing, see `sql.go`. 
22 | 23 | The following contents are used to build the full-text index: 24 | 25 | - `dns` events: 26 | - `dns->rdata` 27 | - `http` events: 28 | - `http->hostname` + `http->url` + `http->http_user_agent` 29 | - `tls` events: 30 | - `tls->subject` + `tls->issuerdn` + `tls->fingerprint` 31 | - `alert` events: 32 | - `alert->payload_printable` + `alert->payload` 33 | - `smtp` events: 34 | - `smtp->helo` + `smtp->mail_from` + `smtp->rcpt_to` + `email->from`+ `email->to` + `email->attachment` 35 | - `fileinfo` events: 36 | - `fileinfo->filename` + `fileinfo->md5` -------------------------------------------------------------------------------- /cmd/fever/cmds/alertify_test.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "bytes" 5 | "io/ioutil" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/DCSO/fever/types" 10 | "github.com/DCSO/fever/util" 11 | ) 12 | 13 | func checkAlertified(t *testing.T, es []types.Entry, ioc string, 14 | result string) bool { 15 | a := makeAlertifyAlertifier("TEST", "test-ioc") 16 | var buf bytes.Buffer 17 | for _, e := range es { 18 | err := emitAlertsForEvent(a, e, ioc, &buf, 0) 19 | if err != nil { 20 | t.Fatal(err) 21 | } 22 | } 23 | return strings.Contains(buf.String(), result) 24 | } 25 | 26 | func checkLimit(t *testing.T, es []types.Entry, ioc string) { 27 | a := makeAlertifyAlertifier("TEST", "test-ioc") 28 | var buf bytes.Buffer 29 | i := 0 30 | for _, e := range es { 31 | err := emitAlertsForEvent(a, e, ioc, &buf, 1) 32 | if i == 1 { 33 | if err == nil { 34 | t.Fatal(err) 35 | } 36 | if !strings.Contains(err.Error(), `limit reached (1)`) { 37 | t.Fatal("wrong limit error message: ", err.Error()) 38 | } 39 | } 40 | i++ 41 | } 42 | } 43 | 44 | func TestAlertify(t *testing.T) { 45 | ins, err := ioutil.ReadFile("testdata/alertify_input.json") 46 | if err != nil { 47 | t.Fatal(err) 48 | } 49 | 50 | inputs := make([]types.Entry, 0) 51 | for _, line := range 
strings.Split(string(ins), "\n") { 52 | e, err := util.ParseJSON([]byte(line)) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | inputs = append(inputs, e) 57 | } 58 | 59 | if !checkAlertified(t, inputs, "evader.example.com", 60 | `TEST Possibly bad HTTP host: evader.example.com`) { 61 | t.Fatal("evader.example.com not processed") 62 | } 63 | 64 | if !checkAlertified(t, inputs, "static.programme-tv.net", 65 | `TEST Possibly bad DNS response for static.programme-tv.net`) { 66 | t.Fatal("static.programme-tv.net not processed") 67 | } 68 | 69 | if !checkAlertified(t, inputs, "example.com", 70 | `TEST Possibly bad TLS SNI: example.com`) { 71 | t.Fatal("example.com not processed") 72 | } 73 | 74 | if !checkAlertified(t, inputs, "/compressed/eicar.txt/ce%3Agzip,gzip;gzip;gzip", 75 | `TEST Possibly bad HTTP URL: GET | evader.example.com | /compressed/eicar.txt/ce%3Agzip,gzip;gzip;gzip`) { 76 | t.Fatal("example.com URL not processed") 77 | } 78 | 79 | checkLimit(t, inputs, "example.com") 80 | } 81 | -------------------------------------------------------------------------------- /thirdparty/google/protobuf/empty.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. 
nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option go_package = "google.golang.org/protobuf/types/known/emptypb"; 37 | option java_package = "com.google.protobuf"; 38 | option java_outer_classname = "EmptyProto"; 39 | option java_multiple_files = true; 40 | option objc_class_prefix = "GPB"; 41 | option cc_enable_arenas = true; 42 | 43 | // A generic empty message that you can re-use to avoid defining duplicated 44 | // empty messages in your APIs. A typical example is to use it as the request 45 | // or the response type of an API method. For instance: 46 | // 47 | // service Foo { 48 | // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); 49 | // } 50 | // 51 | // The JSON representation for `Empty` is empty JSON object `{}`. 
52 | message Empty {} 53 | -------------------------------------------------------------------------------- /util/performance_stats_encoder.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "bytes" 8 | "strings" 9 | "sync" 10 | "time" 11 | 12 | "github.com/DCSO/fluxline" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | // PerformanceStatsEncoder is a component to collect, encode and submit data 17 | // to an InfluxDb via RabbitMQ. 18 | type PerformanceStatsEncoder struct { 19 | sync.RWMutex 20 | Encoder *fluxline.Encoder 21 | Buffer bytes.Buffer 22 | Logger *log.Entry 23 | Tags map[string]string 24 | Submitter StatsSubmitter 25 | SubmitPeriod time.Duration 26 | LastSubmitted time.Time 27 | DummyMode bool 28 | } 29 | 30 | // MakePerformanceStatsEncoder creates a new stats encoder, submitting via 31 | // the given StatsSubmitter, with at least submitPeriod time between submissions. 32 | // if dummyMode is set, then the result will be printed to stdout instead of 33 | // submitting. 34 | func MakePerformanceStatsEncoder(statsSubmitter StatsSubmitter, 35 | submitPeriod time.Duration, dummyMode bool) *PerformanceStatsEncoder { 36 | a := &PerformanceStatsEncoder{ 37 | Logger: log.WithFields(log.Fields{ 38 | "domain": "statscollect", 39 | }), 40 | Submitter: statsSubmitter, 41 | DummyMode: dummyMode, 42 | Tags: make(map[string]string), 43 | LastSubmitted: time.Now(), 44 | SubmitPeriod: submitPeriod, 45 | } 46 | a.Encoder = fluxline.NewEncoder(&a.Buffer) 47 | return a 48 | } 49 | 50 | // SubmitWithTags encodes the data annotated with 'influx' tags in the passed 51 | // struct and sends it to the configured submitter. This version also allows to 52 | // add a set of user-defined tags as a key-value map. 
53 | func (a *PerformanceStatsEncoder) SubmitWithTags(val interface{}, tags map[string]string) { 54 | a.Lock() 55 | a.Buffer.Reset() 56 | err := a.Encoder.EncodeWithoutTypes(ToolName, val, tags) 57 | if err != nil { 58 | if a.Logger != nil { 59 | a.Logger.WithFields(log.Fields{}).Warn(err) 60 | } 61 | } 62 | line := strings.TrimSpace(a.Buffer.String()) 63 | if line == "" { 64 | a.Logger.WithFields(log.Fields{}).Warn("skipping empty influx line") 65 | a.Unlock() 66 | return 67 | } 68 | jsonString := []byte(line) 69 | a.Submitter.SubmitWithHeaders(jsonString, "", "text/plain", map[string]string{ 70 | "database": "telegraf", 71 | "retention_policy": "default", 72 | }) 73 | a.Unlock() 74 | } 75 | 76 | // Submit encodes the data annotated with 'influx' tags in the passed struct and 77 | // sends it to the configured submitter. 78 | func (a *PerformanceStatsEncoder) Submit(val interface{}) { 79 | a.SubmitWithTags(val, a.Tags) 80 | } 81 | -------------------------------------------------------------------------------- /cmd/fever/cmds/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2018, 2021, DCSO GmbH 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | 10 | homedir "github.com/mitchellh/go-homedir" 11 | log "github.com/sirupsen/logrus" 12 | "github.com/spf13/cobra" 13 | "github.com/spf13/viper" 14 | ) 15 | 16 | var cfgFile string 17 | 18 | // rootCmd represents the base command when called without any subcommands 19 | var rootCmd = &cobra.Command{ 20 | Use: "fever", 21 | Short: "fast, extensible and flexible event router", 22 | Long: `FEVER is a fast execution engine for processing, aggregation and 23 | reporting components that act on Suricata's EVE output.`, 24 | } 25 | 26 | // Execute adds all child commands to the root command and sets flags appropriately. 27 | // This is called by main.main(). It only needs to happen once to the rootCmd. 
28 | func Execute() { 29 | if err := rootCmd.Execute(); err != nil { 30 | fmt.Println(err) 31 | os.Exit(1) 32 | } 33 | } 34 | 35 | func init() { 36 | cobra.OnInitialize(initConfig) 37 | 38 | // Here you will define your flags and configuration settings. 39 | // Cobra supports persistent flags, which, if defined here, 40 | // will be global for your application. 41 | rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.fever.yaml)") 42 | 43 | // Management server options 44 | rootCmd.PersistentFlags().StringP("mgmt-socket", "", "/tmp/fever-mgmt.sock", "Socket path for management server") 45 | viper.BindPFlag("mgmt.socket", rootCmd.PersistentFlags().Lookup("mgmt-socket")) 46 | rootCmd.PersistentFlags().StringP("mgmt-host", "", "", "hostname:port definition for management server") 47 | viper.BindPFlag("mgmt.host", rootCmd.PersistentFlags().Lookup("mgmt-host")) 48 | rootCmd.PersistentFlags().StringP("mgmt-network", "", "tcp", "network (tcp/udp) definition for management server") 49 | viper.BindPFlag("mgmt.network", rootCmd.PersistentFlags().Lookup("mgmt-network")) 50 | } 51 | 52 | // initConfig reads in config file and ENV variables if set. 53 | func initConfig() { 54 | if cfgFile != "" { 55 | // Use config file from the flag. 56 | viper.SetConfigFile(cfgFile) 57 | } else { 58 | // Find home directory. 59 | home, err := homedir.Dir() 60 | if err != nil { 61 | fmt.Println(err) 62 | os.Exit(1) 63 | } 64 | 65 | // Search config in home directory with name ".fever" (without extension). 66 | viper.AddConfigPath(home) 67 | viper.SetConfigName(".fever") 68 | } 69 | 70 | viper.AutomaticEnv() // read in environment variables that match 71 | 72 | // If a config file is found, read it in. 
73 | if err := viper.ReadInConfig(); err == nil { 74 | log.Infof("Using config file: %s", viper.ConfigFileUsed()) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /processing/rdns_handler_test.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2020, DCSO GmbH 5 | 6 | import ( 7 | "encoding/json" 8 | "math/rand" 9 | "testing" 10 | "time" 11 | 12 | "github.com/DCSO/fever/types" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | type MockHostNamer struct{} 17 | 18 | func (m *MockHostNamer) GetHostname(ipAddr string) ([]string, error) { 19 | return []string{"foo.bar", "foo.baz"}, nil 20 | } 21 | 22 | func (m *MockHostNamer) Flush() {} 23 | 24 | func makeRDNSEvent() types.Entry { 25 | e := types.Entry{ 26 | SrcIP: "8.8.8.8", 27 | SrcPort: int64(rand.Intn(60000) + 1025), 28 | DestIP: "8.8.8.8", 29 | DestPort: 53, 30 | Timestamp: time.Now().Format(types.SuricataTimestampFormat), 31 | EventType: "http", 32 | Proto: "TCP", 33 | } 34 | eve := types.EveEvent{ 35 | Timestamp: &types.SuriTime{ 36 | Time: time.Now(), 37 | }, 38 | EventType: e.EventType, 39 | SrcIP: e.SrcIP, 40 | SrcPort: int(e.SrcPort), 41 | DestIP: e.DestIP, 42 | DestPort: int(e.DestPort), 43 | Proto: e.Proto, 44 | } 45 | json, err := json.Marshal(eve) 46 | if err != nil { 47 | log.Warn(err) 48 | } else { 49 | e.JSONLine = string(json) 50 | } 51 | return e 52 | } 53 | 54 | type SrcHostResponse struct { 55 | Evidence []struct { 56 | Hostname string `json:"rdns"` 57 | } `json:"src_host"` 58 | } 59 | 60 | type DstHostResponse struct { 61 | Evidence []struct { 62 | Hostname string `json:"rdns"` 63 | } `json:"dest_host"` 64 | } 65 | 66 | func TestRDNSHandler(t *testing.T) { 67 | hn := MockHostNamer{} 68 | rdnsh := MakeRDNSHandler(&hn) 69 | 70 | e := makeRDNSEvent() 71 | 72 | err := rdnsh.Consume(&e) 73 | if err != nil { 74 | t.Fatal(err) 75 | } 76 | 77 | var 
srchosts SrcHostResponse 78 | err = json.Unmarshal([]byte(e.JSONLine), &srchosts) 79 | if err != nil { 80 | t.Fatal(err) 81 | } 82 | if len(srchosts.Evidence) != 2 { 83 | t.Fatalf("src hosts length is not 2: length %d", len(srchosts.Evidence)) 84 | } 85 | if srchosts.Evidence[0].Hostname != "foo.bar" { 86 | t.Fatalf("wrong hostname:%s", srchosts.Evidence[0].Hostname) 87 | } 88 | if srchosts.Evidence[1].Hostname != "foo.baz" { 89 | t.Fatalf("wrong hostname:%s", srchosts.Evidence[1].Hostname) 90 | } 91 | 92 | var desthosts DstHostResponse 93 | err = json.Unmarshal([]byte(e.JSONLine), &desthosts) 94 | if err != nil { 95 | t.Fatal(err) 96 | } 97 | if len(desthosts.Evidence) != 2 { 98 | t.Fatalf("dest hosts length is not 2: length %d", len(desthosts.Evidence)) 99 | } 100 | if desthosts.Evidence[0].Hostname != "foo.bar" { 101 | t.Fatalf("wrong hostname:%s", desthosts.Evidence[0].Hostname) 102 | } 103 | if desthosts.Evidence[1].Hostname != "foo.baz" { 104 | t.Fatalf("wrong hostname:%s", desthosts.Evidence[1].Hostname) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /util/alertifier_providers.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2020, DCSO GmbH 5 | 6 | import ( 7 | "fmt" 8 | 9 | "github.com/DCSO/fever/types" 10 | ) 11 | 12 | // AlertJSONProviderHTTPURL is an AlertJSONProvider for HTTP URL matches. 13 | type AlertJSONProviderHTTPURL struct{} 14 | 15 | // GetAlertJSON returns the "alert" subobject for an alert EVE event. 
16 | func (a AlertJSONProviderHTTPURL) GetAlertJSON(inputEvent types.Entry, 17 | prefix string, ioc string) ([]byte, error) { 18 | v := fmt.Sprintf("%s | %s | %s", inputEvent.HTTPMethod, inputEvent.HTTPHost, 19 | inputEvent.HTTPUrl) 20 | return GenericGetAlertObjForIoc(inputEvent, prefix, v, 21 | "%s Possibly bad HTTP URL: %s") 22 | } 23 | 24 | // AlertJSONProviderHTTPHost is an AlertJSONProvider for HTTP Host header 25 | // matches. 26 | type AlertJSONProviderHTTPHost struct{} 27 | 28 | // GetAlertJSON returns the "alert" subobject for an alert EVE event. 29 | func (a AlertJSONProviderHTTPHost) GetAlertJSON(inputEvent types.Entry, 30 | prefix string, ioc string) ([]byte, error) { 31 | return GenericGetAlertObjForIoc(inputEvent, prefix, ioc, 32 | "%s Possibly bad HTTP host: %s") 33 | } 34 | 35 | // AlertJSONProviderDNSReq is an AlertJSONProvider for DNS request matches. 36 | type AlertJSONProviderDNSReq struct{} 37 | 38 | // GetAlertJSON returns the "alert" subobject for an alert EVE event. 39 | func (a AlertJSONProviderDNSReq) GetAlertJSON(inputEvent types.Entry, 40 | prefix string, ioc string) ([]byte, error) { 41 | return GenericGetAlertObjForIoc(inputEvent, prefix, ioc, 42 | "%s Possibly bad DNS lookup to %s") 43 | } 44 | 45 | // AlertJSONProviderDNSResp is an AlertJSONProvider for DNS response matches. 46 | type AlertJSONProviderDNSResp struct{} 47 | 48 | // GetAlertJSON returns the "alert" subobject for an alert EVE event. 49 | func (a AlertJSONProviderDNSResp) GetAlertJSON(inputEvent types.Entry, 50 | prefix string, ioc string) ([]byte, error) { 51 | return GenericGetAlertObjForIoc(inputEvent, prefix, ioc, 52 | "%s Possibly bad DNS response for %s") 53 | } 54 | 55 | // AlertJSONProviderTLSSNI is an AlertJSONProvider for TLS SNI matches. 56 | type AlertJSONProviderTLSSNI struct{} 57 | 58 | // GetAlertJSON returns the "alert" subobject for an alert EVE event. 
59 | func (a AlertJSONProviderTLSSNI) GetAlertJSON(inputEvent types.Entry, 60 | prefix string, ioc string) ([]byte, error) { 61 | return GenericGetAlertObjForIoc(inputEvent, prefix, ioc, 62 | "%s Possibly bad TLS SNI: %s") 63 | } 64 | 65 | // AlertJSONProviderTLSFingerprint is an AlertJSONProvider for TLS Fingerprint matches. 66 | type AlertJSONProviderTLSFingerprint struct{} 67 | 68 | // GetAlertJSON returns the "alert" subobject for an alert EVE event. 69 | func (a AlertJSONProviderTLSFingerprint) GetAlertJSON(inputEvent types.Entry, 70 | prefix string, ioc string) ([]byte, error) { 71 | return GenericGetAlertObjForIoc(inputEvent, prefix, ioc, 72 | "%s Possibly bad TLS Fingerprint: %s") 73 | } 74 | -------------------------------------------------------------------------------- /processing/context_collector_test.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2019, DCSO GmbH 5 | 6 | import ( 7 | "encoding/json" 8 | "fmt" 9 | "math/rand" 10 | "reflect" 11 | "testing" 12 | "time" 13 | 14 | "github.com/DCSO/fever/types" 15 | log "github.com/sirupsen/logrus" 16 | ) 17 | 18 | func makeCCTestEvent(eType, flowID string) types.Entry { 19 | e := types.Entry{ 20 | SrcIP: fmt.Sprintf("10.%d.%d.%d", rand.Intn(250), rand.Intn(250), rand.Intn(250)), 21 | SrcPort: []int64{1, 2, 3, 4, 5}[rand.Intn(5)], 22 | DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(250)), 23 | DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], 24 | Timestamp: time.Now().Format(types.SuricataTimestampFormat), 25 | EventType: eType, 26 | Proto: "TCP", 27 | FlowID: flowID, 28 | } 29 | jsonBytes, _ := json.Marshal(e) 30 | e.JSONLine = string(jsonBytes) 31 | return e 32 | } 33 | 34 | func TestContextCollector(t *testing.T) { 35 | markedVals := make(map[string][]string) 36 | seenMarked := make(map[string][]string) 37 | dsub := func(entries Context, logger *log.Entry) error { 38 | for _, v := range entries { 39 | 
var parsed struct { 40 | FlowID string 41 | } 42 | err := json.Unmarshal([]byte(v), &parsed) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | seenMarked[parsed.FlowID] = append(seenMarked[parsed.FlowID], v) 47 | } 48 | return nil 49 | } 50 | cc := MakeContextCollector(dsub, 5*time.Minute) 51 | 52 | nofReports := 0 53 | for i := 0; i < 10000; i++ { 54 | isMarked := (rand.Intn(20) < 1) 55 | flowID := fmt.Sprintf("%d", rand.Intn(10000000)+10000) 56 | if isMarked { 57 | nofReports++ 58 | cc.Mark(flowID) 59 | } 60 | for j := 0; j < rand.Intn(200)+1; j++ { 61 | ev := makeCCTestEvent([]string{"http", "smb", "dns"}[rand.Intn(3)], flowID) 62 | if isMarked { 63 | markedVals[flowID] = append(markedVals[flowID], ev.JSONLine) 64 | } 65 | cc.Consume(&ev) 66 | } 67 | 68 | ev := makeCCTestEvent("flow", flowID) 69 | cc.Consume(&ev) 70 | } 71 | 72 | if len(markedVals) != len(seenMarked) { 73 | t.Fatalf("number of marked flows (%d) != number of results (%d)", len(markedVals), len(seenMarked)) 74 | } 75 | 76 | if !reflect.DeepEqual(markedVals, seenMarked) { 77 | t.Fatal("contents of results and recorded metadata maps differ") 78 | } 79 | } 80 | 81 | func TestContextCollectorMissingFlowID(t *testing.T) { 82 | e := types.Entry{ 83 | Timestamp: time.Now().Format(types.SuricataTimestampFormat), 84 | EventType: "stats", 85 | } 86 | jsonBytes, _ := json.Marshal(e) 87 | e.JSONLine = string(jsonBytes) 88 | 89 | count := 0 90 | 91 | dsub := func(entries Context, logger *log.Entry) error { 92 | count++ 93 | return nil 94 | } 95 | cc := MakeContextCollector(dsub, 5*time.Minute) 96 | 97 | cc.Consume(&e) 98 | 99 | if count != 0 { 100 | t.Fatalf("event with empty flow ID was considered") 101 | } 102 | 103 | flowID := "12345" 104 | cc.Mark(flowID) 105 | ev := makeCCTestEvent("dns", flowID) 106 | cc.Consume(&ev) 107 | ev = makeCCTestEvent("flow", flowID) 108 | cc.Consume(&ev) 109 | 110 | if count != 1 { 111 | t.Fatalf("wrong number of entries: %d", count) 112 | } 113 | } 114 | 
-------------------------------------------------------------------------------- /processing/heartbeat_injector_test.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2020, 2021, DCSO GmbH 5 | 6 | import ( 7 | "fmt" 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/DCSO/fever/types" 13 | 14 | "github.com/buger/jsonparser" 15 | ) 16 | 17 | type HeartbeatTestFwdHandler struct { 18 | Entries []types.Entry 19 | Lock sync.Mutex 20 | } 21 | 22 | func (h *HeartbeatTestFwdHandler) Consume(e *types.Entry) error { 23 | h.Lock.Lock() 24 | defer h.Lock.Unlock() 25 | h.Entries = append(h.Entries, *e) 26 | return nil 27 | } 28 | 29 | func (h *HeartbeatTestFwdHandler) GetEventTypes() []string { 30 | return []string{"*"} 31 | } 32 | 33 | func (h *HeartbeatTestFwdHandler) GetName() string { 34 | return "Heartbeat Injector Forwarding Test Handler" 35 | } 36 | 37 | func TestHeartbeatInjectorInvalidTime(t *testing.T) { 38 | hbth := HeartbeatTestFwdHandler{ 39 | Entries: make([]types.Entry, 0), 40 | } 41 | 42 | _, err := MakeHeartbeatInjector(&hbth, []string{"foo"}, []string{}) 43 | if err == nil { 44 | t.Fatal("invalid time not caught") 45 | } 46 | } 47 | 48 | func TestHeartbeatInjectorInvalidAlertTime(t *testing.T) { 49 | hbth := HeartbeatTestFwdHandler{ 50 | Entries: make([]types.Entry, 0), 51 | } 52 | 53 | _, err := MakeHeartbeatInjector(&hbth, []string{}, []string{"foo"}) 54 | if err == nil { 55 | t.Fatal("invalid time not caught") 56 | } 57 | } 58 | 59 | func TestHeartbeatInjector(t *testing.T) { 60 | hbth := HeartbeatTestFwdHandler{ 61 | Entries: make([]types.Entry, 0), 62 | } 63 | 64 | now := time.Now() 65 | ctime := []string{now.Format("15:04")} 66 | 67 | hbi, err := MakeHeartbeatInjector(&hbth, ctime, []string{}) 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | 72 | hbi.Run() 73 | for { 74 | hbth.Lock.Lock() 75 | if len(hbth.Entries) > 0 { 76 | hbth.Lock.Unlock() 77 
| break 78 | } 79 | hbth.Lock.Unlock() 80 | time.Sleep(100 * time.Millisecond) 81 | } 82 | hbi.Stop() 83 | 84 | hbJSON := hbth.Entries[0].JSONLine 85 | 86 | expectedHost := fmt.Sprintf("test-%d-%02d-%02d.vast", 87 | now.Year(), now.Month(), now.Day()) 88 | seenHost, err := jsonparser.GetString([]byte(hbJSON), "http", "hostname") 89 | if err != nil { 90 | t.Fatal(err) 91 | } 92 | if seenHost != expectedHost { 93 | t.Fatalf("wrong hostname for heartbeat: %s", seenHost) 94 | } 95 | } 96 | 97 | func TestHeartbeatAlertInjector(t *testing.T) { 98 | hbth := HeartbeatTestFwdHandler{ 99 | Entries: make([]types.Entry, 0), 100 | } 101 | 102 | now := time.Now() 103 | atime := []string{now.Format("15:04")} 104 | 105 | hbi, err := MakeHeartbeatInjector(&hbth, []string{}, atime) 106 | if err != nil { 107 | t.Fatal(err) 108 | } 109 | 110 | hbi.Run() 111 | for { 112 | hbth.Lock.Lock() 113 | if len(hbth.Entries) > 0 { 114 | hbth.Lock.Unlock() 115 | break 116 | } 117 | hbth.Lock.Unlock() 118 | time.Sleep(100 * time.Millisecond) 119 | } 120 | hbi.Stop() 121 | 122 | hbJSON := hbth.Entries[0].JSONLine 123 | 124 | sig, err := jsonparser.GetString([]byte(hbJSON), "alert", "signature") 125 | if err != nil { 126 | t.Fatal(err) 127 | } 128 | if sig != "DCSO FEVER TEST alert" { 129 | t.Fatalf("wrong signature for test alert: %s", sig) 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /processing/forward_handler.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, 2020, DCSO GmbH 5 | 6 | import ( 7 | "sync" 8 | "time" 9 | 10 | "github.com/DCSO/fever/types" 11 | "github.com/DCSO/fever/util" 12 | 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | // ForwardHandler is a handler that processes events by writing their JSON 17 | // representation into a UNIX socket. This is limited by a list of allowed 18 | // event types to be forwarded. 
// Consume processes an Entry and prepares it to be sent off to the
// forwarding sink.
func (fh *ForwardHandler) Consume(inEntry *types.Entry) error {
	// make copy to pass on from here
	e := *inEntry
	// mark flow as relevant when alert is seen
	if GlobalContextCollector != nil && e.EventType == types.EventTypeAlert {
		GlobalContextCollector.Mark(string(e.FlowID))
	}
	// we also perform active rDNS enrichment if requested
	if fh.DoRDNS && fh.RDNSHandler != nil {
		err := fh.RDNSHandler.Consume(&e)
		if err != nil {
			return err
		}
	}
	// Replace the final brace `}` in the JSON with the prepared string to
	// add the 'added fields' defined in the config. If the length of this
	// string is 1 then there are no added fields, only a final brace '}'.
	// In this case we don't even need to modify the JSON string at all.
	// NOTE(review): this assumes JSONLine is a single well-formed JSON
	// object ending in '}' with no trailing whitespace -- confirm upstream.
	if len(fh.AddedFields) > 1 {
		j := e.JSONLine
		l := len(j)
		j = j[:l-1]
		j += fh.AddedFields
		e.JSONLine = j
	}

	fh.MultiFwdChan <- e

	return nil
}
log.SetLevel(log.DebugLevel) 25 | 26 | // start mock server 27 | fakeServer := server.NewServer(serverURL) 28 | fakeServer.Start() 29 | 30 | // set up consumer 31 | allDone := make(chan bool) 32 | coll := make([]string, 0) 33 | c, err := util.NewConsumer(serverURL, "context", "direct", "context", 34 | "context", "foo-test1", func(d wabbit.Delivery) { 35 | coll = append(coll, string(d.Body())) 36 | if len(coll) == 4 { 37 | allDone <- true 38 | } 39 | }) 40 | if err != nil { 41 | t.Fatal(err) 42 | } 43 | 44 | // set up submitter 45 | submitter, err := util.MakeAMQPSubmitterWithReconnector(serverURL, 46 | "context", true, func(url string) (wabbit.Conn, error) { 47 | // we pass in a custom reconnector which uses the amqptest implementation 48 | var conn wabbit.Conn 49 | conn, err = amqptest.Dial(url) 50 | return conn, err 51 | }) 52 | if err != nil { 53 | t.Fatal(err) 54 | } 55 | cs := &ContextShipperAMQP{} 56 | inChan, err := cs.Start(submitter) 57 | if err != nil { 58 | t.Fatal(err) 59 | } 60 | 61 | inChan <- Context{`{"value":"c1"}`} 62 | inChan <- Context{`{"value":"c2"}`} 63 | inChan <- Context{`{"value":"c3"}`} 64 | inChan <- Context{`{"value":"c4"}`} 65 | 66 | // ... 
and wait until they are received and processed 67 | <-allDone 68 | // check if output is correct 69 | if len(coll) != 4 { 70 | t.Fail() 71 | } 72 | if !strings.Contains(coll[0], `"value":"c1"`) { 73 | t.Fatalf("value 1 incorrect: %v", coll[0]) 74 | } 75 | if !strings.Contains(coll[1], `"value":"c2"`) { 76 | t.Fatalf("value 2 incorrect: %v", coll[1]) 77 | } 78 | if !strings.Contains(coll[2], `"value":"c3"`) { 79 | t.Fatalf("value 3 incorrect: %v", coll[2]) 80 | } 81 | if !strings.Contains(coll[3], `"value":"c4"`) { 82 | t.Fatalf("value 4 incorrect: %v", coll[3]) 83 | } 84 | 85 | close(inChan) 86 | 87 | // tear down test setup 88 | submitter.Finish() 89 | fakeServer.Stop() 90 | c.Shutdown() 91 | } 92 | 93 | func TestContextShipperAMQPBrokenJSON(t *testing.T) { 94 | cs := &ContextShipperAMQP{} 95 | ds, _ := util.MakeDummySubmitter() 96 | inChan, err := cs.Start(ds) 97 | if err != nil { 98 | t.Fatal(err) 99 | } 100 | 101 | hook := test.NewGlobal() 102 | var entries []*log.Entry 103 | 104 | inChan <- Context{`{""value":1}`} 105 | 106 | for i := 0; i < 60; i++ { 107 | time.Sleep(1 * time.Second) 108 | entries = hook.AllEntries() 109 | if len(entries) > 0 { 110 | break 111 | } 112 | if i > 58 { 113 | t.Fatalf("timed out trying to receive error message for malformed JSON") 114 | } 115 | } 116 | 117 | close(inChan) 118 | found := false 119 | 120 | for _, entry := range entries { 121 | if entry.Message == `could not marshal event JSON: {""value":1}` { 122 | found = true 123 | break 124 | } 125 | } 126 | 127 | if !found { 128 | var entryStrings bytes.Buffer 129 | for i, entry := range entries { 130 | entryStrings.WriteString(fmt.Sprintf("%03d: %s\n", i, entry.Message)) 131 | } 132 | t.Fatalf("malformed JSON error message not found: %v", entryStrings.String()) 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /cmd/fever/cmds/bloom.go: -------------------------------------------------------------------------------- 1 | 
package cmd 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2021, DCSO GmbH 5 | 6 | import ( 7 | "bufio" 8 | "context" 9 | "fmt" 10 | "os" 11 | 12 | "github.com/DCSO/fever/mgmt" 13 | "github.com/sirupsen/logrus" 14 | "github.com/spf13/cobra" 15 | "google.golang.org/grpc" 16 | "google.golang.org/protobuf/types/known/emptypb" 17 | ) 18 | 19 | var ( 20 | clt mgmt.MgmtServiceClient 21 | conn *grpc.ClientConn 22 | ) 23 | 24 | func bloomAdd(cmd *cobra.Command, args []string) { 25 | stream, err := clt.BloomAdd(context.TODO()) 26 | if err != nil { 27 | logrus.Fatal(err) 28 | } 29 | 30 | scanner := bufio.NewScanner(os.Stdin) 31 | for scanner.Scan() { 32 | if err := stream.Send(&mgmt.MgmtBloomAddRequest{ 33 | Ioc: scanner.Text(), 34 | }); err != nil { 35 | logrus.Fatal(err) 36 | } 37 | } 38 | resp, err := stream.CloseAndRecv() 39 | if err != nil { 40 | logrus.Fatal(err) 41 | } 42 | logrus.Debugf("added %d items", resp.GetAdded()) 43 | } 44 | 45 | func bloomInfo(cmd *cobra.Command, args []string) { 46 | got, err := clt.BloomInfo(context.TODO(), &emptypb.Empty{}) 47 | if err != nil { 48 | logrus.Fatal(err) 49 | } 50 | fmt.Printf("Capacity: %d\n", got.GetCapacity()) 51 | fmt.Printf("Elements: %d\n", got.GetElements()) 52 | fmt.Printf("# Hashfuncs: %d\n", got.GetHashfuncs()) 53 | fmt.Printf("FP Probability: %v\n", got.GetFpprob()) 54 | fmt.Printf("Bits: %d\n", got.GetBits()) 55 | } 56 | 57 | func bloomSave(cmd *cobra.Command, args []string) { 58 | _, err := clt.BloomSave(context.TODO(), &emptypb.Empty{}) 59 | if err != nil { 60 | logrus.Fatal(err) 61 | } 62 | } 63 | 64 | func bloomReload(cmd *cobra.Command, args []string) { 65 | _, err := clt.BloomReload(context.TODO(), &emptypb.Empty{}) 66 | if err != nil { 67 | logrus.Fatal(err) 68 | } 69 | } 70 | 71 | var bloomInfoCmd = &cobra.Command{ 72 | Use: "show", 73 | Short: "print information on Bloom filter", 74 | Long: `The 'bloom info' command shows stats on the Bloom filter in FEVER's Bloom filter matcher.`, 75 | Run: bloomInfo, 76 | } 
// init wires the 'bloom' subcommands into the root command and installs
// persistent pre/post hooks that open and close the gRPC connection to the
// FEVER management endpoint around each invocation.
func init() {
	rootCmd.AddCommand(bloomCmd)
	bloomCmd.AddCommand(bloomAddCmd)
	bloomCmd.AddCommand(bloomInfoCmd)
	bloomCmd.AddCommand(bloomSaveCmd)
	bloomCmd.AddCommand(bloomReloadCmd)

	// Establish the client connection before any bloom subcommand runs.
	bloomCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
		var err error
		mgmtCfg := mgmt.EndpointConfigFromViper()
		// assignment via '=' (not ':=') on purpose: the package-level conn
		// must be set for the PersistentPostRunE hook below
		conn, err = grpc.Dial(mgmtCfg.DialString(), mgmtCfg.DialOptions...)
		if err != nil {
			return err
		}
		clt = mgmt.NewMgmtServiceClient(conn)
		return nil
	}

	// Tear the connection down after the subcommand has finished.
	// NOTE(review): this assumes PostRunE only fires after a successful
	// PreRunE (so conn is non-nil) -- confirm against the cobra version used.
	bloomCmd.PersistentPostRunE = func(cmd *cobra.Command, args []string) error {
		return conn.Close()
	}
}
github.com/golang/protobuf v1.5.4 // indirect 40 | github.com/google/go-cmp v0.6.0 // indirect 41 | github.com/google/uuid v1.4.0 // indirect 42 | github.com/hashicorp/hcl v1.0.0 // indirect 43 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 44 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect 45 | github.com/jackc/pgconn v1.14.3 // indirect 46 | github.com/jackc/pgio v1.0.0 // indirect 47 | github.com/jackc/pgpassfile v1.0.0 // indirect 48 | github.com/jackc/pgproto3/v2 v2.3.3 // indirect 49 | github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect 50 | github.com/jackc/pgtype v1.14.0 // indirect 51 | github.com/jackc/puddle v1.3.0 // indirect 52 | github.com/klauspost/compress v1.16.7 // indirect 53 | github.com/kr/pretty v0.3.1 // indirect 54 | github.com/lib/pq v1.10.9 // indirect 55 | github.com/magiconair/properties v1.8.1 // indirect 56 | github.com/mitchellh/mapstructure v1.1.2 // indirect 57 | github.com/moby/patternmatcher v0.6.0 // indirect 58 | github.com/moby/sys/sequential v0.5.0 // indirect 59 | github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect 60 | github.com/opencontainers/image-spec v1.1.0 // indirect 61 | github.com/pborman/uuid v1.2.1 // indirect 62 | github.com/pelletier/go-toml v1.9.5 // indirect 63 | github.com/pmezard/go-difflib v1.0.0 // indirect 64 | github.com/rogpeppe/go-internal v1.11.0 // indirect 65 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 66 | github.com/spf13/afero v1.9.2 // indirect 67 | github.com/spf13/cast v1.3.0 // indirect 68 | github.com/spf13/jwalterweatherman v1.0.0 // indirect 69 | github.com/spf13/pflag v1.0.5 // indirect 70 | github.com/subosito/gotenv v1.2.0 // indirect 71 | github.com/tiago4orion/conjure v0.0.0-20150908101743-93cb30b9d218 // indirect 72 | golang.org/x/crypto v0.45.0 // indirect 73 | golang.org/x/net v0.47.0 // indirect 74 | golang.org/x/sys v0.38.0 // indirect 75 | golang.org/x/text v0.31.0 // indirect 76 | 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect 77 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 78 | gopkg.in/ini.v1 v1.51.0 // indirect 79 | gopkg.in/yaml.v2 v2.4.0 // indirect 80 | gopkg.in/yaml.v3 v3.0.1 // indirect 81 | ) 82 | -------------------------------------------------------------------------------- /processing/rdns_handler.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2019, 2020, DCSO GmbH 5 | 6 | import ( 7 | "fmt" 8 | "net" 9 | "sync" 10 | 11 | "github.com/DCSO/fever/types" 12 | "github.com/DCSO/fever/util" 13 | "github.com/buger/jsonparser" 14 | 15 | log "github.com/sirupsen/logrus" 16 | "github.com/yl2chen/cidranger" 17 | ) 18 | 19 | // RDNSHandler is a handler that enriches events with reverse DNS 20 | // information looked up on the sensor, for both source and destination 21 | // IP addresses. 22 | type RDNSHandler struct { 23 | sync.Mutex 24 | Logger *log.Entry 25 | HostNamer util.HostNamer 26 | PrivateRanges cidranger.Ranger 27 | PrivateRangesOnly bool 28 | } 29 | 30 | // MakeRDNSHandler returns a new RDNSHandler, backed by the passed HostNamer. 
31 | func MakeRDNSHandler(hn util.HostNamer) *RDNSHandler { 32 | rh := &RDNSHandler{ 33 | Logger: log.WithFields(log.Fields{ 34 | "domain": "rdns", 35 | }), 36 | PrivateRanges: cidranger.NewPCTrieRanger(), 37 | HostNamer: hn, 38 | } 39 | for _, cidr := range []string{ 40 | "10.0.0.0/8", 41 | "172.16.0.0/12", 42 | "192.168.0.0/16", 43 | "fc00::/7", 44 | } { 45 | _, block, err := net.ParseCIDR(cidr) 46 | if err != nil { 47 | log.Fatalf("cannot parse fixed private IP range %v", cidr) 48 | } 49 | rh.PrivateRanges.Insert(cidranger.NewBasicRangerEntry(*block)) 50 | } 51 | return rh 52 | } 53 | 54 | // EnableOnlyPrivateIPRanges ensures that only private (RFC1918) IP ranges 55 | // are enriched 56 | func (a *RDNSHandler) EnableOnlyPrivateIPRanges() { 57 | a.PrivateRangesOnly = true 58 | } 59 | 60 | // Consume processes an Entry and enriches it 61 | func (a *RDNSHandler) Consume(e *types.Entry) error { 62 | var res []string 63 | var err error 64 | var isPrivate bool 65 | 66 | if e.SrcIP != "" { 67 | ip := net.ParseIP(e.SrcIP) 68 | if ip != nil { 69 | isPrivate, err = a.PrivateRanges.Contains(ip) 70 | if err != nil { 71 | return err 72 | } 73 | if !a.PrivateRangesOnly || isPrivate { 74 | res, err = a.HostNamer.GetHostname(e.SrcIP) 75 | if err == nil { 76 | for i, v := range res { 77 | hostname, err := util.EscapeJSON(v) 78 | if err != nil { 79 | log.Warningf("cannot escape hostname: %s", v) 80 | continue 81 | } 82 | newJSON, err := jsonparser.Set([]byte(e.JSONLine), hostname, 83 | "src_host", fmt.Sprintf("[%d]", i), "rdns") 84 | if err != nil { 85 | log.Warningf("cannot set hostname: %s", hostname) 86 | continue 87 | } else { 88 | e.JSONLine = string(newJSON) 89 | } 90 | } 91 | } 92 | } 93 | } else { 94 | log.Error("IP not valid") 95 | } 96 | } 97 | if e.DestIP != "" { 98 | ip := net.ParseIP(e.DestIP) 99 | if ip != nil { 100 | isPrivate, err = a.PrivateRanges.Contains(ip) 101 | if err != nil { 102 | return err 103 | } 104 | if !a.PrivateRangesOnly || isPrivate { 105 | res, 
err = a.HostNamer.GetHostname(e.DestIP) 106 | if err == nil { 107 | for i, v := range res { 108 | hostname, err := util.EscapeJSON(v) 109 | if err != nil { 110 | log.Warningf("cannot escape hostname: %s", v) 111 | continue 112 | } 113 | newJSON, err := jsonparser.Set([]byte(e.JSONLine), hostname, 114 | "dest_host", fmt.Sprintf("[%d]", i), "rdns") 115 | if err != nil { 116 | log.Warningf("cannot set hostname: %s", hostname) 117 | continue 118 | } else { 119 | e.JSONLine = string(newJSON) 120 | } 121 | } 122 | } 123 | } 124 | } else { 125 | log.Error("IP not valid") 126 | } 127 | } 128 | return nil 129 | } 130 | 131 | // GetName returns the name of the handler 132 | func (a *RDNSHandler) GetName() string { 133 | return "reverse DNS handler" 134 | } 135 | 136 | // GetEventTypes returns a slice of event type strings that this handler 137 | // should be applied to 138 | func (a *RDNSHandler) GetEventTypes() []string { 139 | return []string{"http", "dns", "tls", "smtp", "flow", "ssh", "tls", "smb", "alert"} 140 | } 141 | -------------------------------------------------------------------------------- /processing/dns_aggregator_test.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, 2019, DCSO GmbH 5 | 6 | import ( 7 | "bytes" 8 | "encoding/json" 9 | "fmt" 10 | "math/rand" 11 | "sync" 12 | "testing" 13 | "time" 14 | 15 | "github.com/DCSO/fever/types" 16 | "github.com/DCSO/fever/util" 17 | ) 18 | 19 | const ( 20 | numTestEvents = 100000 21 | ) 22 | 23 | func makeDNSEvent() types.Entry { 24 | e := types.Entry{ 25 | SrcIP: fmt.Sprintf("10.0.0.%d", rand.Intn(5)+1), 26 | SrcPort: 53, 27 | DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(50)), 28 | DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], 29 | Timestamp: time.Now().Format(types.SuricataTimestampFormat), 30 | EventType: "DNS", 31 | Proto: "TCP", 32 | DNSRCode: []string{"NOERROR", "NXDOMAIN"}[rand.Intn(2)], 33 | 
// TestDNSAggregator feeds a large number of random DNS events into the
// aggregator and verifies that the aggregated output contains exactly the
// same (rrname, rrtype, rdata, rcode) combinations, with the same counts,
// and the same set of domains, as were generated.
func TestDNSAggregator(t *testing.T) {
	rand.Seed(time.Now().UTC().UnixNano())
	outChan := make(chan types.Entry)
	consumeWaitChan := make(chan bool)
	closeChan := make(chan bool)

	f := MakeDNSAggregator(1*time.Second, outChan)

	// sanity checks on the handler's registration metadata
	daTypes := f.GetEventTypes()
	if len(daTypes) != 1 {
		t.Fatal("DNS aggregation handler should only claim one type")
	}
	if daTypes[0] != "dns" {
		t.Fatal("DNS aggregation handler should only claim 'dns' type")
	}
	if f.GetName() != "DB DNS aggregator" {
		t.Fatal("DNS aggregation handler has wrong name")
	}

	// observed* is filled by the consumer goroutine below; setup* is filled
	// by the producing loop. observedLock guards observedSituations, which
	// is accessed from both goroutines.
	var observedLock sync.Mutex
	observedSituations := make(map[string]int)
	observedDomains := make(map[string]bool)
	setupSituations := make(map[string]int)
	setupDomains := make(map[string]bool)

	// consumer: unpack each aggregate event and tally every detail tuple
	go func() {
		var buf bytes.Buffer
		for {
			select {
			case e := <-outChan:
				var out AggregateDNSEvent
				err := json.Unmarshal([]byte(e.JSONLine), &out)
				if err != nil {
					t.Fail()
				}
				for _, v := range out.DNS.Details {
					// key is the concatenation rrname|rrtype|rdata|rcode
					buf.Write([]byte(out.DNS.Rrname))
					buf.Write([]byte(v.Rrtype))
					buf.Write([]byte(v.Rdata))
					buf.Write([]byte(v.Rcode))
					observedLock.Lock()
					observedSituations[buf.String()]++
					observedLock.Unlock()
					observedDomains[out.DNS.Rrname] = true
					buf.Reset()
				}
			case <-closeChan:
				close(consumeWaitChan)
				return
			}
		}
	}()

	// producer: generate random DNS events, recording the expected tallies
	// with the same key scheme used by the consumer above
	f.Run()
	for i := 0; i < numTestEvents; i++ {
		var buf bytes.Buffer
		ev := makeDNSEvent()
		buf.Write([]byte(ev.DNSRRName))
		buf.Write([]byte(ev.DNSRRType))
		buf.Write([]byte(ev.DNSRData))
		buf.Write([]byte(ev.DNSRCode))
		setupSituations[buf.String()]++
		setupDomains[ev.DNSRRName] = true
		buf.Reset()
		f.Consume(&ev)
	}

	// watchdog: once the consumer has seen at least as many distinct
	// situations as were produced, signal it to shut down
	go func() {
		for {
			observedLock.Lock()
			if len(setupSituations) <= len(observedSituations) {
				observedLock.Unlock()
				break
			}
			observedLock.Unlock()
			time.Sleep(100 * time.Millisecond)

		}
		close(closeChan)
	}()

	<-consumeWaitChan
	close(outChan)

	waitChan := make(chan bool)
	f.Stop(waitChan)
	<-waitChan

	// compare tallies in both directions so that neither missing nor extra
	// keys go unnoticed
	if len(setupSituations) != len(observedSituations) {
		t.Fatalf("results have different dimensions: %d/%d", len(setupSituations),
			len(observedSituations))
	}
	for k, v := range setupSituations {
		if _, ok := observedSituations[k]; !ok {
			t.Fatalf("missing key: %s", k)
		}
		v2 := observedSituations[k]
		if v2 != v {
			t.Fatalf("mismatching counts for key %s: %d/%d", k, v, v2)
		}
	}
	for k, v := range observedSituations {
		if _, ok := setupSituations[k]; !ok {
			t.Fatalf("missing key: %s", k)
		}
		v2 := setupSituations[k]
		if v2 != v {
			t.Fatalf("mismatching counts for key %s: %d/%d", k, v, v2)
		}
	}
	if len(setupDomains) != len(observedDomains) {
		t.Fatalf("results have different dimensions: %d/%d", len(setupDomains),
			len(observedDomains))
	}
	for k := range observedDomains {
		if _, ok := setupDomains[k]; !ok {
			t.Fatalf("missing key: %s", k)
		}
	}
}
"github.com/NeowayLabs/wabbit/amqptest/server" 16 | log "github.com/sirupsen/logrus" 17 | ) 18 | 19 | var testStruct = struct { 20 | TestVal uint64 `influx:"testval"` 21 | TestVal2 uint64 `influx:"testvalue"` 22 | TestVal3 uint64 23 | }{ 24 | 1, 25 | 2, 26 | 3, 27 | } 28 | 29 | var testStructUntagged = struct { 30 | TestVal uint64 31 | TestVal2 uint64 32 | TestVal3 uint64 33 | }{ 34 | 1, 35 | 2, 36 | 3, 37 | } 38 | 39 | func TestPerformanceStatsEncoderEmpty(t *testing.T) { 40 | serverURL := "amqp://sensor:sensor@127.0.0.1:9999/%2f/" 41 | 42 | // start mock AMQP server 43 | fakeServer := server.NewServer(serverURL) 44 | fakeServer.Start() 45 | defer fakeServer.Stop() 46 | 47 | // set up consumer 48 | results := make([]string, 0) 49 | c, err := NewConsumer(serverURL, "tdh.metrics", "direct", "tdh.metrics.testqueue", 50 | "", "", func(d wabbit.Delivery) { 51 | results = append(results, string(d.Body())) 52 | }) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | defer c.Shutdown() 57 | 58 | // set up submitter 59 | statssubmitter, err := MakeAMQPSubmitterWithReconnector(serverURL, 60 | "tdh.metrics", true, func(url string) (wabbit.Conn, error) { 61 | // we pass in a custom reconnector which uses the amqptest implementation 62 | var conn wabbit.Conn 63 | conn, err = amqptest.Dial(url) 64 | return conn, err 65 | }) 66 | if err != nil { 67 | t.Fatal(err) 68 | } 69 | defer statssubmitter.Finish() 70 | 71 | // create InfluxDB line protocol encoder/submitter 72 | pse := MakePerformanceStatsEncoder(statssubmitter, 1*time.Second, false) 73 | pse.Submit(testStructUntagged) 74 | time.Sleep(1 * time.Second) 75 | 76 | if len(results) != 0 { 77 | t.Fatalf("unexpected result length: %d !=0", len(results)) 78 | } 79 | } 80 | 81 | func TestPerformanceStatsEncoder(t *testing.T) { 82 | serverURL := "amqp://sensor:sensor@127.0.0.1:9999/%2f/" 83 | 84 | // start mock AMQP server 85 | fakeServer := server.NewServer(serverURL) 86 | fakeServer.Start() 87 | defer fakeServer.Stop() 88 | 89 | // 
set up consumer 90 | results := make([]string, 0) 91 | gateChan := make(chan bool) 92 | var resultsLock sync.Mutex 93 | c, err := NewConsumer(serverURL, "tdh.metrics", "direct", "tdh.metrics.testqueue", 94 | "", "", func(d wabbit.Delivery) { 95 | resultsLock.Lock() 96 | results = append(results, string(d.Body())) 97 | resultsLock.Unlock() 98 | log.Info(string(d.Body())) 99 | gateChan <- true 100 | }) 101 | if err != nil { 102 | t.Fatal(err) 103 | } 104 | defer c.Shutdown() 105 | 106 | // set up submitter 107 | statssubmitter, err := MakeAMQPSubmitterWithReconnector(serverURL, 108 | "tdh.metrics", true, func(url string) (wabbit.Conn, error) { 109 | // we pass in a custom reconnector which uses the amqptest implementation 110 | var conn wabbit.Conn 111 | conn, err = amqptest.Dial(url) 112 | return conn, err 113 | }) 114 | if err != nil { 115 | t.Fatal(err) 116 | } 117 | defer statssubmitter.Finish() 118 | 119 | // create InfluxDB line protocol encoder/submitter 120 | pse := MakePerformanceStatsEncoder(statssubmitter, 1*time.Second, false) 121 | pse.Submit(testStruct) 122 | <-gateChan 123 | pse.Submit(testStruct) 124 | <-gateChan 125 | testStruct.TestVal = 3 126 | pse.Submit(testStruct) 127 | <-gateChan 128 | pse.Submit(testStruct) 129 | <-gateChan 130 | 131 | resultsLock.Lock() 132 | if len(results) != 4 { 133 | t.Fatalf("unexpected result length: %d != 4", len(results)) 134 | } 135 | if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ testval=1,testvalue=2", ToolName), []byte(results[0])); !match { 136 | t.Fatalf("unexpected match content: %s", results[0]) 137 | } 138 | if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ testval=1,testvalue=2", ToolName), []byte(results[1])); !match { 139 | t.Fatalf("unexpected match content: %s", results[1]) 140 | } 141 | if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ testval=3,testvalue=2", ToolName), []byte(results[2])); !match { 142 | t.Fatalf("unexpected match content: %s", results[2]) 143 | } 144 | if match, _ := 
regexp.Match(fmt.Sprintf("^%s,[^ ]+ testval=3,testvalue=2", ToolName), []byte(results[3])); !match { 145 | t.Fatalf("unexpected match content: %s", results[3]) 146 | } 147 | resultsLock.Unlock() 148 | } 149 | -------------------------------------------------------------------------------- /processing/flow_extractor.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "bytes" 8 | "os" 9 | "strings" 10 | "sync" 11 | "time" 12 | 13 | "github.com/DCSO/fever/types" 14 | "github.com/DCSO/fever/util" 15 | 16 | "github.com/DCSO/bloom" 17 | log "github.com/sirupsen/logrus" 18 | ) 19 | 20 | // FlowExtractor is an aggregator that extracts the flows from 21 | // "hosts of interest" and sends them to the backend. 22 | type FlowExtractor struct { 23 | SensorID string 24 | BloomPath string 25 | BloomFilter *bloom.BloomFilter 26 | FlowsMutex sync.RWMutex 27 | flowCount int 28 | Flows *bytes.Buffer 29 | SubmitChannel chan []byte 30 | Submitter util.StatsSubmitter 31 | FlushPeriod time.Duration 32 | FlushCount int 33 | CloseChan chan bool 34 | ClosedChan chan bool 35 | Logger *log.Entry 36 | } 37 | 38 | // MakeFlowExtractor creates a new empty FlowExtractor. 
39 | func MakeFlowExtractor(flushPeriod time.Duration, flushCount int, bloomPath string, submitter util.StatsSubmitter) (*FlowExtractor, error) { 40 | 41 | var bloomFilter *bloom.BloomFilter 42 | 43 | if bloomPath != "" { 44 | compressed := false 45 | if strings.HasSuffix(bloomPath, ".gz") { 46 | compressed = true 47 | } 48 | var err error 49 | bloomFilter, err = bloom.LoadFilter(bloomPath, compressed) 50 | if err != nil { 51 | return nil, err 52 | } 53 | } 54 | 55 | fe := &FlowExtractor{ 56 | FlushPeriod: flushPeriod, 57 | Submitter: submitter, 58 | BloomPath: bloomPath, 59 | Logger: log.WithFields(log.Fields{ 60 | "domain": "flow_extractor", 61 | }), 62 | Flows: new(bytes.Buffer), 63 | SubmitChannel: make(chan []byte, 60), 64 | BloomFilter: bloomFilter, 65 | CloseChan: make(chan bool), 66 | ClosedChan: make(chan bool), 67 | FlushCount: flushCount, 68 | flowCount: 0, 69 | } 70 | fe.SensorID, _ = os.Hostname() 71 | return fe, nil 72 | } 73 | 74 | func (fe *FlowExtractor) flush() { 75 | fe.FlowsMutex.Lock() 76 | myFlows := fe.Flows 77 | fe.Flows = new(bytes.Buffer) 78 | fe.flowCount = 0 79 | fe.FlowsMutex.Unlock() 80 | select { 81 | case fe.SubmitChannel <- myFlows.Bytes(): 82 | break 83 | default: 84 | log.Warning("Flow channel is full, cannot submit message...") 85 | } 86 | } 87 | 88 | // Consume processes an Entry, adding the data within to the flows 89 | func (fe *FlowExtractor) Consume(e *types.Entry) error { 90 | fe.FlowsMutex.Lock() 91 | defer fe.FlowsMutex.Unlock() 92 | 93 | if fe.BloomFilter != nil { 94 | if !fe.BloomFilter.Check([]byte(e.SrcIP)) && !fe.BloomFilter.Check([]byte(e.DestIP)) { 95 | return nil 96 | } 97 | } 98 | 99 | var fev types.FlowEvent 100 | err := fev.FromEntry(e) 101 | 102 | if err != nil { 103 | return err 104 | } 105 | 106 | err = fev.Marshal(fe.Flows) 107 | 108 | fe.flowCount++ 109 | 110 | return err 111 | } 112 | 113 | // Run starts the background aggregation service for this handler 114 | func (fe *FlowExtractor) Run() { 115 | 
//this goroutine asynchronously submit flow messages 116 | go func() { 117 | for message := range fe.SubmitChannel { 118 | fe.Submitter.Submit(message, "", "application/binary-flows") 119 | } 120 | }() 121 | //this go routine takes care of flushing the flows 122 | go func() { 123 | i := 0 * time.Second 124 | interval := 100 * time.Millisecond 125 | for { 126 | select { 127 | case <-fe.CloseChan: 128 | close(fe.SubmitChannel) 129 | close(fe.ClosedChan) 130 | return 131 | default: 132 | //we flush if the flush period has passed, or if the count 133 | //of events is larger then the flush count 134 | fe.FlowsMutex.Lock() 135 | flowCount := fe.flowCount 136 | fe.FlowsMutex.Unlock() 137 | if i >= fe.FlushPeriod || flowCount > fe.FlushCount { 138 | fe.flush() 139 | i = 0 * time.Second 140 | } 141 | time.Sleep(interval) 142 | i += interval 143 | } 144 | } 145 | }() 146 | } 147 | 148 | // Stop causes the aggregator to cease aggregating and submitting data 149 | func (fe *FlowExtractor) Stop(stopChan chan bool) { 150 | close(fe.CloseChan) 151 | <-fe.ClosedChan 152 | close(stopChan) 153 | } 154 | 155 | // GetName returns the name of the handler 156 | func (fe *FlowExtractor) GetName() string { 157 | return "Flow extractor" 158 | } 159 | 160 | // GetEventTypes returns a slice of event type strings that this handler 161 | // should be applied to 162 | func (fe *FlowExtractor) GetEventTypes() []string { 163 | return []string{"flow"} 164 | } 165 | -------------------------------------------------------------------------------- /util/submitter_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "bytes" 8 | "testing" 9 | "time" 10 | 11 | log "github.com/sirupsen/logrus" 12 | 13 | "github.com/NeowayLabs/wabbit" 14 | "github.com/NeowayLabs/wabbit/amqptest" 15 | "github.com/NeowayLabs/wabbit/amqptest/server" 16 | ) 17 | 18 | func TestSubmitter(t 
*testing.T) { 19 | serverURL := "amqp://sensor:sensor@localhost:9999/%2f/" 20 | log.SetLevel(log.DebugLevel) 21 | 22 | // start mock server 23 | fakeServer := server.NewServer(serverURL) 24 | fakeServer.Start() 25 | 26 | // set up consumer 27 | var buf bytes.Buffer 28 | allDone := make(chan bool) 29 | c, err := NewConsumer(serverURL, "foo.bar.test", "direct", "foo", 30 | "foo", "foo-test1", func(d wabbit.Delivery) { 31 | buf.Write(d.Body()) 32 | if buf.Len() == 4 { 33 | allDone <- true 34 | } 35 | }) 36 | if err != nil { 37 | t.Fatal(err) 38 | } 39 | 40 | // set up submitter 41 | submitter, err := MakeAMQPSubmitterWithReconnector(serverURL, 42 | "foo.bar.test", true, func(url string) (wabbit.Conn, error) { 43 | // we pass in a custom reconnector which uses the amqptest implementation 44 | var conn wabbit.Conn 45 | conn, err = amqptest.Dial(url) 46 | return conn, err 47 | }) 48 | if err != nil { 49 | t.Fatal(err) 50 | } 51 | 52 | // send some messages... 53 | submitter.Submit([]byte("1"), "foo", "text/plain") 54 | submitter.Submit([]byte("2"), "foo", "text/plain") 55 | submitter.Submit([]byte("3"), "foo", "text/plain") 56 | submitter.Submit([]byte("4"), "foo", "text/plain") 57 | 58 | // ... 
and wait until they are received and processed 59 | <-allDone 60 | // check if order and length is correct 61 | if buf.String() != "1234" { 62 | t.Fail() 63 | } 64 | 65 | // tear down test setup 66 | submitter.Finish() 67 | fakeServer.Stop() 68 | c.Shutdown() 69 | 70 | } 71 | 72 | func TestSubmitterReconnect(t *testing.T) { 73 | serverURL := "amqp://sensor:sensor@localhost:9992/%2f/" 74 | log.SetLevel(log.DebugLevel) 75 | 76 | // start mock server 77 | fakeServer := server.NewServer(serverURL) 78 | fakeServer.Start() 79 | 80 | // set up consumer 81 | var buf bytes.Buffer 82 | done := make(chan bool) 83 | c, err := NewConsumer(serverURL, "foo.bar.test", "direct", "foo", 84 | "foo", "foo-test1", func(d wabbit.Delivery) { 85 | buf.Write(d.Body()) 86 | log.Printf("received '%s', buf length %d", d.Body(), buf.Len()) 87 | if buf.Len() == 2 { 88 | done <- true 89 | } 90 | }) 91 | if err != nil { 92 | t.Fatal(err) 93 | } 94 | 95 | // set up submitter 96 | submitter, err := MakeAMQPSubmitterWithReconnector(serverURL, 97 | "foo.bar.test", true, func(url string) (wabbit.Conn, error) { 98 | // we pass in a custom reconnector which uses the amqptest implementation 99 | var conn wabbit.Conn 100 | conn, err = amqptest.Dial(url) 101 | return conn, err 102 | }) 103 | if err != nil { 104 | t.Fatal(err) 105 | } 106 | defer submitter.Finish() 107 | 108 | // send some messages... 
109 | submitter.Submit([]byte("A"), "foo", "text/plain") 110 | submitter.Submit([]byte("B"), "foo", "text/plain") 111 | stopped := make(chan bool) 112 | restarted := make(chan bool) 113 | <-done 114 | go func() { 115 | fakeServer.Stop() 116 | close(stopped) 117 | time.Sleep(5 * time.Second) 118 | fakeServer := server.NewServer(serverURL) 119 | fakeServer.Start() 120 | close(restarted) 121 | }() 122 | <-stopped 123 | log.Info("server stopped") 124 | 125 | // these are buffered on client side because submitter will not publish 126 | // with immediate flag set 127 | submitter.Submit([]byte("C"), "foo", "text/plain") 128 | submitter.Submit([]byte("D"), "foo", "text/plain") 129 | 130 | <-restarted 131 | log.Info("server restarted") 132 | 133 | // reconnect consumer 134 | c.Shutdown() 135 | c2, err := NewConsumer(serverURL, "foo.bar.test", "direct", "foo", 136 | "foo", "foo-test1", func(d wabbit.Delivery) { 137 | buf.Write(d.Body()) 138 | log.Printf("received '%s', buf length %d", d.Body(), buf.Len()) 139 | if buf.Len() == 6 { 140 | done <- true 141 | } 142 | }) 143 | if err != nil { 144 | t.Fatal(err) 145 | } 146 | 147 | submitter.Submit([]byte("E"), "foo", "text/plain") 148 | submitter.Submit([]byte("F"), "foo", "text/plain") 149 | 150 | // ... 
and wait until they are received and processed 151 | <-done 152 | log.Debug("All done") 153 | 154 | // check if order and length is correct 155 | log.Info(buf.String()) 156 | if buf.String() != "ABCDEF" { 157 | t.Fail() 158 | } 159 | 160 | // tear down test setup 161 | c2.Shutdown() 162 | fakeServer.Stop() 163 | } 164 | -------------------------------------------------------------------------------- /processing/flow_profiler.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2020, DCSO GmbH 5 | 6 | import ( 7 | "fmt" 8 | "sync" 9 | "time" 10 | 11 | "github.com/DCSO/fever/types" 12 | "github.com/DCSO/fever/util" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | // ProtoProfile contains flow statistics for a give app layer protocol. 17 | type ProtoProfile struct { 18 | PacketsToSrv uint64 19 | PacketsToClt uint64 20 | BytesToSrv uint64 21 | BytesToClt uint64 22 | } 23 | 24 | // FlowProfiler counts EVE event type statistics, such as number and size 25 | // of JSON data received from the input. 26 | type FlowProfiler struct { 27 | SensorID string 28 | Host string 29 | Profile map[string]ProtoProfile 30 | FlushPeriod time.Duration 31 | ProfileMutex sync.Mutex 32 | CloseChan chan bool 33 | ClosedChan chan bool 34 | Logger *log.Entry 35 | Submitter util.StatsSubmitter 36 | SubmitChannel chan []byte 37 | SubmitChannelFull bool 38 | } 39 | 40 | // MakeFlowProfiler creates a new FlowProfiler. 
41 | func MakeFlowProfiler(flushPeriod time.Duration, submitter util.StatsSubmitter) (*FlowProfiler, error) { 42 | a := &FlowProfiler{ 43 | FlushPeriod: flushPeriod, 44 | Logger: log.WithFields(log.Fields{ 45 | "domain": "flowprofiler", 46 | }), 47 | Profile: make(map[string]ProtoProfile), 48 | CloseChan: make(chan bool), 49 | ClosedChan: make(chan bool), 50 | SubmitChannel: make(chan []byte, 60), 51 | Submitter: submitter, 52 | } 53 | a.Host = getFQDN() 54 | return a, nil 55 | } 56 | 57 | func (a *FlowProfiler) formatLineProtocol() []string { 58 | out := make([]string, 0) 59 | a.ProfileMutex.Lock() 60 | myProfile := a.Profile 61 | for proto, protoVals := range myProfile { 62 | out = append(out, fmt.Sprintf("%s,host=%s,proto=%s flowbytestoclient=%d,flowbytestoserver=%d,flowpktstoclient=%d,flowpktstoserver=%d %d", 63 | util.ToolName, a.Host, proto, 64 | protoVals.BytesToClt, protoVals.BytesToSrv, 65 | protoVals.PacketsToClt, protoVals.PacketsToSrv, 66 | uint64(time.Now().UnixNano()))) 67 | a.Profile[proto] = ProtoProfile{} 68 | } 69 | a.ProfileMutex.Unlock() 70 | return out 71 | } 72 | 73 | func (a *FlowProfiler) flush() { 74 | lineStrings := a.formatLineProtocol() 75 | for _, lineString := range lineStrings { 76 | select { 77 | case a.SubmitChannel <- []byte(lineString): 78 | if a.SubmitChannelFull { 79 | log.Warning("channel was free to submit again") 80 | a.SubmitChannelFull = false 81 | } 82 | default: 83 | if !a.SubmitChannelFull { 84 | log.Warning("channel is full, cannot submit message...") 85 | a.SubmitChannelFull = true 86 | } 87 | } 88 | } 89 | } 90 | 91 | // Consume processes an Entry, adding the data within to the internal 92 | // aggregated state 93 | func (a *FlowProfiler) Consume(e *types.Entry) error { 94 | aproto := e.AppProto 95 | if aproto == "" { 96 | aproto = "unknown" 97 | } 98 | a.ProfileMutex.Lock() 99 | profile := a.Profile[aproto] 100 | profile.BytesToClt += uint64(e.BytesToClient) 101 | profile.BytesToSrv += uint64(e.BytesToServer) 102 | 
profile.PacketsToClt += uint64(e.PktsToClient) 103 | profile.PacketsToSrv += uint64(e.PktsToServer) 104 | a.Profile[aproto] = profile 105 | a.ProfileMutex.Unlock() 106 | return nil 107 | } 108 | 109 | // Run starts the background aggregation service for this handler 110 | func (a *FlowProfiler) Run() { 111 | go func() { 112 | for message := range a.SubmitChannel { 113 | a.Submitter.SubmitWithHeaders(message, "", "text/plain", map[string]string{ 114 | "database": "telegraf", 115 | "retention_policy": "default", 116 | }) 117 | } 118 | }() 119 | go func() { 120 | i := 0 * time.Second 121 | for { 122 | select { 123 | case <-a.CloseChan: 124 | close(a.SubmitChannel) 125 | close(a.ClosedChan) 126 | return 127 | default: 128 | if i >= a.FlushPeriod { 129 | a.flush() 130 | i = 0 * time.Second 131 | } 132 | time.Sleep(1 * time.Second) 133 | i += 1 * time.Second 134 | } 135 | } 136 | }() 137 | } 138 | 139 | // Stop causes the aggregator to cease aggregating and submitting data 140 | func (a *FlowProfiler) Stop(stopChan chan bool) { 141 | close(a.CloseChan) 142 | <-a.ClosedChan 143 | close(stopChan) 144 | } 145 | 146 | // GetName returns the name of the handler 147 | func (a *FlowProfiler) GetName() string { 148 | return "Flow profiler" 149 | } 150 | 151 | // GetEventTypes returns a slice of event type strings that this handler 152 | // should be applied to 153 | func (a *FlowProfiler) GetEventTypes() []string { 154 | return []string{"flow"} 155 | } 156 | -------------------------------------------------------------------------------- /processing/flow_profiler_test.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2020, DCSO GmbH 5 | 6 | import ( 7 | "encoding/json" 8 | "fmt" 9 | "math/rand" 10 | "reflect" 11 | "regexp" 12 | "strconv" 13 | "strings" 14 | "sync" 15 | "testing" 16 | "time" 17 | 18 | "github.com/DCSO/fever/types" 19 | ) 20 | 21 | const ( 22 | 
numOfProfiledFlowItems = 10000 23 | ) 24 | 25 | func makeFlowProfilerEvent() types.Entry { 26 | e := types.Entry{ 27 | SrcIP: fmt.Sprintf("10.0.0.%d", rand.Intn(250)), 28 | SrcPort: []int64{1, 2, 3, 4, 5}[rand.Intn(5)], 29 | DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(250)), 30 | DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], 31 | Timestamp: time.Now().Format(types.SuricataTimestampFormat), 32 | EventType: "flow", 33 | Proto: "TCP", 34 | AppProto: []string{"foo", "bar", "baz"}[rand.Intn(3)], 35 | BytesToClient: int64(rand.Intn(10000)), 36 | BytesToServer: int64(rand.Intn(10000)), 37 | PktsToClient: int64(rand.Intn(100)), 38 | PktsToServer: int64(rand.Intn(100)), 39 | } 40 | jsonBytes, _ := json.Marshal(e) 41 | e.JSONLine = string(jsonBytes) 42 | return e 43 | } 44 | 45 | type flowProfilerTestSubmitter struct { 46 | sync.Mutex 47 | Values [][]byte 48 | } 49 | 50 | func (fpts *flowProfilerTestSubmitter) SubmitWithHeaders(rawData []byte, key string, contentType string, myHeaders map[string]string) { 51 | fpts.Lock() 52 | defer fpts.Unlock() 53 | fpts.Values = append(fpts.Values, rawData) 54 | } 55 | 56 | func (fpts *flowProfilerTestSubmitter) Submit(rawData []byte, key string, contentType string) { 57 | fpts.Lock() 58 | defer fpts.Unlock() 59 | fpts.Values = append(fpts.Values, rawData) 60 | } 61 | 62 | func (fpts *flowProfilerTestSubmitter) UseCompression() { 63 | // pass 64 | } 65 | 66 | func (fpts *flowProfilerTestSubmitter) Finish() { 67 | // pass 68 | } 69 | 70 | // TestFlowProfiler checks whether flow profiles are generated correctly. 71 | // To do this, it consumes a set of example events with randomized event types 72 | // and sizes, generates a reference set of statistics and then compares it to 73 | // the values submitted to a test submitter which simply stores these values. 
74 | func TestFlowProfiler(t *testing.T) { 75 | rand.Seed(time.Now().UTC().UnixNano()) 76 | myMap := make(map[string]ProtoProfile) 77 | seenProfile := make(map[string]ProtoProfile) 78 | 79 | feedWaitChan := make(chan bool) 80 | 81 | s := &flowProfilerTestSubmitter{ 82 | Values: make([][]byte, 0), 83 | } 84 | 85 | f, err := MakeFlowProfiler(1*time.Second, s) 86 | if err != nil { 87 | t.Fatal(err) 88 | } 89 | 90 | f.Run() 91 | 92 | for i := 0; i < numOfProfiledFlowItems; i++ { 93 | ev := makeFlowProfilerEvent() 94 | myProfile := myMap[ev.AppProto] 95 | myProfile.BytesToClt += uint64(ev.BytesToClient) 96 | myProfile.BytesToSrv += uint64(ev.BytesToServer) 97 | myProfile.PacketsToClt += uint64(ev.PktsToClient) 98 | myProfile.PacketsToSrv += uint64(ev.PktsToServer) 99 | myMap[ev.AppProto] = myProfile 100 | f.Consume(&ev) 101 | } 102 | 103 | go func() { 104 | r := regexp.MustCompile(`proto=(?P[^ ]+) flowbytestoclient=(?P[0-9]+),flowbytestoserver=(?P[0-9]+),flowpktstoclient=(?P[0-9]+),flowpktstoserver=(?P[0-9]+)`) 105 | for { 106 | s.Lock() 107 | found := 0 108 | for _, v := range s.Values { 109 | for _, proto := range []string{"foo", "bar", "baz"} { 110 | if strings.Contains(string(v), fmt.Sprintf("proto=%s flowbytestoclient=0,flowbytestoserver=0,flowpktstoclient=0,flowpktstoserver=0", proto)) { 111 | found++ 112 | } 113 | } 114 | } 115 | s.Unlock() 116 | if found == 3 { 117 | break 118 | } 119 | time.Sleep(100 * time.Millisecond) 120 | } 121 | s.Lock() 122 | for _, v := range s.Values { 123 | sm := r.FindStringSubmatch(string(v)) 124 | if sm == nil { 125 | continue 126 | } 127 | p := seenProfile[sm[1]] 128 | intV, err := strconv.ParseUint(sm[2], 10, 64) 129 | if err == nil { 130 | p.BytesToClt += intV 131 | } 132 | intV, err = strconv.ParseUint(sm[3], 10, 64) 133 | if err == nil { 134 | p.BytesToSrv += intV 135 | } 136 | intV, err = strconv.ParseUint(sm[4], 10, 64) 137 | if err == nil { 138 | p.PacketsToClt += intV 139 | } 140 | intV, err = strconv.ParseUint(sm[5], 10, 
64) 141 | if err == nil { 142 | p.PacketsToSrv += intV 143 | } 144 | seenProfile[sm[1]] = p 145 | } 146 | s.Unlock() 147 | close(feedWaitChan) 148 | }() 149 | 150 | <-feedWaitChan 151 | 152 | consumeWaitChan := make(chan bool) 153 | f.Stop(consumeWaitChan) 154 | <-consumeWaitChan 155 | 156 | if !reflect.DeepEqual(myMap, seenProfile) { 157 | t.Fatal("different result for test") 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /processing/heartbeat_injector.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2020, 2021, DCSO GmbH 5 | 6 | import ( 7 | "encoding/json" 8 | "fmt" 9 | "math/rand" 10 | "regexp" 11 | "time" 12 | 13 | "github.com/DCSO/fever/types" 14 | "github.com/DCSO/fever/util" 15 | log "github.com/sirupsen/logrus" 16 | ) 17 | 18 | var ( 19 | // match 24 hour local time string, separated by colon 20 | injectTimeRegex = regexp.MustCompile(`^(([01][0-9])|(2[0-3])):[0-5][0-9]$`) 21 | // We pick this value as a tick interval to check the time against the list 22 | // of times to send heartbeats at. We check once per minute as that is the 23 | // resolution of the specified times as well. 24 | injectTimeCheckTick = 1 * time.Minute 25 | ) 26 | 27 | // HeartbeatInjector regularly adds a date-based pseudo-event to the forwarded 28 | // event stream. 29 | type HeartbeatInjector struct { 30 | SensorID string 31 | Times []string 32 | AlertTimes []string 33 | CloseChan chan bool 34 | Logger *log.Entry 35 | ForwardHandler Handler 36 | } 37 | 38 | // MakeHeartbeatInjector creates a new HeartbeatInjector. 
39 | func MakeHeartbeatInjector(forwardHandler Handler, injectTimes []string, alertTimes []string) (*HeartbeatInjector, error) { 40 | sensorID, err := util.GetSensorID() 41 | if err != nil { 42 | return nil, err 43 | } 44 | for _, v := range injectTimes { 45 | if !injectTimeRegex.Match([]byte(v)) { 46 | return nil, fmt.Errorf("invalid time specification in heartbeat injector config: '%s'", v) 47 | } 48 | } 49 | for _, v := range alertTimes { 50 | if !injectTimeRegex.Match([]byte(v)) { 51 | return nil, fmt.Errorf("invalid alert time specification in heartbeat injector config: '%s'", v) 52 | } 53 | } 54 | a := &HeartbeatInjector{ 55 | ForwardHandler: forwardHandler, 56 | Logger: log.WithFields(log.Fields{ 57 | "domain": "heartbeat_injector", 58 | }), 59 | Times: injectTimes, 60 | AlertTimes: alertTimes, 61 | CloseChan: make(chan bool), 62 | SensorID: sensorID, 63 | } 64 | return a, nil 65 | } 66 | 67 | func makeHeartbeatEvent(eventType string) types.Entry { 68 | now := time.Now() 69 | entry := types.Entry{ 70 | SrcIP: "192.0.2.1", 71 | SrcPort: int64(rand.Intn(60000) + 1025), 72 | DestIP: "192.0.2.2", 73 | DestPort: 80, 74 | Timestamp: time.Now().Format(types.SuricataTimestampFormat), 75 | EventType: eventType, 76 | Proto: "TCP", 77 | HTTPHost: fmt.Sprintf("test-%d-%02d-%02d.vast", 78 | now.Year(), now.Month(), now.Day()), 79 | HTTPUrl: "/just-visiting", 80 | HTTPMethod: "GET", 81 | } 82 | eve := types.EveEvent{ 83 | Timestamp: &types.SuriTime{ 84 | Time: time.Now().UTC(), 85 | }, 86 | EventType: entry.EventType, 87 | SrcIP: entry.SrcIP, 88 | SrcPort: int(entry.SrcPort), 89 | DestIP: entry.DestIP, 90 | DestPort: int(entry.DestPort), 91 | Proto: entry.Proto, 92 | HTTP: &types.HTTPEvent{ 93 | Hostname: entry.HTTPHost, 94 | URL: entry.HTTPUrl, 95 | HTTPMethod: entry.HTTPMethod, 96 | HTTPUserAgent: "FEVER", 97 | Status: 200, 98 | Protocol: "HTTP/1.1", 99 | Length: 42, 100 | HTTPContentType: "text/html", 101 | }, 102 | } 103 | if eventType == "alert" { 104 | eve.Alert = 
&types.AlertEvent{ 105 | Action: "allowed", 106 | Category: "Not Suspicious Traffic", 107 | Signature: "DCSO FEVER TEST alert", 108 | } 109 | entry.HTTPHost = "testalert.fever" 110 | eve.HTTP.Hostname = entry.HTTPHost 111 | } 112 | json, err := json.Marshal(eve) 113 | if err != nil { 114 | log.Warn(err) 115 | } else { 116 | entry.JSONLine = string(json) 117 | } 118 | return entry 119 | } 120 | 121 | // Run starts the background service. 122 | func (a *HeartbeatInjector) Run() { 123 | go func() { 124 | for { 125 | select { 126 | case <-a.CloseChan: 127 | return 128 | default: 129 | curTime := time.Now().Format("15:04") 130 | for _, timeVal := range a.Times { 131 | if curTime == timeVal { 132 | ev := makeHeartbeatEvent("http") 133 | a.Logger.Infof("creating heartbeat HTTP event for %s: %s", 134 | curTime, string(ev.JSONLine)) 135 | a.ForwardHandler.Consume(&ev) 136 | } 137 | } 138 | for _, timeVal := range a.AlertTimes { 139 | if curTime == timeVal { 140 | ev := makeHeartbeatEvent("alert") 141 | a.Logger.Infof("creating heartbeat alert event for %s: %s", 142 | curTime, string(ev.JSONLine)) 143 | a.ForwardHandler.Consume(&ev) 144 | } 145 | } 146 | time.Sleep(injectTimeCheckTick) 147 | } 148 | } 149 | }() 150 | } 151 | 152 | // Stop causes the service to cease the background work. 153 | func (a *HeartbeatInjector) Stop() { 154 | close(a.CloseChan) 155 | } 156 | -------------------------------------------------------------------------------- /processing/event_profiler.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2018, DCSO GmbH 5 | 6 | import ( 7 | "bytes" 8 | "fmt" 9 | "os" 10 | "os/exec" 11 | "sync" 12 | "time" 13 | 14 | "github.com/DCSO/fever/types" 15 | "github.com/DCSO/fever/util" 16 | log "github.com/sirupsen/logrus" 17 | ) 18 | 19 | // EventProfile contains counts per event_type such as occurrences and 20 | // JSON size. 
21 | type EventProfile struct { 22 | CountMap map[string]uint64 23 | SizeMap map[string]uint64 24 | } 25 | 26 | // EventProfiler counts EVE event type statistics, such as number and size 27 | // of JSON data received from the input. 28 | type EventProfiler struct { 29 | SensorID string 30 | Host string 31 | Profile EventProfile 32 | FlushPeriod time.Duration 33 | ProfileMutex sync.Mutex 34 | CloseChan chan bool 35 | ClosedChan chan bool 36 | Logger *log.Entry 37 | Submitter util.StatsSubmitter 38 | SubmitChannel chan []byte 39 | } 40 | 41 | func getFQDN() (fqdn string) { 42 | cmd := exec.Command("/bin/hostname", "-f") 43 | var out bytes.Buffer 44 | cmd.Stdout = &out 45 | err := cmd.Run() 46 | if err != nil { 47 | log.Warn(err) 48 | host, err := os.Hostname() 49 | if err != nil { 50 | return "unknown" 51 | } 52 | return host 53 | } 54 | fqdn = out.String() 55 | if len(fqdn) > 1 { 56 | fqdn = fqdn[:len(fqdn)-1] 57 | } else { 58 | fqdn = "unknown" 59 | } 60 | return fqdn 61 | } 62 | 63 | // MakeEventProfiler creates a new EventProfiler. 
64 | func MakeEventProfiler(flushPeriod time.Duration, submitter util.StatsSubmitter) (*EventProfiler, error) { 65 | sensorID, err := util.GetSensorID() 66 | if err != nil { 67 | return nil, err 68 | } 69 | a := &EventProfiler{ 70 | FlushPeriod: flushPeriod, 71 | Logger: log.WithFields(log.Fields{ 72 | "domain": "eventprofiler", 73 | }), 74 | Profile: EventProfile{ 75 | CountMap: make(map[string]uint64), 76 | SizeMap: make(map[string]uint64), 77 | }, 78 | CloseChan: make(chan bool), 79 | ClosedChan: make(chan bool), 80 | SubmitChannel: make(chan []byte, 60), 81 | Submitter: submitter, 82 | SensorID: sensorID, 83 | } 84 | a.SensorID, _ = os.Hostname() 85 | a.Host = getFQDN() 86 | return a, nil 87 | } 88 | 89 | func (a *EventProfiler) formatLineProtocol() string { 90 | out := "" 91 | a.ProfileMutex.Lock() 92 | myProfile := a.Profile 93 | first := true 94 | for k, v := range myProfile.SizeMap { 95 | if !first { 96 | out += "," 97 | } else { 98 | first = false 99 | } 100 | out += fmt.Sprintf("size.%s=%d", k, v) 101 | } 102 | for k, v := range myProfile.CountMap { 103 | out += fmt.Sprintf(",count.%s=%d", k, v) 104 | } 105 | a.ProfileMutex.Unlock() 106 | if out == "" { 107 | return "" 108 | } 109 | return fmt.Sprintf("%s,host=%s %s %d", util.ToolName, a.Host, out, uint64(time.Now().UnixNano())) 110 | } 111 | 112 | func (a *EventProfiler) flush() { 113 | lineString := a.formatLineProtocol() 114 | if lineString == "" { 115 | return 116 | } 117 | select { 118 | case a.SubmitChannel <- []byte(lineString): 119 | break 120 | default: 121 | log.Warning("channel is full, cannot submit message...") 122 | } 123 | } 124 | 125 | // Consume processes an Entry, adding the data within to the internal 126 | // aggregated state 127 | func (a *EventProfiler) Consume(e *types.Entry) error { 128 | etype := e.EventType 129 | a.ProfileMutex.Lock() 130 | a.Profile.CountMap[etype]++ 131 | a.Profile.SizeMap[etype] += uint64(len(e.JSONLine)) 132 | a.ProfileMutex.Unlock() 133 | return nil 134 | } 
135 | 136 | // Run starts the background aggregation service for this handler 137 | func (a *EventProfiler) Run() { 138 | go func() { 139 | for message := range a.SubmitChannel { 140 | a.Submitter.SubmitWithHeaders(message, "", "text/plain", map[string]string{ 141 | "database": "telegraf", 142 | "retention_policy": "default", 143 | }) 144 | } 145 | }() 146 | go func() { 147 | i := 0 * time.Second 148 | for { 149 | select { 150 | case <-a.CloseChan: 151 | close(a.SubmitChannel) 152 | close(a.ClosedChan) 153 | return 154 | default: 155 | if i >= a.FlushPeriod { 156 | a.flush() 157 | i = 0 * time.Second 158 | } 159 | time.Sleep(1 * time.Second) 160 | i += 1 * time.Second 161 | } 162 | } 163 | }() 164 | } 165 | 166 | // Stop causes the aggregator to cease aggregating and submitting data 167 | func (a *EventProfiler) Stop(stopChan chan bool) { 168 | close(a.CloseChan) 169 | <-a.ClosedChan 170 | close(stopChan) 171 | } 172 | 173 | // GetName returns the name of the handler 174 | func (a *EventProfiler) GetName() string { 175 | return "Event profiler" 176 | } 177 | 178 | // GetEventTypes returns a slice of event type strings that this handler 179 | // should be applied to 180 | func (a *EventProfiler) GetEventTypes() []string { 181 | return []string{"*"} 182 | } 183 | -------------------------------------------------------------------------------- /db/slurper_postgres_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "testing" 7 | "time" 8 | 9 | "context" 10 | "regexp" 11 | 12 | "github.com/DCSO/fever/types" 13 | "github.com/jackc/pgx/v4/pgxpool" 14 | "github.com/sirupsen/logrus" 15 | "github.com/stretchr/testify/assert" 16 | ) 17 | 18 | func TestPostgresSlurperCopyBufferContents(t *testing.T) { 19 | var capturedSQL string 20 | var capturedBody []byte 21 | done := make(chan struct{}, 1) 22 | 23 | s := &PostgresSlurper{ 24 | DB: (*pgxpool.Pool)(nil), 25 | DBUser: "testuser", 26 
| LastRotatedTime: time.Now(), 27 | IndexChan: make(chan string, 1), 28 | CurrentTableName: "event-2025-01-01-0000", 29 | RotationInterval: time.Hour, 30 | MaxTableSize: 1 << 30, 31 | ChunkSize: 1, 32 | Logger: logrus.WithField("test", "pgx-copy"), 33 | } 34 | 35 | // CopyFn to capture SQL and the entire reader body 36 | s.CopyFn = func(ctx context.Context, pool *pgxpool.Pool, sql string, r io.Reader) (int64, error) { 37 | capturedSQL = sql 38 | b, _ := io.ReadAll(r) 39 | capturedBody = b 40 | done <- struct{}{} 41 | return int64(bytes.Count(b, []byte("\n"))), nil 42 | } 43 | 44 | events := []types.Entry{ 45 | {Timestamp: "2023-01-01T12:00:00Z", JSONLine: `{"k":"v1"}`}, 46 | {Timestamp: "2023-01-01T12:00:01Z", JSONLine: `{"k":"v2"}`}, 47 | } 48 | expectedBody := "2023-01-01T12:00:00Z\t{\"k\":\"v1\"}\n" + 49 | "2023-01-01T12:00:01Z\t{\"k\":\"v2\"}\n" 50 | expectedSQL := "COPY \"" + s.CurrentTableName + "\" (ts, payload) FROM STDIN WITH CSV DELIMITER E'\\t' QUOTE E'\\b'" 51 | 52 | eventCh := make(chan types.Entry, len(events)+1) 53 | go s.slurpPostgres(context.TODO(), eventCh) 54 | 55 | // send exactly 2 events to trigger a copy 56 | for _, e := range events { 57 | eventCh <- e 58 | } 59 | 60 | select { 61 | case <-done: 62 | // proceed 63 | case <-time.After(2 * time.Second): 64 | t.Fatalf("timeout waiting for COPY to be invoked") 65 | } 66 | 67 | assert.Equal(t, expectedSQL, capturedSQL, "COPY SQL should match expected") 68 | assert.Equal(t, expectedBody, string(capturedBody), "COPY body should equal concatenation of lines with tab and newline") 69 | } 70 | 71 | func TestPostgresSlurperRotateCreateAndCopy(t *testing.T) { 72 | var capturedExecSQL string 73 | var capturedCopySQL string 74 | var capturedBody []byte 75 | done := make(chan struct{}, 1) 76 | 77 | s := &PostgresSlurper{ 78 | DB: (*pgxpool.Pool)(nil), 79 | DBUser: "testuser", 80 | LastRotatedTime: time.Now().Add(-2 * time.Second), 81 | IndexChan: make(chan string, 1), 82 | CurrentTableName: "event-old", 83 
| RotationInterval: time.Millisecond, 84 | MaxTableSize: 1 << 30, 85 | ChunkSize: 1, 86 | Logger: logrus.WithField("test", "pgx-rotate"), 87 | } 88 | 89 | // ExecFn to capture CREATE/GRANT SQL 90 | s.ExecFn = func(ctx context.Context, sql string) error { 91 | capturedExecSQL = sql 92 | return nil 93 | } 94 | // CopyFn to capture COPY SQL and body 95 | s.CopyFn = func(ctx context.Context, pool *pgxpool.Pool, sql string, r io.Reader) (int64, error) { 96 | capturedCopySQL = sql 97 | b, _ := io.ReadAll(r) 98 | capturedBody = b 99 | done <- struct{}{} 100 | return int64(bytes.Count(b, []byte("\n"))), nil 101 | } 102 | 103 | events := []types.Entry{ 104 | {Timestamp: "2023-01-01T12:00:00Z", JSONLine: `{"k":"v1"}`}, 105 | {Timestamp: "2023-01-01T12:00:01Z", JSONLine: `{"k":"v2"}`}, 106 | } 107 | expectedBody := "2023-01-01T12:00:00Z\t{\"k\":\"v1\"}\n" + 108 | "2023-01-01T12:00:01Z\t{\"k\":\"v2\"}\n" 109 | 110 | eventCh := make(chan types.Entry, len(events)+1) 111 | go s.slurpPostgres(context.Background(), eventCh) 112 | for _, e := range events { 113 | eventCh <- e 114 | } 115 | 116 | select { 117 | case <-done: 118 | // proceed 119 | case <-time.After(2 * time.Second): 120 | t.Fatalf("timeout waiting for COPY to be invoked") 121 | } 122 | 123 | // Assert CREATE/GRANT executed for event-YYYY-mm-dd-HHMM 124 | createRegex := regexp.MustCompile(`CREATE UNLOGGED TABLE IF NOT EXISTS "event-[0-9-]+"\s*\(ts timestamp without time zone default now\(\),\s*payload jsonb\);\s*GRANT ALL PRIVILEGES ON TABLE "event-[0-9-]+" to testuser;`) 125 | assert.Regexp(t, createRegex, capturedExecSQL, "CREATE/GRANT SQL should match expected pattern") 126 | 127 | // Assert COPY SQL targets an event-YYYY.. 
table and uses correct options 128 | copyRegex := regexp.MustCompile(`^COPY "event-[0-9-]+" \(ts, payload\) FROM STDIN WITH CSV DELIMITER E'\\t' QUOTE E'\\b'$`) 129 | assert.Regexp(t, copyRegex, capturedCopySQL, "COPY SQL should target rotated table with correct format") 130 | 131 | // Assert body 132 | assert.Equal(t, expectedBody, string(capturedBody), "COPY body should equal concatenation of lines") 133 | 134 | // Assert that previous table was enqueued for indexing upon rotation 135 | select { 136 | case prev := <-s.IndexChan: 137 | assert.Equal(t, "event-old", prev, "previous table should be enqueued after rotation") 138 | default: 139 | t.Fatalf("expected previous table to be enqueued for indexing") 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /input/input_socket.go: -------------------------------------------------------------------------------- 1 | package input 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "bufio" 8 | "net" 9 | "time" 10 | 11 | "github.com/DCSO/fever/types" 12 | "github.com/DCSO/fever/util" 13 | 14 | log "github.com/sirupsen/logrus" 15 | ) 16 | 17 | // SocketInputPerfStats contains performance stats written to InfluxDB 18 | // for monitoring. 19 | type SocketInputPerfStats struct { 20 | SocketQueueLength uint64 `influx:"input_queue_length"` 21 | SocketQueueDropped uint64 `influx:"input_queue_dropped"` 22 | } 23 | 24 | // SocketInput is an Input reading JSON EVE input from a Unix socket. 
25 | type SocketInput struct { 26 | EventChan chan types.Entry 27 | Verbose bool 28 | Running bool 29 | InputListener net.Listener 30 | StopChan chan bool 31 | StoppedChan chan bool 32 | DropIfChannelFull bool 33 | PerfStats SocketInputPerfStats 34 | StatsEncoder *util.PerformanceStatsEncoder 35 | } 36 | 37 | // GetName returns a printable name for the input 38 | func (si *SocketInput) GetName() string { 39 | return "Socket input" 40 | } 41 | 42 | func (si *SocketInput) handleServerConnection() { 43 | for { 44 | select { 45 | case <-si.StopChan: 46 | close(si.StoppedChan) 47 | return 48 | default: 49 | var start time.Time 50 | var totalLen int 51 | 52 | si.InputListener.(*net.UnixListener).SetDeadline(time.Now().Add(1e9)) 53 | c, err := si.InputListener.Accept() 54 | if nil != err { 55 | if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { 56 | continue 57 | } 58 | log.Info(err) 59 | } 60 | 61 | if si.Verbose { 62 | start = time.Now() 63 | } 64 | scanner := bufio.NewScanner(c) 65 | buf := make([]byte, 0, 32*1024*1024) 66 | scanner.Buffer(buf, 32*1024*1024) 67 | for { 68 | for scanner.Scan() { 69 | select { 70 | case <-si.StopChan: 71 | close(si.StoppedChan) 72 | return 73 | default: 74 | json := scanner.Bytes() 75 | totalLen += len(json) 76 | e, err := util.ParseJSON(json) 77 | if err != nil { 78 | log.Warn(err, string(json[:])) 79 | continue 80 | } 81 | if si.DropIfChannelFull { 82 | select { 83 | case si.EventChan <- e: 84 | // pass 85 | default: 86 | si.PerfStats.SocketQueueDropped++ 87 | } 88 | } else { 89 | si.EventChan <- e 90 | } 91 | } 92 | } 93 | errRead := scanner.Err() 94 | if errRead == nil { 95 | break 96 | } else if errRead == bufio.ErrTooLong { 97 | log.Warn(errRead) 98 | scanner = bufio.NewScanner(c) 99 | scanner.Buffer(buf, 2*cap(buf)) 100 | } else { 101 | log.Warn(errRead) 102 | } 103 | } 104 | 105 | if si.Verbose { 106 | elapsed := time.Since(start) 107 | log.WithFields(log.Fields{ 108 | "size": totalLen, 109 | "elapsedTime": elapsed, 110 
| }).Info("connection handled") 111 | } 112 | } 113 | } 114 | } 115 | 116 | func (si *SocketInput) sendPerfStats() { 117 | start := time.Now() 118 | for { 119 | select { 120 | case <-si.StopChan: 121 | return 122 | default: 123 | // We briefly wake up once a second to check whether we are asked 124 | // to stop or whether it's time to submit stats. This is neglegible 125 | // in overhead but massively improves shutdown time, as a simple 126 | // time.Sleep() is non-interruptible by the stop channel. 127 | if time.Since(start) > perfStatsSendInterval { 128 | if si.StatsEncoder != nil { 129 | si.PerfStats.SocketQueueLength = uint64(len(si.EventChan)) 130 | si.StatsEncoder.Submit(si.PerfStats) 131 | } 132 | start = time.Now() 133 | } 134 | time.Sleep(1 * time.Second) 135 | } 136 | } 137 | } 138 | 139 | // MakeSocketInput returns a new SocketInput reading from the Unix socket 140 | // inputSocket and writing parsed events to outChan. If no such socket could be 141 | // created for listening, the error returned is set accordingly. 142 | func MakeSocketInput(inputSocket string, 143 | outChan chan types.Entry, bufDrop bool) (*SocketInput, error) { 144 | var err error 145 | si := &SocketInput{ 146 | EventChan: outChan, 147 | Verbose: false, 148 | StopChan: make(chan bool), 149 | DropIfChannelFull: bufDrop, 150 | } 151 | si.InputListener, err = net.Listen("unix", inputSocket) 152 | if err != nil { 153 | return nil, err 154 | } 155 | return si, err 156 | } 157 | 158 | // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. 
func (si *SocketInput) SubmitStats(sc *util.PerformanceStatsEncoder) {
	si.StatsEncoder = sc
}

// Run starts the SocketInput
func (si *SocketInput) Run() {
	// Idempotent: a second Run() while already running is a no-op.
	if !si.Running {
		si.Running = true
		si.StopChan = make(chan bool)
		go si.handleServerConnection()
		go si.sendPerfStats()
	}
}

// Stop causes the SocketInput to stop reading from the socket and close all
// associated channels, including the passed notification channel.
func (si *SocketInput) Stop(stoppedChan chan bool) {
	// Only acts when running; stoppedChan is closed by the reader
	// goroutine once it has terminated.
	if si.Running {
		si.StoppedChan = stoppedChan
		close(si.StopChan)
		si.Running = false
	}
}

// SetVerbose sets the input's verbosity level
func (si *SocketInput) SetVerbose(verbose bool) {
	si.Verbose = verbose
}
--------------------------------------------------------------------------------
/util/consumer.go:
--------------------------------------------------------------------------------
package util

// Parts of this code have been taken from
// https://github.com/streadway/amqp/blob/master/_examples/simple-consumer/consumer.go
// released under the license of the main streadway/amqp project:
//
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import (
	"fmt"

	"github.com/NeowayLabs/wabbit"
	"github.com/NeowayLabs/wabbit/amqptest"
	log "github.com/sirupsen/logrus"
)

// Consumer reads and processes messages from a fake RabbitMQ server.
type Consumer struct {
	// conn is the underlying (test) AMQP connection.
	conn wabbit.Conn
	// channel is the AMQP channel used for exchange/queue setup and consuming.
	channel wabbit.Channel
	// tag identifies this consumer on the channel.
	tag string
	// done signals that the delivery-handling goroutine has exited.
	done chan error
	// Callback is invoked for each delivery received from the queue.
	Callback func(wabbit.Delivery)
}

// NewConsumer creates a new consumer with the given properties. The callback
// function is called for each delivery accepted from a consumer channel.
func NewConsumer(amqpURI, exchange, exchangeType, queueName, key, ctag string, callback func(wabbit.Delivery)) (*Consumer, error) {
	var err error
	c := &Consumer{
		conn:     nil,
		channel:  nil,
		tag:      ctag,
		done:     make(chan error),
		Callback: callback,
	}

	log.Debugf("dialing %q", amqpURI)
	c.conn, err = amqptest.Dial(amqpURI)
	if err != nil {
		return nil, fmt.Errorf("dial: %s", err)
	}

	// NOTE(review): from here on, error returns leave c.conn (and later
	// c.channel) open -- consider closing them on failure. Verify whether
	// callers rely on the current behavior before changing it.
	log.Debugf("got Connection, getting Channel")
	c.channel, err = c.conn.Channel()
	if err != nil {
		return nil, fmt.Errorf("channel: %s", err)
	}

	log.Debugf("got Channel, declaring Exchange (%q)", exchange)
	if err = c.channel.ExchangeDeclare(
		exchange,     // name of the exchange
		exchangeType, // type
		wabbit.Option{
			"durable":  true,
			"delete":   false,
			"internal": false,
			"noWait":   false,
		},
	); err != nil {
		return nil, fmt.Errorf("exchange declare: %s", err)
	}

	queue, err := c.channel.QueueDeclare(
		queueName, // name of the queue
		wabbit.Option{
			"durable":   true,
			"delete":    false,
			"exclusive": false,
			"noWait":    false,
		},
	)
	if err != nil {
		return nil, fmt.Errorf("queue declare: %s", err)
	}

	log.Debugf("declared Queue (%q %d messages, %d consumers), binding to Exchange (key %q)",
		queue.Name(), queue.Messages(), queue.Consumers(), key)

	if err = c.channel.QueueBind(
		queue.Name(), // name of the queue
		key,          // bindingKey
		exchange,     // sourceExchange
		wabbit.Option{
			"noWait": false,
		},
	); err != nil {
		return nil, fmt.Errorf("queue bind: %s", err)
	}

	log.Debugf("Queue bound to Exchange, starting Consume (consumer tag %q)", c.tag)
	deliveries, err := c.channel.Consume(
		queue.Name(), // name
		c.tag,        // consumerTag,
		wabbit.Option{
			"exclusive": false,
			"noLocal":   false,
			"noWait":    false,
		},
	)
	if err != nil {
		return nil, fmt.Errorf("queue consume: %s", err)
	}
	// The handler goroutine runs until the deliveries channel is closed
	// (see Shutdown) and then reports completion on c.done.
	go handle(deliveries, c.done, c.Callback)

	return c, nil
}

// Shutdown shuts down a consumer, closing down its channels and connections.
func (c *Consumer) Shutdown() error {
	// will close() the deliveries channel
	if err := c.channel.Close(); err != nil {
		return fmt.Errorf("channel close failed: %s", err)
	}
	if err := c.conn.Close(); err != nil {
		return fmt.Errorf("AMQP connection close error: %s", err)
	}
	defer log.Debugf("AMQP shutdown OK")
	// wait for handle() to exit
	return <-c.done
}

// maxLogLen caps how many bytes of a delivery body are included in debug logs.
const maxLogLen = 100

// handle runs the consumer loop: it logs each delivery (truncated for
// readability), invokes the callback, and acknowledges the message. It
// signals completion on done once the deliveries channel is closed.
func handle(deliveries <-chan wabbit.Delivery, done chan error, callback func(wabbit.Delivery)) {
	for d := range deliveries {
		v := d.Body()
		if len(v) > maxLogLen {
			v = v[:maxLogLen]
		}
		log.Debugf(
			"got %dB delivery: [%v] %q",
			len(d.Body()),
			d.DeliveryTag(),
			v,
		)
		// Ack only after the callback has run.
		callback(d)
		d.Ack(false)
	}
	done <- nil
}
--------------------------------------------------------------------------------
/processing/handler_dispatcher_test.go:
--------------------------------------------------------------------------------
package processing

// DCSO FEVER
// Copyright (c) 2017, DCSO GmbH

import (
	"fmt"
	"math/rand"
	"regexp"
	"testing"
	"time"

	"github.com/DCSO/fever/types"
	"github.com/DCSO/fever/util"

	"github.com/NeowayLabs/wabbit"
	"github.com/NeowayLabs/wabbit/amqptest"
	"github.com/NeowayLabs/wabbit/amqptest/server"
)

// Test1Handler is a test Handler that records the JSON lines of all
// consumed 'dns' events.
type Test1Handler struct {
	Vals []string
}

func (h *Test1Handler) GetName() string {
	return "Test handler 1"
}

func (h *Test1Handler) GetEventTypes() []string {
	return []string{"dns"}
31 | } 32 | 33 | func (h *Test1Handler) Consume(e *types.Entry) error { 34 | h.Vals = append(h.Vals, e.JSONLine) 35 | return nil 36 | } 37 | 38 | type Test2Handler struct { 39 | Vals []string 40 | } 41 | 42 | func (h *Test2Handler) GetName() string { 43 | return "Test handler 2" 44 | } 45 | 46 | func (h *Test2Handler) GetEventTypes() []string { 47 | return []string{"http"} 48 | } 49 | 50 | func (h *Test2Handler) Consume(e *types.Entry) error { 51 | h.Vals = append(h.Vals, e.JSONLine) 52 | return nil 53 | } 54 | 55 | func TestHandlerDispatcherExampleHandler(t *testing.T) { 56 | outChan := make(chan types.Entry) 57 | closeChan := make(chan bool) 58 | defaultSelection := make([]string, 0) 59 | 60 | go func(closeChan chan bool, inChan chan types.Entry) { 61 | for v := range inChan { 62 | defaultSelection = append(defaultSelection, v.JSONLine) 63 | } 64 | close(closeChan) 65 | }(closeChan, outChan) 66 | 67 | ad := MakeHandlerDispatcher(outChan) 68 | t1 := &Test1Handler{ 69 | Vals: make([]string, 0), 70 | } 71 | ad.RegisterHandler(t1) 72 | 73 | t2 := &Test2Handler{ 74 | Vals: make([]string, 0), 75 | } 76 | ad.RegisterHandler(t2) 77 | 78 | rand.Seed(time.Now().UTC().UnixNano()) 79 | // make test entries 80 | typestrs := []string{"http", "dns", "flow", "foo"} 81 | var createdEntries [10000]types.Entry 82 | entries := make(map[string]([]string)) 83 | for i := 0; i < 10000; i++ { 84 | myIdentifier := fmt.Sprintf("val%d", i) 85 | myType := typestrs[rand.Intn(len(typestrs))] 86 | createdEntries[i] = types.Entry{ 87 | EventType: myType, 88 | JSONLine: myIdentifier, 89 | } 90 | if _, ok := entries[myType]; !ok { 91 | entries[myType] = make([]string, 0) 92 | } 93 | entries[myType] = append(entries[myType], myIdentifier) 94 | ad.Dispatch(&createdEntries[i]) 95 | } 96 | 97 | close(outChan) 98 | <-closeChan 99 | 100 | if len(t1.Vals) != len(entries["dns"]) { 101 | t.Fatalf("wrong number of 'dns' entries delivered to DNS handler (%d/%d)", 102 | len(t1.Vals), len(entries["dns"])) 103 
| } 104 | for i := 0; i < len(t1.Vals); i++ { 105 | if t1.Vals[i] != entries["dns"][i] { 106 | t.Fatalf("'dns' pair of entries differs: %s/%s", t1.Vals[i], 107 | entries["dns"][i]) 108 | } 109 | } 110 | if len(t2.Vals) != len(entries["http"]) { 111 | t.Fatalf("wrong number of 'http' entries delivered to HTTP handler (%d/%d)", 112 | len(t2.Vals), len(entries["http"])) 113 | } 114 | for i := 0; i < len(t2.Vals); i++ { 115 | if t2.Vals[i] != entries["http"][i] { 116 | t.Fatalf("'http' pair of entries differs: %s/%s", t2.Vals[i], 117 | entries["http"][i]) 118 | } 119 | } 120 | } 121 | 122 | func TestHandlerDispatcherMonitoring(t *testing.T) { 123 | serverURL := "amqp://sensor:sensor@127.0.0.1:9999/%2f/" 124 | 125 | // start mock AMQP server 126 | fakeServer := server.NewServer(serverURL) 127 | fakeServer.Start() 128 | defer fakeServer.Stop() 129 | 130 | // set up consumer 131 | results := make([]string, 0) 132 | c, err := util.NewConsumer(serverURL, "nsm.test.metrics", "direct", "nsm.test.metrics.testqueue", 133 | "", "", func(d wabbit.Delivery) { 134 | results = append(results, string(d.Body())) 135 | }) 136 | if err != nil { 137 | t.Fatal(err) 138 | } 139 | 140 | // set up submitter 141 | statssubmitter, err := util.MakeAMQPSubmitterWithReconnector(serverURL, 142 | "nsm.test.metrics", true, func(url string) (wabbit.Conn, error) { 143 | // we pass in a custom reconnector which uses the amqptest implementation 144 | var conn wabbit.Conn 145 | conn, err = amqptest.Dial(url) 146 | return conn, err 147 | }) 148 | if err != nil { 149 | t.Fatal(err) 150 | } 151 | defer statssubmitter.Finish() 152 | 153 | // create InfluxDB line protocol encoder/submitter 154 | pse := util.MakePerformanceStatsEncoder(statssubmitter, 2*time.Second, false) 155 | 156 | outChan := make(chan types.Entry) 157 | closeChan := make(chan bool) 158 | ad := MakeHandlerDispatcher(outChan) 159 | ad.SubmitStats(pse) 160 | ad.Run() 161 | 162 | go func() { 163 | for i := 0; i < 100; i++ { 164 | 
ad.Dispatch(&types.Entry{ 165 | JSONLine: "foo", 166 | }) 167 | ad.Dispatch(&types.Entry{ 168 | JSONLine: "bar", 169 | }) 170 | ad.Dispatch(&types.Entry{ 171 | JSONLine: "baz", 172 | }) 173 | time.Sleep(50 * time.Millisecond) 174 | } 175 | }() 176 | 177 | go func(closeChan chan bool, inChan chan types.Entry) { 178 | i := 0 179 | for v := range inChan { 180 | _ = v 181 | i++ 182 | if i == 300 { 183 | break 184 | } 185 | } 186 | close(closeChan) 187 | }(closeChan, outChan) 188 | 189 | <-closeChan 190 | close(outChan) 191 | 192 | stopChan := make(chan bool) 193 | ad.Stop(stopChan) 194 | <-stopChan 195 | 196 | c.Shutdown() 197 | 198 | if len(results) == 0 { 199 | t.Fatalf("unexpected result length: 0") 200 | } 201 | 202 | if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ dispatch_calls_per_sec=[0-9]+", util.ToolName), []byte(results[0])); !match { 203 | t.Fatalf("unexpected match content: %s", results[0]) 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /processing/handler_dispatcher.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, 2018, DCSO GmbH 5 | 6 | import ( 7 | "sync" 8 | "time" 9 | 10 | "github.com/DCSO/fever/types" 11 | "github.com/DCSO/fever/util" 12 | 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | // HandlerDispatcherPerfStats contains performance stats written to InfluxDB 17 | // for monitoring. 18 | type HandlerDispatcherPerfStats struct { 19 | DispatchedPerSec uint64 `influx:"dispatch_calls_per_sec"` 20 | } 21 | 22 | // HandlerDispatcher is a component to collect and properly apply a set of 23 | // Handlers to a stream of Entry objects. Handlers can register the event types 24 | // they are meant to act on and are called with relevant Entries to perform 25 | // their job. 
26 | type HandlerDispatcher struct { 27 | Lock sync.Mutex 28 | DispatchMap map[string]([]Handler) 29 | DBHandler Handler 30 | PerfStats HandlerDispatcherPerfStats 31 | Logger *log.Entry 32 | StatsEncoder *util.PerformanceStatsEncoder 33 | StopCounterChan chan bool 34 | StoppedCounterChan chan bool 35 | } 36 | 37 | // DBHandler writes consumed events to a database. 38 | type DBHandler struct { 39 | OutChan chan types.Entry 40 | } 41 | 42 | // GetName just returns the name of the default handler 43 | func (h *DBHandler) GetName() string { 44 | return "Default handler" 45 | } 46 | 47 | // GetEventTypes here is a dummy method -- since this handler is never 48 | // registered we don't need to set this to an actual event type 49 | func (h *DBHandler) GetEventTypes() []string { 50 | return []string{"not applicable"} 51 | } 52 | 53 | // Consume simply emits the consumed entry on the default output channel 54 | func (h *DBHandler) Consume(e *types.Entry) error { 55 | h.OutChan <- *e 56 | return nil 57 | } 58 | 59 | func (ad *HandlerDispatcher) runCounter() { 60 | sTime := time.Now() 61 | for { 62 | time.Sleep(500 * time.Millisecond) 63 | select { 64 | case <-ad.StopCounterChan: 65 | close(ad.StoppedCounterChan) 66 | return 67 | default: 68 | if ad.StatsEncoder == nil || time.Since(sTime) < ad.StatsEncoder.SubmitPeriod { 69 | continue 70 | } 71 | // Lock the current measurements for submission. Since this is a blocking 72 | // operation, we don't want this to depend on how long submitter.Submit() 73 | // takes but keep it independent of that. Hence we take the time to create 74 | // a local copy of the counter to be able to reset and release the live 75 | // one as quickly as possible. 
76 | ad.Lock.Lock() 77 | // Make our own copy of the current counter 78 | myStats := HandlerDispatcherPerfStats{ 79 | DispatchedPerSec: ad.PerfStats.DispatchedPerSec, 80 | } 81 | myStats.DispatchedPerSec /= uint64(ad.StatsEncoder.SubmitPeriod.Seconds()) 82 | // Reset live counter 83 | ad.PerfStats.DispatchedPerSec = 0 84 | // Release live counter to not block further events 85 | ad.Lock.Unlock() 86 | 87 | ad.StatsEncoder.Submit(myStats) 88 | sTime = time.Now() 89 | } 90 | } 91 | } 92 | 93 | // MakeHandlerDispatcher returns a new HandlerDispatcher. The channel passed 94 | // as an argument is used as an output channel for the default handler, which 95 | // simply forwards events to a given channel (for example to be written to a 96 | // database) 97 | func MakeHandlerDispatcher(databaseOut chan types.Entry) *HandlerDispatcher { 98 | ad := &HandlerDispatcher{ 99 | DispatchMap: make(map[string]([]Handler)), 100 | Logger: log.WithFields(log.Fields{ 101 | "domain": "dispatch", 102 | }), 103 | } 104 | if databaseOut != nil { 105 | ad.DBHandler = &DBHandler{ 106 | OutChan: databaseOut, 107 | } 108 | } 109 | ad.Logger.WithFields(log.Fields{ 110 | "type": "*", 111 | "name": "default handler", 112 | }).Debugf("event handler added") 113 | return ad 114 | } 115 | 116 | // RegisterHandler adds the given Handler to the set of callbacks to be 117 | // called on the relevant Entries received by the dispatcher. 
118 | func (ad *HandlerDispatcher) RegisterHandler(agg Handler) { 119 | eventTypes := agg.GetEventTypes() 120 | for _, eventType := range eventTypes { 121 | if _, ok := ad.DispatchMap[eventType]; !ok { 122 | ad.DispatchMap[eventType] = make([]Handler, 0) 123 | } 124 | ad.DispatchMap[eventType] = append(ad.DispatchMap[eventType], agg) 125 | ad.Logger.WithFields(log.Fields{ 126 | "type": eventType, 127 | "name": agg.GetName(), 128 | }).Info("event handler added") 129 | } 130 | } 131 | 132 | // Dispatch applies the set of handlers currently registered in the dispatcher 133 | // to the Entry object passed to it. 134 | func (ad *HandlerDispatcher) Dispatch(e *types.Entry) { 135 | if _, ok := ad.DispatchMap[e.EventType]; ok { 136 | for _, agg := range ad.DispatchMap[e.EventType] { 137 | agg.Consume(e) 138 | } 139 | } 140 | if a, ok := ad.DispatchMap["*"]; ok { 141 | for _, agg := range a { 142 | agg.Consume(e) 143 | } 144 | } 145 | if ad.DBHandler != nil { 146 | ad.DBHandler.Consume(e) 147 | } 148 | ad.Lock.Lock() 149 | ad.PerfStats.DispatchedPerSec++ 150 | ad.Lock.Unlock() 151 | } 152 | 153 | // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. 
154 | func (ad *HandlerDispatcher) SubmitStats(sc *util.PerformanceStatsEncoder) { 155 | ad.StatsEncoder = sc 156 | } 157 | 158 | // Run starts the background service for this handler 159 | func (ad *HandlerDispatcher) Run() { 160 | ad.StopCounterChan = make(chan bool) 161 | ad.StoppedCounterChan = make(chan bool) 162 | go ad.runCounter() 163 | } 164 | 165 | // Stop causes the handler to cease counting and submitting data 166 | func (ad *HandlerDispatcher) Stop(stopChan chan bool) { 167 | close(ad.StopCounterChan) 168 | <-ad.StoppedCounterChan 169 | close(stopChan) 170 | } 171 | -------------------------------------------------------------------------------- /util/util_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "bufio" 8 | "os" 9 | "reflect" 10 | "testing" 11 | 12 | "github.com/DCSO/fever/types" 13 | ) 14 | 15 | var nullEntry = types.Entry{ 16 | Timestamp: "2017-03-06T06:54:10.839668+0000", 17 | EventType: "fileinfo", 18 | JSONLine: `{"timestamp":"2017-03-06T06:54:10.839668+0000","flow_id":null,"in_iface":"enp2s0f1","event_type":"fileinfo","vlan":null,"src_ip":null,"src_port":null,"dest_ip":null,"dest_port":null,"http":{"hostname":"api.icndb.com","url":null,"state":"CLOSED","md5":null}}`, 19 | Iface: "enp2s0f1", 20 | HTTPHost: "api.icndb.com", 21 | } 22 | 23 | var entries = []types.Entry{ 24 | types.Entry{ 25 | SrcIP: "10.0.0.10", 26 | SrcPort: 53, 27 | DestIP: "10.0.0.11", 28 | DestPort: 51323, 29 | Timestamp: "2017-03-06T06:54:06.047429+0000", 30 | EventType: "dns", 31 | Proto: "UDP", 32 | JSONLine: `{"timestamp":"2017-03-06T06:54:06.047429+0000","flow_id":4711,"in_iface":"enp2s0f1","event_type":"dns","vlan":61,"src_ip":"10.0.0.10","src_port":53,"dest_ip":"10.0.0.11","dest_port":51323,"proto":"UDP","dns":{"type":"answer","id":1,"rcode":"NOERROR","rrname":"test.test.local","rrtype":"A","ttl":2365,"rdata":"10.0.0.12"}}`, 33 
| DNSRRName: "test.test.local", 34 | DNSRRType: "A", 35 | DNSRCode: "NOERROR", 36 | DNSRData: "10.0.0.12", 37 | DNSType: "answer", 38 | Iface: "enp2s0f1", 39 | FlowID: "4711", 40 | }, 41 | types.Entry{ 42 | SrcIP: "10.0.0.10", 43 | SrcPort: 80, 44 | DestIP: "10.0.0.11", 45 | DestPort: 52914, 46 | Timestamp: "2017-03-06T06:54:10.839668+0000", 47 | EventType: "fileinfo", 48 | Proto: "TCP", 49 | JSONLine: `{"timestamp":"2017-03-06T06:54:10.839668+0000","flow_id":2323,"in_iface":"enp2s0f1","event_type":"fileinfo","vlan":91,"src_ip":"10.0.0.10","src_port":80,"dest_ip":"10.0.0.11","dest_port":52914,"proto":"TCP","http":{"hostname":"api.icndb.com","url":"\/jokes\/random?firstName=Chuck&lastName=Norris&limitTo=[nerdy]","http_user_agent":"Ruby","http_content_type":"application\/json","http_method":"GET","protocol":"HTTP\/1.1","status":200,"length":178},"app_proto":"http","fileinfo":{"filename":"\/jokes\/random","magic":"ASCII text, with no line terminators","state":"CLOSED","md5":"8d81d793b28b098e8623d47bae23cf44","stored":false,"size":176,"tx_id":0}}`, 50 | HTTPHost: "api.icndb.com", 51 | HTTPUrl: `/jokes/random?firstName=Chuck&lastName=Norris&limitTo=[nerdy]`, 52 | HTTPMethod: `GET`, 53 | Iface: "enp2s0f1", 54 | AppProto: "http", 55 | FlowID: "2323", 56 | }, 57 | types.Entry{ 58 | SrcIP: "10.0.0.10", 59 | SrcPort: 24092, 60 | DestIP: "10.0.0.11", 61 | DestPort: 80, 62 | Timestamp: "2017-03-06T06:54:14.002504+0000", 63 | EventType: "http", 64 | Proto: "TCP", 65 | JSONLine: `{"timestamp":"2017-03-06T06:54:14.002504+0000","flow_id":2134,"in_iface":"enp2s0f1","event_type":"http","vlan":72,"src_ip":"10.0.0.10","src_port":24092,"dest_ip":"10.0.0.11","dest_port":80,"proto":"TCP","tx_id":0,"http":{"hostname":"foobar","url":"\/scripts\/wpnbr.dll","http_content_type":"text\/xml","http_method":"POST","protocol":"HTTP\/1.1","status":200,"length":347}}`, 66 | HTTPHost: "foobar", 67 | HTTPUrl: `/scripts/wpnbr.dll`, 68 | HTTPMethod: `POST`, 69 | Iface: "enp2s0f1", 70 | FlowID: "2134", 
71 | }, 72 | } 73 | 74 | func TestJSONParseEVE(t *testing.T) { 75 | f, err := os.Open("testdata/jsonparse_eve.json") 76 | if err != nil { 77 | t.Fatal(err) 78 | } 79 | scanner := bufio.NewScanner(f) 80 | i := 0 81 | for scanner.Scan() { 82 | json := scanner.Bytes() 83 | e, err := ParseJSON(json) 84 | if err != nil { 85 | t.Fatal(err) 86 | } 87 | if !reflect.DeepEqual(entries[i], e) { 88 | t.Fatalf("entry %d parsed from JSON does not match expected value", i) 89 | } 90 | i++ 91 | } 92 | } 93 | 94 | func TestJSONParseEVEBroken(t *testing.T) { 95 | f, err := os.Open("testdata/jsonparse_eve_broken1.json") 96 | if err != nil { 97 | t.Fatal(err) 98 | } 99 | scanner := bufio.NewScanner(f) 100 | i := 0 101 | for scanner.Scan() { 102 | json := scanner.Bytes() 103 | e, err := ParseJSON(json) 104 | if i != 1 { 105 | if err != nil { 106 | t.Fatal(err) 107 | } 108 | } 109 | if i == 1 { 110 | if err == nil { 111 | t.Fatalf("broken JSON line should raise an error") 112 | } 113 | } 114 | if i != 1 { 115 | if !reflect.DeepEqual(entries[i], e) { 116 | t.Fatalf("entry %d parsed from JSON does not match expected value", i) 117 | } 118 | } 119 | i++ 120 | } 121 | } 122 | 123 | func TestJSONParseEVEempty(t *testing.T) { 124 | f, err := os.Open("testdata/jsonparse_eve_empty.json") 125 | if err != nil { 126 | t.Fatal(err) 127 | } 128 | scanner := bufio.NewScanner(f) 129 | i := 0 130 | for scanner.Scan() { 131 | i++ 132 | } 133 | if i > 0 { 134 | t.Fatal("empty file should not generate any entries") 135 | } 136 | } 137 | 138 | func TestJSONParseEVEwithnull(t *testing.T) { 139 | f, err := os.Open("testdata/jsonparse_eve_nulls.json") 140 | if err != nil { 141 | t.Fatal(err) 142 | } 143 | scanner := bufio.NewScanner(f) 144 | i := 0 145 | var entry types.Entry 146 | for scanner.Scan() { 147 | json := scanner.Bytes() 148 | e, err := ParseJSON(json) 149 | if err != nil { 150 | t.Fatal(err) 151 | } 152 | entry = e 153 | i++ 154 | } 155 | if i != 1 { 156 | t.Fatalf("should parse only one entry, 
got %d", i) 157 | } 158 | if !reflect.DeepEqual(nullEntry, entry) { 159 | t.Fatalf("entry %d parsed from JSON does not match expected value", i) 160 | } 161 | } 162 | 163 | func TestGetSensorID(t *testing.T) { 164 | sid, err := GetSensorID() 165 | if err != nil { 166 | t.Fatal(err) 167 | } 168 | if len(sid) == 0 { 169 | t.Fatal("missing sensor ID") 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /db/sql.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | // SQLTrigramFunction is a plpgsql function to pull out indexable content from event JSON 7 | const SQLTrigramFunction = `CREATE OR REPLACE FUNCTION trigram_string(payload jsonb) 8 | RETURNS text 9 | AS $$ 10 | DECLARE 11 | buffer varchar := ''; 12 | BEGIN 13 | -- trying in typical order of frequency 14 | IF payload->>'event_type' = 'dns' 15 | THEN 16 | RETURN payload->'dns'->>'rdata'; 17 | END IF; 18 | IF payload->>'event_type' = 'http' 19 | THEN 20 | RETURN (payload->'http'->>'hostname') || '|' || (payload->'http'->>'url') || '|' || (payload->'http'->>'http_user_agent'); 21 | END IF; 22 | IF payload->>'event_type' = 'tls' 23 | THEN 24 | RETURN (payload->'tls'->>'subject') ||'|' || (payload->'tls'->>'issuerdn') || '|' || (payload->'tls'->>'fingerprint'); 25 | END IF; 26 | IF payload->>'event_type' = 'alert' 27 | THEN 28 | RETURN (payload->'alert'->>'payload_printable') || '|' || (payload->'alert'->>'payload'); 29 | END IF; 30 | IF payload->>'event_type' = 'smtp' 31 | THEN 32 | RETURN (payload->'smtp'->>'helo') || '|' || (payload->'smtp'->>'mail_from') || '|' || (payload->'smtp'->>'rcpt_to') || '|' || (payload->'email'->>'from') || '|' || (payload->'email'->>'to') || '|' || (payload->'email'->>'attachment'); 33 | END IF; 34 | IF payload->>'event_type' = 'fileinfo' 35 | THEN 36 | RETURN (payload->'fileinfo'->>'filename') || '|' || 
(payload->'fileinfo'->>'md5'); 37 | END IF; 38 | RETURN buffer; 39 | END; 40 | $$ 41 | LANGUAGE plpgsql 42 | IMMUTABLE;` 43 | 44 | // SQLCheckForTrigramExtension is an SQL query to check whether the trigram extension is available. 45 | const SQLCheckForTrigramExtension = `SELECT COUNT(*) FROM pg_available_extensions WHERE name = 'pg_trgm';` 46 | 47 | // SQLCreate is an SQL/DDL clause to create a new event table 48 | const SQLCreate = `CREATE UNLOGGED TABLE IF NOT EXISTS "%s" 49 | (ts timestamp without time zone default now(), 50 | payload jsonb); 51 | GRANT ALL PRIVILEGES ON TABLE "%s" to %s;` 52 | 53 | // SQLCopy is an SQL/DDL clause to bulk insert a chunk of JSON into the database 54 | const SQLCopy = `COPY "%s" (ts, payload) FROM STDIN WITH CSV DELIMITER E'\t' QUOTE E'\b'` 55 | 56 | // SQLIndex is an SQL/DDL clause to create indexes on event tables 57 | const SQLIndex = `CREATE INDEX ON "%s" (((payload->>'src_ip')::INET), ((payload->>'src_port')::INT)); 58 | CREATE INDEX ON "%s" (ts); 59 | CREATE INDEX ON "%s" (((payload->>'dest_ip')::INET), ((payload->>'dest_port')::INT)); 60 | CREATE INDEX ON "%s" ((payload->>'event_type')); 61 | CREATE INDEX ON "%s" using GIN (trigram_string(payload) gin_trgm_ops)` 62 | 63 | // SQLGetTableSizes is an SQL query to obtain the names of tables in the current schema and their size in bytes. 64 | const SQLGetTableSizes = `SELECT relname as table, 65 | pg_total_relation_size(relid) as size 66 | FROM pg_catalog.pg_statio_user_tables 67 | ORDER BY 1 DESC;` 68 | 69 | // SQLGenericQuery is the main kind of query used to pull out event metadata. 
70 | const SQLGenericQuery = `SELECT * FROM all_events_query($1::text, $2::timestamp, $3::timestamp, $4::text[], $5::inet, $6::int, $7::inet, $8::int, $9::int);` 71 | 72 | // SQLQueryAllEvents is a plpgsql function to enable queries over all hourly tables 73 | // Example: SELECT COUNT(*) FROM all_events_query('WHERE trigram_string(payload) LIKE ''%%foo%%'''); 74 | const SQLQueryAllEvents = `CREATE OR REPLACE FUNCTION all_events_query(keyword text, 75 | start_time timestamp with time zone, 76 | end_time timestamp with time zone, 77 | event_type text[], 78 | ipsrc inet, portsrc int, 79 | ipdest inet, portdest int, 80 | mlimit int) 81 | RETURNS TABLE (ts timestamp, payload jsonb) 82 | AS $$ 83 | DECLARE 84 | clause text; 85 | t RECORD; 86 | tables CURSOR FOR 87 | SELECT * FROM information_schema.tables 88 | WHERE table_name LIKE 'event%'; 89 | BEGIN 90 | clause := ''; 91 | OPEN tables; 92 | 93 | LOOP 94 | FETCH tables INTO t; 95 | EXIT WHEN NOT FOUND; 96 | IF clause != '' THEN 97 | clause := clause || ' UNION ALL '; 98 | END IF; 99 | clause := clause 100 | || 'SELECT * FROM ' || quote_ident(t.table_name) 101 | || ' WHERE ts BETWEEN ' || quote_literal(start_time) 102 | || ' AND ' || quote_literal(end_time); 103 | IF keyword IS NOT NULL THEN 104 | clause := clause 105 | || ' AND trigram_string(payload) LIKE ' || quote_literal(keyword); 106 | END IF; 107 | IF event_type IS NOT NULL THEN 108 | clause := clause 109 | || ' AND payload->>''event_type'' = ANY(' || quote_literal(event_type) || ')'; 110 | END IF; 111 | IF ipsrc IS NOT NULL THEN 112 | clause := clause 113 | || ' AND (payload->>''src_ip'')::inet <<= inet ' || quote_literal(ipsrc); 114 | END IF; 115 | IF portsrc IS NOT NULL THEN 116 | clause := clause 117 | || ' AND payload->>''src_port'' = ' || quote_literal(portsrc); 118 | END IF; 119 | IF ipdest IS NOT NULL THEN 120 | clause := clause 121 | || ' AND (payload->>''dest_ip'')::inet <<= inet ' || quote_literal(ipdest); 122 | END IF; 123 | IF portdest IS NOT NULL 
THEN 124 | clause := clause 125 | || ' AND payload->>''dest_port'' = ' || quote_literal(portdest); 126 | END IF; 127 | END LOOP; 128 | 129 | IF mlimit IS NOT NULL THEN 130 | clause := clause || ' LIMIT ' || quote_literal(mlimit); 131 | END IF; 132 | 133 | RAISE NOTICE '%', clause; 134 | 135 | CLOSE tables; 136 | RETURN QUERY EXECUTE clause; 137 | END; 138 | $$ 139 | LANGUAGE plpgsql 140 | STABLE; 141 | ` 142 | -------------------------------------------------------------------------------- /mgmt/server_test.go: -------------------------------------------------------------------------------- 1 | package mgmt 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2021, DCSO GmbH 5 | 6 | import ( 7 | context "context" 8 | "io/ioutil" 9 | "os" 10 | "testing" 11 | "time" 12 | 13 | "github.com/DCSO/bloom" 14 | "github.com/DCSO/fever/processing" 15 | "github.com/sirupsen/logrus" 16 | "golang.org/x/sync/errgroup" 17 | grpc "google.golang.org/grpc" 18 | emptypb "google.golang.org/protobuf/types/known/emptypb" 19 | ) 20 | 21 | var ( 22 | mgmtCfg = GRPCEndpointConfig{ 23 | EndpointConfig: EndpointConfig{ 24 | Network: "unix", 25 | ListenerAddress: "../tmp/test-fever-mgmt.socket", 26 | TLSDisable: true, 27 | }, 28 | DialOptions: []grpc.DialOption{grpc.WithInsecure()}, 29 | } 30 | ) 31 | 32 | func TestMain(m *testing.M) { 33 | logrus.SetLevel(logrus.TraceLevel) 34 | cctx, cancel := context.WithCancel(context.Background()) 35 | defer cancel() 36 | eg, ectx := errgroup.WithContext(cctx) 37 | 38 | if _, err := os.Stat("../tmp"); os.IsNotExist(err) { 39 | err := os.Mkdir("../tmp", os.ModePerm) 40 | if err != nil { 41 | logrus.Fatal(err) 42 | } 43 | } 44 | 45 | bf := bloom.Initialize(100000, 0.0000001) 46 | bf.Add([]byte("foo")) 47 | 48 | bfFile, err := ioutil.TempFile("", "example") 49 | if err != nil { 50 | logrus.Fatal(err) 51 | } 52 | defer os.Remove(bfFile.Name()) 53 | bf.Write(bfFile) 54 | bfFile.Close() 55 | 56 | bh, err := processing.MakeBloomHandlerFromFile(bfFile.Name(), false, nil, 
57 | nil, "alert", []string{}) 58 | if err != nil { 59 | logrus.Fatal(err) 60 | } 61 | 62 | msrv, err := NewMgmtServer(ectx, mgmtCfg, &State{ 63 | BloomHandler: bh, 64 | }) 65 | if err != nil { 66 | logrus.Fatal(err) 67 | } 68 | eg.Go(func() error { 69 | if err := msrv.ListenAndServe(); err != nil { 70 | logrus.WithError(err).Error("gRPC server failed") 71 | return err 72 | } 73 | return nil 74 | }) 75 | time.Sleep(100 * time.Millisecond) 76 | 77 | defer func() { 78 | cancel() 79 | }() 80 | if rc := m.Run(); rc != 0 { 81 | cancel() 82 | msrv.Stop() 83 | logrus.Warnf("test failed with %d", rc) 84 | os.Exit(rc) 85 | } 86 | cancel() 87 | msrv.Stop() 88 | } 89 | 90 | func TestAlive(t *testing.T) { 91 | logrus.StandardLogger().SetLevel(logrus.DebugLevel) 92 | conn, err := grpc.Dial(mgmtCfg.Network+":"+mgmtCfg.ListenerAddress, mgmtCfg.DialOptions...) 93 | if err != nil { 94 | t.Fatal(err) 95 | } 96 | defer conn.Close() 97 | 98 | clt := NewMgmtServiceClient(conn) 99 | got, err := clt.Alive(context.TODO(), &MgmtAliveRequest{Alive: "TestAlive"}) 100 | if err != nil { 101 | t.Fatal(err) 102 | } 103 | if got.GetEcho() != "TestAlive" { 104 | t.Errorf("Alive(): %v, want %v", got.GetEcho(), "TestAlive") 105 | } 106 | } 107 | 108 | func TestBloomInfo(t *testing.T) { 109 | logrus.StandardLogger().SetLevel(logrus.DebugLevel) 110 | conn, err := grpc.Dial(mgmtCfg.Network+":"+mgmtCfg.ListenerAddress, mgmtCfg.DialOptions...) 
111 | if err != nil { 112 | t.Fatal(err) 113 | } 114 | defer conn.Close() 115 | 116 | clt := NewMgmtServiceClient(conn) 117 | got, err := clt.BloomInfo(context.TODO(), &emptypb.Empty{}) 118 | if err != nil { 119 | t.Fatal(err) 120 | } 121 | if got.GetCapacity() != 100000 { 122 | t.Errorf("BloomInfo(): %v, want %v", got.GetCapacity(), 100000) 123 | } 124 | if got.GetFpprob() != 0.0000001 { 125 | t.Errorf("BloomInfo(): %v, want %v", got.GetFpprob(), 0.0000001) 126 | } 127 | if got.GetElements() != 1 { 128 | t.Errorf("BloomInfo(): %v, want %v", got.GetElements(), 1) 129 | } 130 | if got.GetHashfuncs() != 24 { 131 | t.Errorf("BloomInfo(): %v, want %v", got.GetHashfuncs(), 24) 132 | } 133 | if got.GetBits() != 3354770 { 134 | t.Errorf("BloomInfo(): %v, want %v", got.GetBits(), 3354770) 135 | } 136 | } 137 | 138 | func TestBloomSave(t *testing.T) { 139 | logrus.StandardLogger().SetLevel(logrus.DebugLevel) 140 | conn, err := grpc.Dial(mgmtCfg.Network+":"+mgmtCfg.ListenerAddress, mgmtCfg.DialOptions...) 141 | if err != nil { 142 | t.Fatal(err) 143 | } 144 | defer conn.Close() 145 | 146 | clt := NewMgmtServiceClient(conn) 147 | _, err = clt.BloomSave(context.TODO(), &emptypb.Empty{}) 148 | if err != nil { 149 | t.Fatal(err) 150 | } 151 | 152 | } 153 | 154 | func TestBloomReload(t *testing.T) { 155 | logrus.StandardLogger().SetLevel(logrus.DebugLevel) 156 | conn, err := grpc.Dial(mgmtCfg.Network+":"+mgmtCfg.ListenerAddress, mgmtCfg.DialOptions...) 157 | if err != nil { 158 | t.Fatal(err) 159 | } 160 | defer conn.Close() 161 | 162 | clt := NewMgmtServiceClient(conn) 163 | _, err = clt.BloomReload(context.TODO(), &emptypb.Empty{}) 164 | if err != nil { 165 | t.Fatal(err) 166 | } 167 | } 168 | 169 | func TestBloomAdd(t *testing.T) { 170 | logrus.StandardLogger().SetLevel(logrus.DebugLevel) 171 | conn, err := grpc.Dial(mgmtCfg.Network+":"+mgmtCfg.ListenerAddress, mgmtCfg.DialOptions...) 
172 | if err != nil { 173 | t.Fatal(err) 174 | } 175 | defer conn.Close() 176 | 177 | clt := NewMgmtServiceClient(conn) 178 | got, err := clt.BloomInfo(context.TODO(), &emptypb.Empty{}) 179 | if err != nil { 180 | t.Fatal(err) 181 | } 182 | if got.GetElements() != 1 { 183 | t.Errorf("BloomAdd(): %v, want %v", got.GetElements(), 1) 184 | } 185 | 186 | stream, err := clt.BloomAdd(context.TODO()) 187 | if err != nil { 188 | t.Fatal(err) 189 | } 190 | for _, part := range []string{"a", "b", "c"} { 191 | if err := stream.Send(&MgmtBloomAddRequest{Ioc: part}); err != nil { 192 | t.Fatal(err) 193 | } 194 | } 195 | resp, err := stream.CloseAndRecv() 196 | if err != nil { 197 | t.Fatal(err) 198 | } 199 | if resp.GetAdded() != 3 { 200 | t.Fatalf("wanted 3, got %d", resp.GetAdded()) 201 | } 202 | 203 | got, err = clt.BloomInfo(context.TODO(), &emptypb.Empty{}) 204 | if err != nil { 205 | t.Fatal(err) 206 | } 207 | if got.GetElements() != 4 { 208 | t.Errorf("BloomAdd(): %v, want %v", got.GetElements(), 4) 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /processing/ip_handler.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2018, 2020, DCSO GmbH 5 | 6 | import ( 7 | "bufio" 8 | "net" 9 | "os" 10 | "sync" 11 | 12 | "github.com/DCSO/fever/types" 13 | "github.com/DCSO/fever/util" 14 | 15 | log "github.com/sirupsen/logrus" 16 | "github.com/yl2chen/cidranger" 17 | ) 18 | 19 | // IPAlertJSONProviderSrcIP is an AlertJSONProvider for source IP address matches. 20 | type IPAlertJSONProviderSrcIP struct{} 21 | 22 | // GetAlertJSON returns the "alert" subobject for an alert EVE event. 
23 | func (a IPAlertJSONProviderSrcIP) GetAlertJSON(inputEvent types.Entry, 24 | prefix string, ioc string) ([]byte, error) { 25 | return util.GenericGetAlertObjForIoc(inputEvent, prefix, ioc, 26 | "%s Communication involving IP "+inputEvent.SrcIP+" in listed range %s") 27 | } 28 | 29 | // IPAlertJSONProviderDstIP is an AlertJSONProvider for destination IP address 30 | // matches. 31 | type IPAlertJSONProviderDstIP struct{} 32 | 33 | // GetAlertJSON returns the "alert" subobject for an alert EVE event. 34 | func (a IPAlertJSONProviderDstIP) GetAlertJSON(inputEvent types.Entry, 35 | prefix string, ioc string) ([]byte, error) { 36 | return util.GenericGetAlertObjForIoc(inputEvent, prefix, ioc, 37 | "%s Communication involving IP "+inputEvent.DestIP+" in listed range %s") 38 | } 39 | 40 | // IPHandler is a Handler which is meant to check for the presence of 41 | // event type-specific keywords in a Bloom filter, raising new 'alert' type 42 | // events when matches are found. 43 | type IPHandler struct { 44 | sync.Mutex 45 | Logger *log.Entry 46 | Name string 47 | EventType string 48 | Ranger cidranger.Ranger 49 | IPListFilename string 50 | DatabaseEventChan chan types.Entry 51 | ForwardHandler Handler 52 | AlertPrefix string 53 | Alertifier *util.Alertifier 54 | } 55 | 56 | // MakeIPHandler returns a new IPHandler, checking against the given 57 | // IP ranges and sending alerts to databaseChan as well as forwarding them 58 | // to a given forwarding handler. 
59 | func MakeIPHandler(ranger cidranger.Ranger, 60 | databaseChan chan types.Entry, forwardHandler Handler, 61 | alertPrefix string) *IPHandler { 62 | ih := &IPHandler{ 63 | Logger: log.WithFields(log.Fields{ 64 | "domain": "ip-blacklist", 65 | }), 66 | Ranger: ranger, 67 | DatabaseEventChan: databaseChan, 68 | ForwardHandler: forwardHandler, 69 | AlertPrefix: alertPrefix, 70 | Alertifier: util.MakeAlertifier(alertPrefix), 71 | } 72 | ih.Alertifier.SetExtraModifier(bloomExtraModifier) 73 | ih.Alertifier.RegisterMatchType("ip-src", IPAlertJSONProviderSrcIP{}) 74 | ih.Alertifier.RegisterMatchType("ip-dst", IPAlertJSONProviderDstIP{}) 75 | ih.Alertifier.SetExtraModifier(nil) 76 | log.WithFields(log.Fields{}).Info("IP range list loaded") 77 | return ih 78 | } 79 | 80 | func rangerFromFile(IPListFilename string) (cidranger.Ranger, error) { 81 | inFile, err := os.Open(IPListFilename) 82 | if err != nil { 83 | return nil, err 84 | } 85 | defer inFile.Close() 86 | ranger := cidranger.NewPCTrieRanger() 87 | scanner := bufio.NewScanner(inFile) 88 | scanner.Split(bufio.ScanLines) 89 | for scanner.Scan() { 90 | lineText := scanner.Text() 91 | _, network, err := net.ParseCIDR(lineText) 92 | if err != nil { 93 | log.Warnf("invalid IP range %s, skipping", lineText) 94 | } else { 95 | log.Debugf("adding IP range %s", lineText) 96 | ranger.Insert(cidranger.NewBasicRangerEntry(*network)) 97 | } 98 | } 99 | return ranger, nil 100 | } 101 | 102 | // MakeIPHandlerFromFile returns a new IPHandler created from a new 103 | // IP range list specified by the given file name. 
104 | func MakeIPHandlerFromFile(IPListFilename string, 105 | databaseChan chan types.Entry, forwardHandler Handler, alertPrefix string) (*IPHandler, error) { 106 | ranger, err := rangerFromFile(IPListFilename) 107 | if err != nil { 108 | return nil, err 109 | } 110 | ih := MakeIPHandler(ranger, databaseChan, forwardHandler, alertPrefix) 111 | ih.IPListFilename = IPListFilename 112 | return ih, nil 113 | } 114 | 115 | // Reload triggers a reload of the contents of the IP list file. 116 | func (a *IPHandler) Reload() error { 117 | ranger, err := rangerFromFile(a.IPListFilename) 118 | if err != nil { 119 | return err 120 | } 121 | a.Lock() 122 | a.Ranger = ranger 123 | a.Unlock() 124 | return nil 125 | } 126 | 127 | // Consume processes an Entry, emitting alerts if there is a match 128 | func (a *IPHandler) Consume(e *types.Entry) error { 129 | a.Lock() 130 | srcRanges, err := a.Ranger.ContainingNetworks(net.ParseIP(e.SrcIP)) 131 | if err != nil { 132 | log.Warn(err) 133 | } 134 | for _, v := range srcRanges { 135 | matchedNet := v.Network() 136 | matchedNetString := matchedNet.String() 137 | if n, err := a.Alertifier.MakeAlert(*e, matchedNetString, "ip-src"); err == nil { 138 | a.DatabaseEventChan <- *n 139 | a.ForwardHandler.Consume(n) 140 | } else { 141 | log.Warn(err) 142 | } 143 | } 144 | dstRanges, err := a.Ranger.ContainingNetworks(net.ParseIP(e.DestIP)) 145 | if err != nil { 146 | log.Warn(err) 147 | } 148 | for _, v := range dstRanges { 149 | matchedNet := v.Network() 150 | matchedNetString := matchedNet.String() 151 | if n, err := a.Alertifier.MakeAlert(*e, matchedNetString, "ip-dst"); err == nil { 152 | a.DatabaseEventChan <- *n 153 | a.ForwardHandler.Consume(n) 154 | } else { 155 | log.Warn(err) 156 | } 157 | } 158 | a.Unlock() 159 | return nil 160 | } 161 | 162 | // GetName returns the name of the handler 163 | func (a *IPHandler) GetName() string { 164 | return "IP blacklist handler" 165 | } 166 | 167 | // GetEventTypes returns a slice of event type 
strings that this handler 168 | // should be applied to 169 | func (a *IPHandler) GetEventTypes() []string { 170 | return []string{"http", "dns", "tls", "smtp", "flow", "ssh", "tls", "smb"} 171 | } 172 | -------------------------------------------------------------------------------- /mgmt/mgmtserver.go: -------------------------------------------------------------------------------- 1 | package mgmt 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2021, DCSO GmbH 5 | 6 | import ( 7 | context "context" 8 | "errors" 9 | "fmt" 10 | "io" 11 | "net" 12 | "os" 13 | "path/filepath" 14 | 15 | "github.com/DCSO/bloom" 16 | "github.com/sirupsen/logrus" 17 | grpc "google.golang.org/grpc" 18 | codes "google.golang.org/grpc/codes" 19 | status "google.golang.org/grpc/status" 20 | emptypb "google.golang.org/protobuf/types/known/emptypb" 21 | ) 22 | 23 | const ( 24 | permSocketPath = 0750 25 | ) 26 | 27 | type mgmtServer struct { 28 | UnimplementedMgmtServiceServer 29 | ctx context.Context 30 | Logger *logrus.Entry 31 | grpcSrv *grpc.Server 32 | cfg GRPCEndpointConfig 33 | state *State 34 | } 35 | 36 | // NewMgmtServer returns a new mamagement server instance registered with gRPC. 37 | func NewMgmtServer(parent context.Context, cfg GRPCEndpointConfig, state *State) (Server, error) { 38 | srv := &mgmtServer{ 39 | ctx: parent, 40 | cfg: cfg, 41 | state: state, 42 | Logger: logrus.StandardLogger().WithFields(logrus.Fields{ 43 | "domain": "mgmt", 44 | }), 45 | } 46 | srv.grpcSrv = grpc.NewServer(cfg.ServerOptions...) 47 | RegisterMgmtServiceServer(srv.grpcSrv, srv) 48 | 49 | return srv, nil 50 | } 51 | 52 | // Stop stops the mgmtServer. 53 | func (srv *mgmtServer) Stop() { 54 | srv.grpcSrv.GracefulStop() 55 | } 56 | 57 | // ListenAndServe starts the mgmtServer, accepting connections on the given 58 | // communication channel. 
59 | func (srv *mgmtServer) ListenAndServe() (err error) { 60 | err = errors.New("ListenAndServe() can only be called once") 61 | 62 | var ln net.Listener 63 | 64 | if ln, err = net.Listen(srv.cfg.Network, srv.cfg.ListenerAddress); err != nil { 65 | srv.Logger.WithError(err).WithFields(logrus.Fields{ 66 | "network": srv.cfg.Network, 67 | "address": srv.cfg.ListenerAddress, 68 | }).Error("setting up mgmt endpoint") 69 | return 70 | } 71 | defer ln.Close() 72 | 73 | if dsln, ok := ln.(*net.UnixListener); ok { 74 | if err = os.MkdirAll(filepath.Dir(srv.cfg.ListenerAddress), permSocketPath); err != nil { 75 | srv.Logger.WithError(err).WithFields(logrus.Fields{ 76 | "path": filepath.Dir(srv.cfg.ListenerAddress), 77 | "perm_path": permSocketPath, 78 | }).Error("unable to create path") 79 | return 80 | } 81 | dsln.SetUnlinkOnClose(true) 82 | } 83 | 84 | srv.Logger.Info("gRPC mgmt service listening ...") 85 | err = srv.grpcSrv.Serve(ln) 86 | srv.Logger.Info("gRPC mgmt service stopped") 87 | return err 88 | } 89 | 90 | // 91 | // MgmtServiceServer interface 92 | // 93 | 94 | // BloomInfo implements the function to return internal status information about 95 | // the Bloom filter currently loaded in the FEVER instance. 
96 | func (srv *mgmtServer) BloomInfo(ctx context.Context, _req *emptypb.Empty) (*MgmtBloomInfoResponse, error) { 97 | srv.Logger.Debug("responding to BloomInfo") 98 | 99 | var resp *MgmtBloomInfoResponse 100 | hasBloom := (srv.state.BloomHandler != nil) 101 | if hasBloom { 102 | resp = &MgmtBloomInfoResponse{ 103 | HasBloom: true, 104 | Capacity: srv.state.BloomHandler.IocBloom.MaxNumElements(), 105 | Elements: srv.state.BloomHandler.IocBloom.N, 106 | Bits: srv.state.BloomHandler.IocBloom.NumBits(), 107 | Fpprob: srv.state.BloomHandler.IocBloom.FalsePositiveProb(), 108 | Hashfuncs: srv.state.BloomHandler.IocBloom.NumHashFuncs(), 109 | } 110 | } else { 111 | resp = &MgmtBloomInfoResponse{ 112 | HasBloom: false, 113 | } 114 | } 115 | return resp, nil 116 | } 117 | 118 | // BloomAdd implements the function to add items from an incoming stream to the 119 | // Bloom filter currently loaded in the FEVER instance. 120 | func (srv *mgmtServer) BloomAdd(stream MgmtService_BloomAddServer) error { 121 | srv.Logger.Debug("responding to BloomAdd") 122 | 123 | hasBloom := (srv.state.BloomHandler != nil) 124 | if !hasBloom { 125 | return stream.SendAndClose(&MgmtBloomAddResponse{Added: 0}) 126 | } 127 | i := uint64(0) 128 | for { 129 | req, err := stream.Recv() 130 | if err != nil { 131 | if err == io.EOF { 132 | return stream.SendAndClose(&MgmtBloomAddResponse{Added: i}) 133 | } 134 | return status.Error(codes.InvalidArgument, err.Error()) 135 | } 136 | srv.state.BloomHandler.IocBloom.Add([]byte(req.GetIoc())) 137 | i++ 138 | } 139 | } 140 | 141 | // BloomSave implements the function to serialize the Bloom filter currently 142 | // loaded in the FEVER instance to disk. 
143 | func (srv *mgmtServer) BloomSave(ctx context.Context, _req *emptypb.Empty) (*emptypb.Empty, error) { 144 | srv.Logger.Debug("responding to BloomSave") 145 | 146 | hasBloom := (srv.state.BloomHandler != nil) 147 | if !hasBloom { 148 | return &emptypb.Empty{}, nil 149 | } 150 | if srv.state.BloomHandler.BloomFilename == "" { 151 | return &emptypb.Empty{}, fmt.Errorf("filter was not created from file, cannot be saved") 152 | } 153 | err := bloom.WriteFilter(srv.state.BloomHandler.IocBloom, 154 | srv.state.BloomHandler.BloomFilename, 155 | srv.state.BloomHandler.BloomFileIsCompressed) 156 | if err != nil { 157 | return &emptypb.Empty{}, err 158 | } 159 | 160 | return &emptypb.Empty{}, nil 161 | } 162 | 163 | // BloomReload implements the function to reload the Bloom filter currently 164 | // loaded in the FEVER instance from disk. 165 | func (srv *mgmtServer) BloomReload(ctx context.Context, _req *emptypb.Empty) (*emptypb.Empty, error) { 166 | srv.Logger.Debug("responding to BloomReload") 167 | 168 | hasBloom := (srv.state.BloomHandler != nil) 169 | if !hasBloom { 170 | return &emptypb.Empty{}, nil 171 | } 172 | err := srv.state.BloomHandler.Reload() 173 | if err != nil { 174 | return &emptypb.Empty{}, err 175 | } 176 | 177 | return &emptypb.Empty{}, nil 178 | } 179 | 180 | // Alive implements a simple echo command. 181 | func (srv *mgmtServer) Alive(ctx context.Context, req *MgmtAliveRequest) (*MgmtAliveResponse, error) { 182 | return &MgmtAliveResponse{Echo: req.GetAlive()}, nil 183 | } 184 | -------------------------------------------------------------------------------- /fever.yaml: -------------------------------------------------------------------------------- 1 | # Config file for FEVER 2 | # --------------------- 3 | 4 | # Output additional debug information. 5 | # verbose: true 6 | # Enable output of profiling information to specified file. 7 | # profile: profile.out 8 | # Use the given size for defining the size of data blocks to be handled at once. 
9 | # chunksize: 50000 10 | # Do not submit data to the sinks, only print on stdout. 11 | # dummy: true 12 | # Retry connection to sockets or servers for at most the given amount of times before 13 | # giving up. Use the value of 0 to never give up. 14 | # reconnect-retries: 5 15 | # Specify time interval or number of items to cache before flushing to 16 | # database, whichever happens first. 17 | # flushtime: 1m 18 | # flushcount: 100000 19 | 20 | # Configuration for PostgreSQL 9.5+ database connection. 21 | database: 22 | enable: false 23 | host: localhost 24 | user: user 25 | password: pass 26 | database: test 27 | # Set to true to use the MongoDB interface instead of PostgreSQL. 28 | mongo: false 29 | # Time interval after which a new table is created and background 30 | # indexing is started. 31 | rotate: 1h 32 | # Maximum size in gigabytes. 33 | maxtablesize: 50 34 | 35 | # Configuration for input (from Suricata side). Only one of 'socket' 36 | # or 'redis' is supported at the same time, comment/uncomment to choose. 37 | input: 38 | # Path to the socket that Suricata writes to. 39 | socket: /tmp/suri.sock 40 | # Buffer length for EVE items parsed from input socket. Useful to help FEVER 41 | # keep up with input from Suricata in case the processing pipeline is 42 | # temporarily slow. 43 | # Will track current buffer size in the `input_queue_length` metric. 44 | buffer: 500000 45 | # Rather drop items from a full buffer than causing writes to the input 46 | # socket to block. 47 | # This avoids congestion effects in Suricata (up to packet drops) if FEVER 48 | # or its forwarding receiver remains slow for a longer period of time. 49 | # Will count the number of dropped items in the `input_queue_dropped` metric. 50 | buffer-drop: true 51 | #redis: 52 | # # Redis server hostname. We assume the 'suricata' list as a source. 53 | # server: localhost 54 | # # Disables Redis pipelining. 
55 | # nopipe: true 56 | 57 | # Configure forwarding of events processed by FEVER, i.e. define what event 58 | # types to forward. 59 | multi-forward: 60 | # Set 'all' to true to forward everything received from Suricata, otherwise 61 | # use the 'types' list to choose. Example: 62 | # socketall: 63 | # socket: /tmp/out-all.sock 64 | # buffer-length: 100000 65 | # all: true 66 | # types: [] 67 | socketalerts: 68 | socket: /tmp/suri-forward.sock 69 | all: false 70 | buffer-length: 1000 71 | types: 72 | - alert 73 | - stats 74 | 75 | # Configuration for flow report submission. 76 | flowreport: 77 | # Interval used for aggregation. 78 | interval: 60s 79 | submission-url: amqp://guest:guest@localhost:5672/ 80 | submission-exchange: aggregations 81 | # Set to true to disable gzip compression for uploads. 82 | nocompress: false 83 | # If both srcip and destip are non-empty, inject an extra flow record for 84 | # these towards the given destination port. 85 | #testdata-srcip: 0.0.0.1 86 | #testdata-destip: 0.0.0.2 87 | #testdata-destport: 99999 88 | # Set to true to count _all_ flows, not just TCP bidirectional ones. 89 | all: false 90 | 91 | # Configuration for metrics (i.e. InfluxDB) submission. 92 | metrics: 93 | enable: true 94 | submission-url: amqp://guest:guest@localhost:5672/ 95 | submission-exchange: metrics 96 | 97 | # Configuration for passive DNS submission. 98 | pdns: 99 | enable: true 100 | submission-url: amqp://guest:guest@localhost:5672/ 101 | submission-exchange: pdns 102 | # If test-domain is non-empty, add an extra A observation for this rrname to 103 | # all submissions 104 | #test-domain: heartbeat.fever-heartbeat 105 | 106 | # Configuration for alert-associated metadata submission. 107 | context: 108 | enable: false 109 | cache-timeout: 1h 110 | submission-url: amqp://guest:guest@localhost:5672/ 111 | submission-exchange: context 112 | 113 | # Extra fields to add to each forwarded event. 
#add-fields:
#  sensor-id: foobar

# Send 'heartbeat' HTTP or alert event
heartbeat:
  enable: false
  # 24h HH:MM strings with local times to send heartbeat as HTTP event
  times:
    - "00:01"
  # 24h HH:MM strings with local times to send heartbeat as alert
  #alert-times:
  #  - "00:02"

# Configuration for detailed flow metadata submission.
flowextract:
  enable: false
  submission-url: amqp://guest:guest@localhost:5672/
  submission-exchange: aggregations
  # Uncomment to enable flow collection only for IPs in the given
  # Bloom filter.
  # bloom-selector: /tmp/flows.bloom

# Configuration for Bloom filter alerting on HTTP, DNS and
# TLS metadata events.
#bloom:
#  file: ./in.bloom.gz
#  zipped: true
#  alert-prefix: BLF
#  blacklist-iocs:
#    - /
#    - /index.htm
#    - /index.html

# Configuration for active information gathering.
active:
  # Enable reverse DNS lookups for src/dst IPs.
  rdns: false
  # Only do reverse lookups for RFC 1918 IPs.
  rdns-private-only: true
  # Duration to cache lookup results for, to avoid excessive DNS load.
  rdns-cache-expiry: 120s

# Configuration for FEVER's log file handling.
logging:
  # Insert file name here to redirect logs to separate file. If left blank, logs
  # will be printed to the stdout/stderr of the FEVER process.
  file:
  # Set to true to enable JSON output.
  json: false

# Configuration for FEVER's remote management interface.
mgmt:
  # Use local socket for gRPC communication.
  socket: /tmp/fever-mgmt.sock
  # Use network server for gRPC communication.
169 | #network: tcp 170 | #host: localhost:9999 171 | -------------------------------------------------------------------------------- /processing/flow_extractor_test.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, 2019, DCSO GmbH 5 | 6 | import ( 7 | "github.com/NeowayLabs/wabbit" 8 | "github.com/NeowayLabs/wabbit/amqptest" 9 | "github.com/NeowayLabs/wabbit/amqptest/server" 10 | 11 | "github.com/DCSO/bloom" 12 | "github.com/DCSO/fever/types" 13 | "github.com/DCSO/fever/util" 14 | 15 | "bytes" 16 | "fmt" 17 | "math/rand" 18 | "reflect" 19 | "sync" 20 | "testing" 21 | "time" 22 | ) 23 | 24 | const ( 25 | numFlowExtractorEvents = 100000 26 | ) 27 | 28 | func makeFlowExtractorEvent(ipv6 bool) types.Entry { 29 | 30 | protos := []string{"TCP", "UDP"} 31 | n := rand.Int() % len(protos) 32 | 33 | var srcIP, destIP string 34 | if !ipv6 { 35 | srcIP = fmt.Sprintf("10.0.0.%d", rand.Intn(50)) 36 | destIP = fmt.Sprintf("10.0.0.%d", rand.Intn(50)) 37 | } else { 38 | srcIP = "2001:0db8:85a3:0000:0000:8a2e:0370:7334" 39 | destIP = "2001:0db8:85a3:0000:0000:8a2e:0370:7334" 40 | } 41 | 42 | e := types.Entry{ 43 | SrcIP: srcIP, 44 | SrcPort: []int64{1, 2, 3, 4, 5}[rand.Intn(5)], 45 | DestIP: destIP, 46 | DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], 47 | Timestamp: time.Now().Format(types.SuricataTimestampFormat), 48 | EventType: "flow", 49 | Proto: protos[n], 50 | BytesToClient: int64(rand.Intn(10000)), 51 | BytesToServer: int64(rand.Intn(10000)), 52 | PktsToClient: int64(rand.Intn(100)), 53 | PktsToServer: int64(rand.Intn(100)), 54 | } 55 | return e 56 | } 57 | 58 | func makeBloomFilter() *bloom.BloomFilter { 59 | bf := bloom.Initialize(10000, 1e-10) 60 | for i := 0; i < 10000; i++ { 61 | bf.Add([]byte(fmt.Sprintf("10.0.0.%d", rand.Intn(50)))) 62 | } 63 | bf.Add([]byte("2001:0db8:85a3:0000:0000:8a2e:0370:7334")) 64 | return &bf 65 | } 66 | 67 | func 
flowExtractorWaitForResults(results *[]string, expectedFlows []types.Entry, 68 | resultsLock *sync.Mutex) []types.FlowEvent { 69 | defer resultsLock.Unlock() 70 | var flows []types.FlowEvent 71 | for { 72 | flows = make([]types.FlowEvent, 0) 73 | resultsLock.Lock() 74 | for i := range *results { 75 | result := (*results)[i] 76 | buffer := bytes.NewBufferString(result) 77 | for { 78 | var fe types.FlowEvent 79 | err := fe.Unmarshal(buffer) 80 | if err != nil { 81 | break 82 | } 83 | flows = append(flows, fe) 84 | } 85 | if len(flows) == len(expectedFlows) { 86 | return flows 87 | } 88 | } 89 | resultsLock.Unlock() 90 | time.Sleep(100 * time.Millisecond) 91 | } 92 | } 93 | 94 | func TestFlowExtractor(t *testing.T) { 95 | serverURL := "amqp://sensor:sensor@127.0.0.1:11111/%2f/" 96 | 97 | // start mock AMQP server 98 | fakeServer := server.NewServer(serverURL) 99 | fakeServer.Start() 100 | defer fakeServer.Stop() 101 | 102 | // set up consumer 103 | results := make([]string, 0) 104 | var resultsLock sync.Mutex 105 | c, err := util.NewConsumer(serverURL, "tdh.flows", "direct", "tdh.flows.testqueue", 106 | "", "", func(d wabbit.Delivery) { 107 | resultsLock.Lock() 108 | results = append(results, string(d.Body())) 109 | resultsLock.Unlock() 110 | }) 111 | if err != nil { 112 | t.Fatal(err) 113 | } 114 | defer c.Shutdown() 115 | 116 | // set up submitter 117 | submitter, err := util.MakeAMQPSubmitterWithReconnector(serverURL, 118 | "tdh.flows", true, func(url string) (wabbit.Conn, error) { 119 | var conn wabbit.Conn 120 | conn, err = amqptest.Dial(url) 121 | return conn, err 122 | }) 123 | if err != nil { 124 | t.Fatal(err) 125 | } 126 | defer submitter.Finish() 127 | 128 | mla, err := MakeFlowExtractor(1*time.Second, 100, "", submitter) 129 | 130 | mla.BloomFilter = makeBloomFilter() 131 | 132 | if err != nil { 133 | t.Fatal(err) 134 | } 135 | 136 | mla.Run() 137 | 138 | expectedFlows := make([]types.Entry, 0) 139 | 140 | for i := 0; i < numFlowExtractorEvents; i++ { 141 
| ipv6 := false 142 | //we mix in some IPv6 packets... 143 | if rand.Intn(2) == 0 { 144 | ipv6 = true 145 | } 146 | ev := makeFlowExtractorEvent(ipv6) 147 | err := mla.Consume(&ev) 148 | if err != nil { 149 | t.Fatal(err) 150 | } 151 | if mla.BloomFilter.Check([]byte(ev.SrcIP)) || mla.BloomFilter.Check([]byte(ev.DestIP)) { 152 | expectedFlows = append(expectedFlows, ev) 153 | } 154 | } 155 | 156 | flows := flowExtractorWaitForResults(&results, expectedFlows, &resultsLock) 157 | 158 | stopChan := make(chan bool) 159 | mla.Stop(stopChan) 160 | <-stopChan 161 | 162 | if len(flows) != len(expectedFlows) { 163 | t.Fatalf("Error: Expected %d flows, got %d!", len(expectedFlows), len(flows)) 164 | } 165 | 166 | for i := range flows { 167 | flow := flows[i] 168 | expectedEntry := expectedFlows[i] 169 | var expectedFlow types.FlowEvent 170 | expectedFlow.FromEntry(&expectedEntry) 171 | if !reflect.DeepEqual(flow, expectedFlow) { 172 | t.Errorf("Flows do not match!") 173 | 174 | if flow.Format != expectedFlow.Format { 175 | t.Errorf("Formats do not match!") 176 | } 177 | 178 | if flow.Timestamp != expectedFlow.Timestamp { 179 | t.Errorf("Timestamps do not match!") 180 | } 181 | 182 | if !bytes.Equal(flow.SrcIP, expectedFlow.SrcIP) { 183 | t.Errorf("Source IPs do not match!") 184 | } 185 | 186 | if !bytes.Equal(flow.DestIP, expectedFlow.DestIP) { 187 | t.Errorf("Destination IPs do not match!") 188 | } 189 | 190 | if flow.SrcPort != expectedFlow.SrcPort { 191 | t.Errorf("Source Ports do not match!") 192 | } 193 | 194 | if flow.DestPort != expectedFlow.DestPort { 195 | t.Errorf("Destination Ports do not match!") 196 | } 197 | 198 | if flow.Flags != expectedFlow.Flags { 199 | t.Errorf("Flags do not match!") 200 | } 201 | 202 | if flow.BytesToServer != expectedFlow.BytesToServer { 203 | t.Errorf("BytesToServer do not match!") 204 | } 205 | 206 | if flow.BytesToClient != expectedFlow.BytesToClient { 207 | t.Errorf("BytesToClient do not match!") 208 | } 209 | 210 | if 
flow.PktsToServer != expectedFlow.PktsToServer { 211 | t.Errorf("PktsToServer do not match!") 212 | } 213 | 214 | if flow.PktsToClient != expectedFlow.PktsToClient { 215 | t.Errorf("PktsToClient do not match!") 216 | } 217 | } 218 | } 219 | 220 | } 221 | -------------------------------------------------------------------------------- /processing/flow_aggregator.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, DCSO GmbH 5 | 6 | import ( 7 | "bytes" 8 | "encoding/json" 9 | "fmt" 10 | "os" 11 | "sync" 12 | "time" 13 | 14 | "github.com/DCSO/fever/types" 15 | "github.com/DCSO/fever/util" 16 | 17 | log "github.com/sirupsen/logrus" 18 | ) 19 | 20 | // FlowAggregatorPerfStats contains performance stats written to InfluxDB 21 | // for monitoring. 22 | type FlowAggregatorPerfStats struct { 23 | FlowAggregateRawCount uint64 `influx:"flow_aggregate_raw_count"` 24 | FlowAggregateCount uint64 `influx:"flow_aggregate_count"` 25 | } 26 | 27 | // AggregatedFlowDetails holds summarized traffic stats for a given 28 | // AggregateFlowEvent. 29 | type AggregatedFlowDetails struct { 30 | PktsToserver int64 `json:"pkts_toserver"` 31 | PktsToclient int64 `json:"pkts_toclient"` 32 | BytesToserver int64 `json:"bytes_toserver"` 33 | BytesToclient int64 `json:"bytes_toclient"` 34 | } 35 | 36 | // AggregateFlowEvent holds aggregated flow data. 37 | type AggregateFlowEvent struct { 38 | Timestamp []string `json:"timestamp"` 39 | EventType string `json:"event_type"` 40 | SrcIP string `json:"src_ip,omitempty"` 41 | SrcPort []int `json:"src_port,omitempty"` 42 | DestIP string `json:"dest_ip,omitempty"` 43 | DestPort int `json:"dest_port,omitempty"` 44 | Flow AggregatedFlowDetails `json:"flow,omitempty"` 45 | } 46 | 47 | // FlowAggregator is an aggregator that groups flows with the same combination 48 | // of srcIP/destIP/destPort. 
49 | type FlowAggregator struct { 50 | SensorID string 51 | Count int64 52 | FlowsMutex sync.RWMutex 53 | Flows map[string]*AggregateFlowEvent 54 | PerfStats FlowAggregatorPerfStats 55 | StatsEncoder *util.PerformanceStatsEncoder 56 | FlushPeriod time.Duration 57 | StringBuf bytes.Buffer 58 | DatabaseOutChan chan types.Entry 59 | CloseChan chan bool 60 | ClosedChan chan bool 61 | Logger *log.Entry 62 | } 63 | 64 | // MakeFlowAggregator creates a new empty FlowAggregator. 65 | func MakeFlowAggregator(flushPeriod time.Duration, outChan chan types.Entry) *FlowAggregator { 66 | a := &FlowAggregator{ 67 | FlushPeriod: flushPeriod, 68 | Logger: log.WithFields(log.Fields{ 69 | "domain": "flow_aggregate", 70 | }), 71 | Flows: make(map[string]*AggregateFlowEvent), 72 | DatabaseOutChan: outChan, 73 | CloseChan: make(chan bool), 74 | ClosedChan: make(chan bool), 75 | } 76 | a.SensorID, _ = os.Hostname() 77 | return a 78 | } 79 | 80 | func (a *FlowAggregator) flush() { 81 | a.FlowsMutex.Lock() 82 | myFlows := a.Flows 83 | myCount := a.Count 84 | a.Flows = make(map[string]*AggregateFlowEvent) 85 | a.Count = 0 86 | a.PerfStats.FlowAggregateRawCount = uint64(myCount) 87 | a.PerfStats.FlowAggregateCount = uint64(len(myFlows)) 88 | a.FlowsMutex.Unlock() 89 | if a.StatsEncoder != nil { 90 | a.StatsEncoder.Submit(a.PerfStats) 91 | } 92 | a.Logger.WithFields(log.Fields{ 93 | "agg_flows": a.PerfStats.FlowAggregateCount, 94 | "in_flows": a.PerfStats.FlowAggregateRawCount, 95 | }).Info("flushing events") 96 | for _, v := range myFlows { 97 | jsonString, _ := json.Marshal(v) 98 | newEntry := types.Entry{ 99 | SrcIP: v.SrcIP, 100 | SrcPort: int64(v.SrcPort[0]), 101 | DestIP: v.DestIP, 102 | DestPort: int64(v.DestPort), 103 | Timestamp: v.Timestamp[0], 104 | EventType: v.EventType, 105 | JSONLine: string(jsonString[:]), 106 | } 107 | a.DatabaseOutChan <- newEntry 108 | } 109 | } 110 | 111 | func (a *FlowAggregator) countFlow(key string, e *types.Entry) { 112 | a.FlowsMutex.Lock() 113 | 
a.Count++ 114 | if _, ok := a.Flows[key]; !ok { 115 | a.Flows[key] = &AggregateFlowEvent{ 116 | Timestamp: []string{e.Timestamp}, 117 | EventType: "flow", 118 | SrcIP: e.SrcIP, 119 | SrcPort: []int{int(e.SrcPort)}, 120 | DestIP: e.DestIP, 121 | DestPort: int(e.DestPort), 122 | Flow: AggregatedFlowDetails{ 123 | PktsToserver: e.PktsToServer, 124 | PktsToclient: e.PktsToClient, 125 | BytesToserver: e.BytesToServer, 126 | BytesToclient: e.BytesToClient, 127 | }, 128 | } 129 | } else { 130 | flow := a.Flows[key] 131 | flow.SrcPort = append(flow.SrcPort, int(e.SrcPort)) 132 | flow.Flow.PktsToserver += e.PktsToServer 133 | flow.Flow.PktsToclient += e.PktsToClient 134 | flow.Flow.BytesToserver += e.BytesToServer 135 | flow.Flow.BytesToclient += e.BytesToClient 136 | } 137 | a.FlowsMutex.Unlock() 138 | } 139 | 140 | // Consume processes an Entry, adding the data within to the internal 141 | // aggregated state 142 | func (a *FlowAggregator) Consume(e *types.Entry) error { 143 | a.StringBuf.Write([]byte(e.SrcIP)) 144 | a.StringBuf.Write([]byte(e.DestIP)) 145 | a.StringBuf.Write([]byte(fmt.Sprint(e.DestPort))) 146 | a.countFlow(a.StringBuf.String(), e) 147 | a.StringBuf.Reset() 148 | return nil 149 | } 150 | 151 | // Run starts the background aggregation service for this handler 152 | func (a *FlowAggregator) Run() { 153 | go func() { 154 | i := 0 * time.Second 155 | for { 156 | select { 157 | case <-a.CloseChan: 158 | close(a.ClosedChan) 159 | return 160 | default: 161 | if i >= a.FlushPeriod { 162 | a.flush() 163 | i = 0 * time.Second 164 | } 165 | time.Sleep(1 * time.Second) 166 | i += 1 * time.Second 167 | } 168 | } 169 | }() 170 | } 171 | 172 | // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. 
173 | func (a *FlowAggregator) SubmitStats(sc *util.PerformanceStatsEncoder) { 174 | a.StatsEncoder = sc 175 | } 176 | 177 | // Stop causes the aggregator to cease aggregating and submitting data 178 | func (a *FlowAggregator) Stop(stopChan chan bool) { 179 | close(a.CloseChan) 180 | <-a.ClosedChan 181 | close(stopChan) 182 | } 183 | 184 | // GetName returns the name of the handler 185 | func (a *FlowAggregator) GetName() string { 186 | return "DB flow aggregator" 187 | } 188 | 189 | // GetEventTypes returns a slice of event type strings that this handler 190 | // should be applied to 191 | func (a *FlowAggregator) GetEventTypes() []string { 192 | return []string{"flow"} 193 | } 194 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to FEVER will be documented in this file. 4 | 5 | ## [1.4.0] - 2025-09-23 6 | 7 | ### Changed 8 | - Move from go-pg PostgreSQL library `github.com/go-pg/pg` to `github.com/jackc/pgx/v4`. 9 | - Add unit tests to PostgreSQL slurper. 10 | - Update dependencies. 11 | 12 | ## [1.3.7] - 2025-04-16 13 | 14 | ### Changed 15 | - Move from deprecated AMQP library `github.com/streadway/amqp` to `github.com/rabbitmq/amqp091-go`. 16 | - Move from deprecated Redis library `github.com/garyburd/redigo` to `github.com/gomodule/redigo`. 17 | - Update dependencies. 18 | 19 | ## [1.3.6] - 2024-07-03 20 | 21 | ### Added 22 | - Add support for sending aggregations from all flows, not just TCP 23 | bidirectional ones. 
24 | 25 | ## [1.3.5] - 2023-03-27 26 | 27 | ### Fixed 28 | - Properly handle `null` fields in DNS v2 data (#104) 29 | 30 | ## [1.3.4] - 2022-04-28 31 | 32 | ### Changed 33 | - Log heartbeat creation with Info level (#100) 34 | - Update Go dependency versions (#99) 35 | 36 | ### Removed 37 | - Support for Stenosis (#98) 38 | 39 | ## [1.3.3] - 2022-01-25 40 | 41 | ### Changed 42 | - Fixed handling of JSON `null` values (#97) 43 | 44 | ## [1.3.2] - 2021-12-09 45 | 46 | ### Added 47 | - End-to-end test support 48 | - Add heartbeat alerts to forwarded events (#94) 49 | - Add flow report testdata submission (#93) 50 | - Add passive DNS testdata submission (#92) 51 | - Add option to remove `null` JSON fields when using `fever alertify` (#91) 52 | 53 | ## [1.3.1] - 2021-11-03 54 | 55 | ### Fixed 56 | - Ensure that alertified events also contain added fields (#90) 57 | 58 | ## [1.3.0] - 2021-08-15 59 | 60 | ### Added 61 | - gRPC based infrastructure for remote runtime communication with FEVER process. 
62 | - Runtime control tool for Bloom filter matcher `fever bloom` (#86, #85) 63 | 64 | ### Changed 65 | - CI now uses GitHub Actions (#87, #81) 66 | 67 | ## [1.2.0] - 2021-06-25 68 | 69 | ### Added 70 | - Support for multiple output sockets with event type filtering and 71 | buffers (#84) 72 | 73 | ### Changed 74 | - Speed up addition of fields to forwarded EVE-JSON (#83) 75 | 76 | ## [1.1.0] - 2021-06-09 77 | 78 | ### Added 79 | - Support for input buffering (#82) 80 | 81 | ## [1.0.19] - 2021-05-04 82 | 83 | ### Added 84 | - Support Bloom filter matching for TLS fingerprints (#76, #38) 85 | 86 | ### Changed 87 | - Reduce log noise by moving AMQP messages to debug log level (#78) 88 | 89 | ## [1.0.18] - 2021-03-30 90 | 91 | ### Added 92 | - Added `version` subcommand (#73) 93 | 94 | ### Changed 95 | - Prevent deadlock on main event stream during reconnect (#75) 96 | 97 | ## [1.0.17] - 2021-03-04 98 | 99 | ### Changed 100 | - change timestamp handling when alertifying (#72) 101 | 102 | ## [1.0.16] - 2021-02-19 103 | 104 | ### Changed 105 | - Remove potentially blocking calls/locks (#71) 106 | - Use Go modules. 107 | 108 | ## [1.0.15] - 2021-01-22 109 | 110 | ### Changed 111 | - Make sure timestamps created by alertifier match regular Suricata timestamps. 112 | - Ensure FEVER starts up with unreachable AMQP endpoint (#69) 113 | 114 | ## [1.0.14] - 2020-12-04 115 | 116 | ### Added 117 | - Add heartbeat injector (#67) 118 | 119 | ## [1.0.13] - 2020-11-05 120 | 121 | ### Added 122 | - Add flow profiling metrics gathering (#66) 123 | 124 | ## [1.0.12] - 2020-10-13 125 | 126 | ### Added 127 | - Add interface filtering for Stenosis connector (#60) 128 | - Add alertify tool (#62) 129 | 130 | ### Changed 131 | - Various bugfixes (#63, #64) 132 | 133 | ## [1.0.11] - 2020-08-11 134 | 135 | ### Added 136 | - CHANGELOG.md now available. 
137 | - Add option to inject arbitrary fields into EVE-JSON (#49) 138 | 139 | ### Changed 140 | 141 | - Various code simplifications and robustness improvements. 142 | 143 | ## [1.0.10] - 2020-06-11 144 | 145 | ### Changed 146 | - Only extend incoming EVE-JSON instead of marshaling into predefined schema. This enables future-proof consistent output of EVE-JSON as there are no assumptions about what fields are present or allowed in the JSON schema (#54) 147 | 148 | ### Fixed 149 | - Some bugfixes (such as race conditions). 150 | 151 | ## [1.0.9] - 2020-05-14 152 | 153 | ### Added 154 | - Support for interacting with an external persistence tool (Stenosis). 155 | 156 | ### Changed 157 | - Various cleanups as well as test and code simplifications. 158 | 159 | ## [1.0.8] - 2019-09-19 160 | 161 | ### Added 162 | - Optional collection of metadata bundles (context) for each alert, to be submitted over a separate AMQP connection (#46) 163 | 164 | ### Changed 165 | - Flow IDs are now forwarded as strings to work around potential issues with syslog-ng (#48) 166 | 167 | ## [1.0.7] - 2019-08-06 168 | 169 | ### Fixed 170 | - Bloom filter alerts might not be properly forwarded (cf. rhaist/surevego@b1cf215) 171 | 172 | ## [1.0.6] - 2019-08-02 173 | 174 | ### Added 175 | - Support for active rDNS queries (#36) 176 | - Bloom filter IoC blocking (#44) 177 | 178 | ### Changed 179 | - Do not use explicit types in InfluxDB submissions (#34) 180 | - Distinguish DNS query and answer in Bloom filter alerting (#40) 181 | - Allow AMQP channel multiplexing (#43) 182 | 183 | ### Fixed 184 | - Fix bug causing 100% CPU on AMQP reconnect (#43) 185 | 186 | ## [1.0.5] - 2019-02-14 187 | 188 | ### Added 189 | - Support for more flexible URL Bloom filter matching (#33) 190 | 191 | ### Fixed 192 | - Improved stability of tests w.r.t.
run time, see (#32 and #31) 193 | 194 | ## [1.0.4] - 2019-01-25 195 | 196 | ### Added 197 | - Forwarding can be disabled by setting -o to empty string (#22) 198 | - TLS metadata is included in TLS SNI Bloom filter alert (#26) 199 | 200 | ### Fixed 201 | - Tests no longer fail intermittently (#27) 202 | 203 | ### Changed 204 | - All events are sent to the database, not just those unhandled by any additional processors (#29) 205 | 206 | ## [1.0.3] - 2019-01-11 207 | 208 | ### Added 209 | - Support for IP alerting via EVE metadata (#18) 210 | 211 | ### Changed 212 | - Improves robustness of Bloom filter matching by more relaxed handling of corrupted filter input files (#19) 213 | 214 | ## [1.0.2] - 2018-12-11 215 | 216 | ### Added 217 | - Configurable Bloom filter prefixes (#16) 218 | 219 | ## [1.0.1] - 2018-11-12 220 | 221 | ### Added 222 | - `makeman` subcommand 223 | 224 | ### Changed 225 | - Do not fail when no config file can be read. 226 | - Do not use DCSO-specific alert prefixes by default for Bloom filter alerts. 227 | 228 | ## [1.0.0] - 2018-11-09 229 | 230 | First proper open-source release. 
231 | -------------------------------------------------------------------------------- /input/input_redis_test.go: -------------------------------------------------------------------------------- 1 | package input 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2017, 2019, DCSO GmbH 5 | 6 | import ( 7 | "encoding/json" 8 | "fmt" 9 | "io/ioutil" 10 | "math/rand" 11 | "os" 12 | "path/filepath" 13 | "sort" 14 | "sync" 15 | "testing" 16 | "time" 17 | 18 | "github.com/DCSO/fever/types" 19 | 20 | "github.com/gomodule/redigo/redis" 21 | log "github.com/sirupsen/logrus" 22 | "github.com/stvp/tempredis" 23 | ) 24 | 25 | const nofRedisTests = 10000 26 | 27 | func makeEveEvent(etype string, number int) string { 28 | eve := types.EveEvent{ 29 | EventType: etype, 30 | FlowID: int64(number), 31 | SrcIP: fmt.Sprintf("10.0.0.%d", number), 32 | SrcPort: []int{11, 12, 13, 14, 15}[rand.Intn(5)], 33 | DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(50)), 34 | DestPort: []int{11, 12, 13, 14, 15}[rand.Intn(5)], 35 | Proto: []string{"TCP", "UDP"}[rand.Intn(2)], 36 | } 37 | json, err := json.Marshal(eve) 38 | if err != nil { 39 | panic(err) 40 | } 41 | return string(json) 42 | } 43 | 44 | type byID []types.Entry 45 | 46 | func (a byID) Len() int { return len(a) } 47 | func (a byID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 48 | func (a byID) Less(i, j int) bool { 49 | var ie, je types.EveEvent 50 | err := json.Unmarshal([]byte(a[i].JSONLine), &ie) 51 | if err != nil { 52 | log.Fatal(err) 53 | } 54 | err = json.Unmarshal([]byte(a[j].JSONLine), &je) 55 | if err != nil { 56 | log.Fatal(err) 57 | } 58 | return ie.FlowID < je.FlowID 59 | } 60 | 61 | func _TestRedisInput(t *testing.T, usePipelining bool, sock string) { 62 | s, err := tempredis.Start(tempredis.Config{ 63 | "unixsocket": sock, 64 | }) 65 | if err != nil { 66 | t.Fatal(err) 67 | } 68 | defer s.Term() 69 | 70 | client, err := redis.Dial("unix", s.Socket()) 71 | if err != nil { 72 | t.Fatal(err) 73 | } 74 | defer client.Close() 75 | 76 | 
events := make([]string, nofRedisTests) 77 | 78 | var wg sync.WaitGroup 79 | wg.Add(1) 80 | go func(myWg *sync.WaitGroup) { 81 | defer myWg.Done() 82 | for i := 0; i < nofRedisTests; i++ { 83 | events[i] = makeEveEvent([]string{"http", "dns", "foo"}[rand.Intn(3)], i) 84 | client.Do("LPUSH", "suricata", events[i]) 85 | } 86 | }(&wg) 87 | wg.Wait() 88 | 89 | evChan := make(chan types.Entry) 90 | 91 | coll := make([]types.Entry, 0) 92 | wg.Add(1) 93 | go func(myWg *sync.WaitGroup) { 94 | defer myWg.Done() 95 | i := 0 96 | for e := range evChan { 97 | coll = append(coll, e) 98 | if i == nofRedisTests-1 { 99 | return 100 | } 101 | i++ 102 | } 103 | }(&wg) 104 | 105 | ri, err := MakeRedisInputSocket(s.Socket(), evChan, 500) 106 | ri.UsePipelining = usePipelining 107 | if err != nil { 108 | t.Fatal(err) 109 | } 110 | ri.Run() 111 | 112 | wg.Wait() 113 | 114 | stopChan := make(chan bool) 115 | ri.Stop(stopChan) 116 | <-stopChan 117 | close(evChan) 118 | 119 | sort.Sort(byID(coll)) 120 | 121 | if len(coll) != nofRedisTests { 122 | t.Fatalf("unexpected number of items read from Redis queue: %d != %d", 123 | len(coll), nofRedisTests) 124 | } 125 | for i := 0; i < nofRedisTests; i++ { 126 | var checkEvent types.EveEvent 127 | err := json.Unmarshal([]byte(events[i]), &checkEvent) 128 | if err != nil { 129 | t.Fatal(err) 130 | } 131 | if coll[i].EventType != checkEvent.EventType { 132 | t.Fatalf("wrong event type for test event %d: %s != %s", i, 133 | coll[i].EventType, checkEvent.EventType) 134 | } 135 | } 136 | } 137 | 138 | func TestRedisInputWithPipelining(t *testing.T) { 139 | dir, err := ioutil.TempDir("", "test") 140 | if err != nil { 141 | log.Fatal(err) 142 | } 143 | defer os.RemoveAll(dir) 144 | tmpfn := filepath.Join(dir, "withPipe.sock") 145 | _TestRedisInput(t, true, tmpfn) 146 | } 147 | 148 | func TestRedisInputNoPipelining(t *testing.T) { 149 | dir, err := ioutil.TempDir("", "test") 150 | if err != nil { 151 | log.Fatal(err) 152 | } 153 | defer os.RemoveAll(dir) 
154 | tmpfn := filepath.Join(dir, "withPipe.sock") 155 | _TestRedisInput(t, false, tmpfn) 156 | } 157 | 158 | func _TestRedisGone(t *testing.T, usePipelining bool, sock string) { 159 | s, err := tempredis.Start(tempredis.Config{ 160 | "unixsocket": sock, 161 | }) 162 | if err != nil { 163 | t.Fatal(err) 164 | } 165 | 166 | evChan := make(chan types.Entry) 167 | ri, err := MakeRedisInputSocket(s.Socket(), evChan, 500) 168 | ri.UsePipelining = usePipelining 169 | if err != nil { 170 | t.Fatal(err) 171 | } 172 | ri.Run() 173 | 174 | time.Sleep(2 * time.Second) 175 | 176 | s.Term() 177 | 178 | s, err = tempredis.Start(tempredis.Config{ 179 | "unixsocket": sock, 180 | }) 181 | if err != nil { 182 | t.Fatal(err) 183 | } 184 | 185 | client, err := redis.Dial("unix", s.Socket()) 186 | if err != nil { 187 | t.Fatal(err) 188 | } 189 | defer client.Close() 190 | 191 | events := make([]string, nofRedisTests) 192 | 193 | var wg sync.WaitGroup 194 | go func() { 195 | for i := 0; i < nofRedisTests; i++ { 196 | events[i] = makeEveEvent([]string{"http", "dns", "foo"}[rand.Intn(3)], i) 197 | client.Do("LPUSH", "suricata", events[i]) 198 | } 199 | }() 200 | 201 | coll := make([]types.Entry, 0) 202 | wg.Add(1) 203 | go func(myWg *sync.WaitGroup) { 204 | defer myWg.Done() 205 | i := 0 206 | for e := range evChan { 207 | coll = append(coll, e) 208 | if i == nofRedisTests-1 { 209 | return 210 | } 211 | i++ 212 | } 213 | }(&wg) 214 | 215 | wg.Wait() 216 | 217 | stopChan := make(chan bool) 218 | ri.Stop(stopChan) 219 | <-stopChan 220 | close(evChan) 221 | 222 | sort.Sort(byID(coll)) 223 | 224 | if len(coll) != nofRedisTests { 225 | t.Fatalf("unexpected number of items read from Redis queue: %d != %d", 226 | len(coll), nofRedisTests) 227 | } 228 | for i := 0; i < nofRedisTests; i++ { 229 | var checkEvent types.EveEvent 230 | err := json.Unmarshal([]byte(events[i]), &checkEvent) 231 | if err != nil { 232 | t.Fatal(err) 233 | } 234 | if coll[i].EventType != checkEvent.EventType { 235 | 
t.Fatalf("wrong event type for test event %d: %s != %s", i, 236 | coll[i].EventType, checkEvent.EventType) 237 | } 238 | } 239 | } 240 | 241 | func TestRedisGoneWithPipelining(t *testing.T) { 242 | dir, err := os.MkdirTemp("", "test") 243 | if err != nil { 244 | log.Fatal(err) 245 | } 246 | defer os.RemoveAll(dir) 247 | tmpfn := filepath.Join(dir, "withPipe.sock") 248 | _TestRedisGone(t, true, tmpfn) 249 | } 250 | 251 | func TestRedisGoneNoPipelining(t *testing.T) { 252 | dir, err := os.MkdirTemp("", "test") 253 | if err != nil { 254 | log.Fatal(err) 255 | } 256 | defer os.RemoveAll(dir) 257 | tmpfn := filepath.Join(dir, "withPipe.sock") 258 | _TestRedisGone(t, false, tmpfn) 259 | } 260 | -------------------------------------------------------------------------------- /thirdparty/google/protobuf/timestamp.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 
18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option cc_enable_arenas = true; 37 | option go_package = "github.com/golang/protobuf/ptypes/timestamp"; 38 | option java_package = "com.google.protobuf"; 39 | option java_outer_classname = "TimestampProto"; 40 | option java_multiple_files = true; 41 | option objc_class_prefix = "GPB"; 42 | 43 | // A Timestamp represents a point in time independent of any time zone or local 44 | // calendar, encoded as a count of seconds and fractions of seconds at 45 | // nanosecond resolution. The count is relative to an epoch at UTC midnight on 46 | // January 1, 1970, in the proleptic Gregorian calendar which extends the 47 | // Gregorian calendar backwards to year one. 48 | // 49 | // All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap 50 | // second table is needed for interpretation, using a [24-hour linear 51 | // smear](https://developers.google.com/time/smear). 52 | // 53 | // The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By 54 | // restricting to that range, we ensure that we can convert to and from [RFC 55 | // 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. 56 | // 57 | // # Examples 58 | // 59 | // Example 1: Compute Timestamp from POSIX `time()`. 60 | // 61 | // Timestamp timestamp; 62 | // timestamp.set_seconds(time(NULL)); 63 | // timestamp.set_nanos(0); 64 | // 65 | // Example 2: Compute Timestamp from POSIX `gettimeofday()`. 66 | // 67 | // struct timeval tv; 68 | // gettimeofday(&tv, NULL); 69 | // 70 | // Timestamp timestamp; 71 | // timestamp.set_seconds(tv.tv_sec); 72 | // timestamp.set_nanos(tv.tv_usec * 1000); 73 | // 74 | // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. 75 | // 76 | // FILETIME ft; 77 | // GetSystemTimeAsFileTime(&ft); 78 | // UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; 79 | // 80 | // // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z 81 | // // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 82 | // Timestamp timestamp; 83 | // timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); 84 | // timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); 85 | // 86 | // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 87 | // 88 | // long millis = System.currentTimeMillis(); 89 | // 90 | // Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) 91 | // .setNanos((int) ((millis % 1000) * 1000000)).build(); 92 | // 93 | // 94 | // Example 5: Compute Timestamp from current time in Python. 95 | // 96 | // timestamp = Timestamp() 97 | // timestamp.GetCurrentTime() 98 | // 99 | // # JSON Mapping 100 | // 101 | // In JSON format, the Timestamp type is encoded as a string in the 102 | // [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the 103 | // format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" 104 | // where {year} is always expressed using four digits while {month}, {day}, 105 | // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional 106 | // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), 107 | // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone 108 | // is required. A proto3 JSON serializer should always use UTC (as indicated by 109 | // "Z") when printing the Timestamp type and a proto3 JSON parser should be 110 | // able to accept both UTC and other timezones (as indicated by an offset). 111 | // 112 | // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past 113 | // 01:30 UTC on January 15, 2017. 114 | // 115 | // In JavaScript, one can convert a Date object to this format using the 116 | // standard 117 | // [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) 118 | // method. In Python, a standard `datetime.datetime` object can be converted 119 | // to this format using 120 | // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with 121 | // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use 122 | // the Joda Time's [`ISODateTimeFormat.dateTime()`]( 123 | // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D 124 | // ) to obtain a formatter capable of generating timestamps in this format. 125 | // 126 | // 127 | message Timestamp { 128 | // Represents seconds of UTC time since Unix epoch 129 | // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 130 | // 9999-12-31T23:59:59Z inclusive. 131 | int64 seconds = 1; 132 | 133 | // Non-negative fractions of a second at nanosecond resolution. Negative 134 | // second values with fractions must still have non-negative nanos values 135 | // that count forward in time. 
Must be from 0 to 999,999,999 136 | // inclusive. 137 | int32 nanos = 2; 138 | } 139 | -------------------------------------------------------------------------------- /processing/context_collector.go: -------------------------------------------------------------------------------- 1 | package processing 2 | 3 | // DCSO FEVER 4 | // Copyright (c) 2019, DCSO GmbH 5 | 6 | import ( 7 | "sync" 8 | "time" 9 | 10 | "github.com/DCSO/fever/types" 11 | "github.com/DCSO/fever/util" 12 | 13 | "github.com/patrickmn/go-cache" 14 | log "github.com/sirupsen/logrus" 15 | ) 16 | 17 | // DebugOutputInterval specifies the amount of cache operations before 18 | // printing the current cache size, in verbose mode. 19 | const DebugOutputInterval = 100000 20 | 21 | // GlobalContextCollector is a shared ContextCollector to be used by FEVER. 22 | var GlobalContextCollector *ContextCollector 23 | 24 | // ContextShipper is a function that processes a slice of Entries that make up a 25 | // context of an alert, e.g. all events that share a flow ID relevant for the 26 | // alert. 27 | type ContextShipper func(Context, *log.Entry) error 28 | 29 | // ContextCollectorPerfStats contains performance stats written to InfluxDB 30 | // for monitoring. 31 | type ContextCollectorPerfStats struct { 32 | Flows uint64 `influx:"context_flows"` 33 | Events uint64 `influx:"context_events"` 34 | JSONBytes uint64 `influx:"context_json_bytes"` 35 | } 36 | 37 | // ContextCollector is a component that maintains a cache of metadata per 38 | // flow ID, forwarding it to a specified sink if associated with an alert. 
39 | type ContextCollector struct { 40 | PerfStats ContextCollectorPerfStats 41 | StatsEncoder *util.PerformanceStatsEncoder 42 | StopChan chan bool 43 | StoppedChan chan bool 44 | StopCounterChan chan bool 45 | StoppedCounterChan chan bool 46 | Running bool 47 | StatsLock sync.Mutex 48 | FlowListeners []chan types.Entry 49 | 50 | Cache *cache.Cache 51 | MarkLock sync.Mutex 52 | Marked map[string]struct{} 53 | Logger *log.Entry 54 | i uint64 55 | Ship ContextShipper 56 | } 57 | 58 | // Context is a collection of JSON events that belong to a given flow. 59 | type Context []string 60 | 61 | // MakeContextCollector creates a new ContextCollector. 62 | func MakeContextCollector(shipper ContextShipper, defaultTTL time.Duration) *ContextCollector { 63 | c := &ContextCollector{ 64 | Logger: log.WithFields(log.Fields{ 65 | "domain": "context", 66 | }), 67 | Cache: cache.New(defaultTTL, defaultTTL), 68 | Marked: make(map[string]struct{}), 69 | i: 0, 70 | Ship: shipper, 71 | FlowListeners: make([]chan types.Entry, 0), 72 | } 73 | c.Logger.Debugf("created cache with default TTL %v", defaultTTL) 74 | return c 75 | } 76 | 77 | // Mark queues metadata for a given flow for forwarding, identified by its 78 | // flow ID. 79 | func (c *ContextCollector) Mark(flowID string) { 80 | // when seeing an alert, just mark the flow ID as relevant 81 | c.MarkLock.Lock() 82 | c.Marked[flowID] = struct{}{} 83 | c.MarkLock.Unlock() 84 | } 85 | 86 | // Consume processes an Entry, adding the data within to the internal 87 | // aggregated state 88 | func (c *ContextCollector) Consume(e *types.Entry) error { 89 | var myC Context 90 | // Some events, e.g. 
stats, have no flow ID set 91 | if e.FlowID == "" { 92 | return nil 93 | } 94 | 95 | cval, exist := c.Cache.Get(e.FlowID) 96 | if exist { 97 | // the 'flow' event always comes last, so we can use it as an 98 | // indicator that the flow is complete and can be processed 99 | if e.EventType == types.EventTypeFlow { 100 | var isMarked bool 101 | c.MarkLock.Lock() 102 | if _, ok := c.Marked[e.FlowID]; ok { 103 | isMarked = true 104 | } 105 | c.MarkLock.Unlock() 106 | if isMarked { 107 | c.StatsLock.Lock() 108 | c.PerfStats.Flows++ 109 | c.PerfStats.Events += uint64(len(cval.(Context))) 110 | for _, v := range cval.(Context) { 111 | c.PerfStats.JSONBytes += uint64(len(v)) 112 | } 113 | c.StatsLock.Unlock() 114 | if c.Ship != nil { 115 | c.Ship(cval.(Context), c.Logger) 116 | } 117 | for _, fl := range c.FlowListeners { 118 | fl <- *e 119 | } 120 | delete(c.Marked, e.FlowID) 121 | } 122 | c.Cache.Delete(e.FlowID) 123 | } else { 124 | myC = cval.(Context) 125 | myC = append(myC, e.JSONLine) 126 | c.Cache.Set(e.FlowID, myC, cache.DefaultExpiration) 127 | } 128 | } else { 129 | if e.EventType != types.EventTypeFlow { 130 | myC = append(myC, e.JSONLine) 131 | c.Cache.Set(e.FlowID, myC, cache.DefaultExpiration) 132 | } 133 | } 134 | c.i++ 135 | if c.i%DebugOutputInterval == 0 { 136 | count := c.Cache.ItemCount() 137 | c.Logger.WithFields(log.Fields{ 138 | "n": count, 139 | }).Debugf("cache size after another %d events", DebugOutputInterval) 140 | c.i = 0 141 | } 142 | return nil 143 | } 144 | 145 | func (c *ContextCollector) runCounter() { 146 | sTime := time.Now() 147 | for { 148 | time.Sleep(500 * time.Millisecond) 149 | select { 150 | case <-c.StopCounterChan: 151 | close(c.StoppedCounterChan) 152 | return 153 | default: 154 | if c.StatsEncoder == nil || time.Since(sTime) < c.StatsEncoder.SubmitPeriod { 155 | continue 156 | } 157 | c.StatsEncoder.Submit(c.PerfStats) 158 | c.StatsLock.Lock() 159 | c.PerfStats.JSONBytes = 0 160 | c.PerfStats.Flows = 0 161 | 
c.PerfStats.Events = 0 162 | sTime = time.Now() 163 | c.StatsLock.Unlock() 164 | } 165 | } 166 | } 167 | 168 | // GetName returns the name of the handler 169 | func (c *ContextCollector) GetName() string { 170 | return "Context collector" 171 | } 172 | 173 | // GetEventTypes returns a slice of event type strings that this handler 174 | // should be applied to 175 | func (c *ContextCollector) GetEventTypes() []string { 176 | return []string{"*"} 177 | } 178 | 179 | // Run starts the metrics collection and submission in the ContextCollector. 180 | func (c *ContextCollector) Run() { 181 | if !c.Running { 182 | c.StopChan = make(chan bool) 183 | c.StopCounterChan = make(chan bool) 184 | c.StoppedCounterChan = make(chan bool) 185 | go c.runCounter() 186 | c.Running = true 187 | } 188 | } 189 | 190 | // Stop stops the metrics collection and submission in the ContextCollector. 191 | func (c *ContextCollector) Stop(stoppedChan chan bool) { 192 | if c.Running { 193 | close(c.StopCounterChan) 194 | <-c.StoppedCounterChan 195 | c.StoppedChan = stoppedChan 196 | close(c.StopChan) 197 | c.Running = false 198 | } 199 | } 200 | 201 | // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. 202 | func (c *ContextCollector) SubmitStats(sc *util.PerformanceStatsEncoder) { 203 | c.StatsEncoder = sc 204 | } 205 | 206 | // AddFlowListener registers flowChan as a channel to emit a 'flow' Entry on 207 | // whenever a marked flow is forwarded 208 | func (c *ContextCollector) AddFlowListener(flowChan chan types.Entry) { 209 | c.FlowListeners = append(c.FlowListeners, flowChan) 210 | } 211 | --------------------------------------------------------------------------------