├── .gitignore ├── cmd └── clogger │ ├── build │ └── buildvars.go │ ├── config │ ├── cli.go │ ├── graph.go │ └── config.go │ └── main.go ├── test.tengo ├── internal ├── filters │ ├── interfaces.go │ ├── ratelimit_filter_test.go │ ├── tengo_filter.go │ ├── registry.go │ └── ratelimit_filter.go ├── inputs │ ├── interfaces.go │ ├── parse │ │ ├── interfaces.go │ │ ├── newline_parser.go │ │ ├── json_parser.go │ │ └── newline_parser_test.go │ ├── journald_test.go │ ├── go_input.go │ ├── registry.go │ ├── journald.go │ └── socket.go ├── outputs │ ├── format │ │ ├── json_formatter.go │ │ ├── interfaces.go │ │ └── console_formatter.go │ ├── devnull_output.go │ ├── sender_test.go │ ├── stdout.go │ ├── registry.go │ ├── file_output.go │ ├── interfaces.go │ ├── socket.go │ └── sender.go ├── clogger │ ├── batch_test.go │ ├── messages.go │ └── tls.go ├── metrics │ └── metrics.go ├── pipeline │ ├── pipeline_benchmark_test.go │ ├── pipeline_test.go │ └── pipeline.go └── tracing │ └── api.go ├── config.dot ├── Makefile ├── docs └── things_i_want_in_a_logging_system.md ├── go.mod ├── testutils ├── mock_inputs │ ├── journald.go │ └── inputter.go └── mock_outputs │ └── outputter.go ├── README.md ├── LICENSE └── go.sum /.gitignore: -------------------------------------------------------------------------------- 1 | /clogger 2 | *.out 3 | *.test -------------------------------------------------------------------------------- /cmd/clogger/build/buildvars.go: -------------------------------------------------------------------------------- 1 | package build 2 | 3 | var GitHash string 4 | -------------------------------------------------------------------------------- /test.tengo: -------------------------------------------------------------------------------- 1 | shouldDrop := false 2 | if message["test"] { 3 | shouldDrop = true 4 | } -------------------------------------------------------------------------------- /cmd/clogger/config/cli.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | var CLI struct { 4 | Server struct { 5 | MetricsAddress string `help:"The Address to serve Prometheus Metrics on" default:":4280"` 6 | } `cmd:"" help:"Start the Logging Server" default:"1"` 7 | } 8 | -------------------------------------------------------------------------------- /internal/filters/interfaces.go: -------------------------------------------------------------------------------- 1 | package filters 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/sinkingpoint/clogger/internal/clogger" 7 | ) 8 | 9 | type Filter interface { 10 | Filter(ctx context.Context, msg *clogger.Message) (shouldDrop bool, err error) 11 | } 12 | -------------------------------------------------------------------------------- /config.dot: -------------------------------------------------------------------------------- 1 | digraph pipeline { 2 | Socket [type=tcp parser=newline] 3 | FileOutput [type=file format=console path="cats.txt"] 4 | FilterTests [type=tengo file="test.tengo"] 5 | SocketOutput [type=tcp destination="localhost:8080" format=json] 6 | 7 | Socket -> FilterTests -> SocketOutput 8 | SocketOutput -> FileOutput [type=Buffer] 9 | } 10 | -------------------------------------------------------------------------------- /internal/inputs/interfaces.go: -------------------------------------------------------------------------------- 1 | package inputs 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/sinkingpoint/clogger/internal/clogger" 7 | ) 8 | 9 | // An Inputter is a thing 
that is able to read messages from somewhere 10 | type Inputter interface { 11 | Init(ctx context.Context) error 12 | GetBatch(ctx context.Context) (*clogger.MessageBatch, error) 13 | Close(ctx context.Context) error 14 | } 15 | 16 | type RecvConfig struct{} 17 | 18 | func NewRecvConfig() RecvConfig { 19 | return RecvConfig{} 20 | } 21 | -------------------------------------------------------------------------------- /internal/outputs/format/json_formatter.go: -------------------------------------------------------------------------------- 1 | package format 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | "github.com/sinkingpoint/clogger/internal/clogger" 7 | ) 8 | 9 | type JSONFormatter struct { 10 | NewlineDelimited bool 11 | } 12 | 13 | func (j *JSONFormatter) Format(m *clogger.Message) ([]byte, error) { 14 | data, err := json.Marshal(m.ParsedFields) 15 | if err != nil { 16 | return nil, err 17 | } 18 | 19 | if j.NewlineDelimited { 20 | data = append(data, byte('\n')) 21 | } 22 | 23 | return data, nil 24 | } 25 | -------------------------------------------------------------------------------- /internal/inputs/parse/interfaces.go: -------------------------------------------------------------------------------- 1 | package parse 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | 8 | "github.com/sinkingpoint/clogger/internal/clogger" 9 | ) 10 | 11 | type InputParser interface { 12 | ParseStream(ctx context.Context, bytes io.ReadCloser, flushChan chan clogger.Message) error 13 | } 14 | 15 | func GetParserFromString(s string, args map[string]string) (InputParser, error) { 16 | switch s { 17 | case "json": 18 | return &JSONParser{}, nil 19 | case "newline": 20 | return &NewlineParser{}, nil 21 | } 22 | 23 | return nil, fmt.Errorf("no parser named `%s` found", s) 24 | } 25 | -------------------------------------------------------------------------------- /internal/outputs/devnull_output.go: -------------------------------------------------------------------------------- 1 | package outputs 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/sinkingpoint/clogger/internal/clogger" 7 | ) 8 | 9 | type DevNullOutput struct { 10 | SendConfig 11 | } 12 | 13 | func (d *DevNullOutput) GetSendConfig() SendConfig { 14 | return d.SendConfig 15 | } 16 | 17 | // FlushToOutput takes a buffer of messages, and pushes them somewhere 18 | func (d *DevNullOutput) FlushToOutput(ctx context.Context, messages *clogger.MessageBatch) (OutputResult, error) { 19 | return OUTPUT_SUCCESS, nil 20 | } 21 | 22 | func (d *DevNullOutput) Close(ctx context.Context) error { 23 | return nil 24 | } 25 | -------------------------------------------------------------------------------- /internal/clogger/batch_test.go: -------------------------------------------------------------------------------- 1 | package clogger_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/sinkingpoint/clogger/internal/clogger" 7 | ) 8 | 9 | func TestGetMessageBatch(t *testing.T) { 10 | batch := clogger.GetMessageBatch(10) 11 | if batch == nil || cap(batch.Messages) < 10 { 12 | t.Fatal("Failed to get batch of size 10") 13 | } 14 | 15 | if len(batch.Messages) != 0 { 16 | t.Fatal("Batch wasn't reset") 17 | } 18 | 19 | clogger.PutMessageBatch(batch) 20 | 21 | batch = clogger.GetMessageBatch(12) 22 | 23 | if batch == nil || cap(batch.Messages) < 12 { 24 | t.Fatalf("Failed to get batch of size 12 - got size %d", cap(batch.Messages)) 25 | } 26 | 27 | if len(batch.Messages) != 0 { 28 | t.Fatal("Batch wasn't reset") 29 | } 30 | 31 |
clogger.PutMessageBatch(batch) 32 | } 33 | -------------------------------------------------------------------------------- /internal/inputs/parse/newline_parser.go: -------------------------------------------------------------------------------- 1 | package parse 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "io" 7 | "time" 8 | 9 | "github.com/sinkingpoint/clogger/internal/clogger" 10 | "github.com/sinkingpoint/clogger/internal/tracing" 11 | ) 12 | 13 | type NewlineParser struct{} 14 | 15 | func (j *NewlineParser) ParseStream(ctx context.Context, bytes io.ReadCloser, flushChan chan clogger.Message) error { 16 | _, span := tracing.GetTracer().Start(ctx, "NewlineParser.ParseStream") 17 | defer span.End() 18 | 19 | scanner := bufio.NewScanner(bytes) 20 | for scanner.Scan() { 21 | line := scanner.Text() 22 | flushChan <- clogger.Message{ 23 | MonoTimestamp: time.Now().UnixNano(), 24 | ParsedFields: map[string]interface{}{ 25 | clogger.MESSAGE_FIELD: line, 26 | }, 27 | } 28 | } 29 | 30 | return scanner.Err() 31 | } 32 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | VERSION := $(shell git describe --tags --long --always --dirty="-dev") 3 | VERSION_FLAGS := -ldflags='-X "github.com/sinkingpoint/clogger/cmd/clogger/build.GitHash=$(VERSION)"' 4 | 5 | .PHONY: genmocks 6 | genmocks: 7 | mockgen -source=./internal/inputs/interfaces.go -destination testutils/mock_inputs/inputter.go Inputter 8 | mockgen -source=./internal/inputs/journald.go -destination testutils/mock_inputs/journald.go JournalDReader 9 | mockgen -source=./internal/outputs/interfaces.go -destination testutils/mock_outputs/outputter.go Outputter 10 | 11 | .PHONY: bench 12 | bench: 13 | go test -bench=. -cpuprofile profile_cpu.out ./internal/pipeline 14 | 15 | .PHONY: test 16 | test: 17 | gotip test ./... -timeout 2s 18 | 19 | .PHONY: commit 20 | commit: test 21 | gotip fmt ./... 
22 | gotip mod tidy 23 | 24 | .PHONY: build 25 | build: 26 | gotip build $(VERSION_FLAGS) ./cmd/clogger 27 | -------------------------------------------------------------------------------- /internal/inputs/parse/json_parser.go: -------------------------------------------------------------------------------- 1 | package parse 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "io" 7 | "time" 8 | 9 | "github.com/sinkingpoint/clogger/internal/clogger" 10 | "github.com/sinkingpoint/clogger/internal/tracing" 11 | ) 12 | 13 | type JSONParser struct{} 14 | 15 | func (j *JSONParser) ParseStream(ctx context.Context, bytes io.ReadCloser, flushChan chan clogger.Message) error { 16 | _, span := tracing.GetTracer().Start(ctx, "JSONParser.ParseStream") 17 | defer span.End() 18 | 19 | dec := json.NewDecoder(bytes) 20 | for { 21 | rawMessage := map[string]interface{}{} 22 | err := dec.Decode(&rawMessage) 23 | if err != nil { 24 | if err != io.EOF { 25 | span.RecordError(err) 26 | return err 27 | } 28 | break 29 | } 30 | 31 | span.AddEvent("New Message") 32 | 33 | message := clogger.NewMessage() 34 | message.ParsedFields = rawMessage 35 | message.MonoTimestamp = time.Now().UnixNano() 36 | 37 | flushChan <- message 38 | } 39 | 40 | return nil 41 | } 42 | -------------------------------------------------------------------------------- /internal/inputs/parse/newline_parser_test.go: -------------------------------------------------------------------------------- 1 | package parse_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io/ioutil" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/sinkingpoint/clogger/internal/clogger" 11 | "github.com/sinkingpoint/clogger/internal/inputs/parse" 12 | ) 13 | 14 | func TestNewLineParser(t *testing.T) { 15 | data := []string{ 16 | "test", 17 | "test1", 18 | "test2", 19 | } 20 | reader := ioutil.NopCloser(bytes.NewReader([]byte(strings.Join(data, "\n")))) 21 | 22 | parser := parse.NewlineParser{} 23 | c := make(chan clogger.Message, 10) 24 | 25 | err := parser.ParseStream(context.Background(), reader, c) 26 | if err != nil { 27 | t.Fatalf("Error found when parsing input: %s", err.Error()) 28 | } 29 | 30 | close(c) 31 | 32 | numMessages := 0 33 | for msg := range c { 34 | msgField := msg.ParsedFields["message"] 35 | if msgField != data[numMessages] { 36 | t.Errorf("Expected %s, got %s", data[numMessages], msgField) 37 | } 38 | numMessages += 1 39 | } 40 | 41 | if numMessages != len(data) { 42 | t.Fatalf("Expected %d messages, got %d", len(data), numMessages) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /docs/things_i_want_in_a_logging_system.md: -------------------------------------------------------------------------------- 1 | # Things I want in a Logging System 2 | 3 | This is a vague design doc based on what I want in a logging system: 4 | 5 | - _fast_ - should be able to saturate a 1G link assuming input and output have capacity for such production/consumption 6 | - _configurable_ - it should allow arbitrary transformations on the incoming log data 7 | - _safe_ - memory safe, and without the ability to kill itself with ooms etc 8 | 9 | ## Features 10 | 11 | - reading from sources including kafka, udp, tcp (with optional tls), files, directories/globs, and straight from JournalD, Docker, etc 12 | - writing to destinations including elasticsearch, kafka, and the same system 13 | - reading/writing in both JSON and Binary formats (capnp) 14 | - Extension mechanisms with Python to allow full control over log
manipulation 15 | - ability to handle arbitrary structured messages 16 | - ability to send and handle backpressure if a receiver is overloaded 17 | - configurable buffering destinations, including disk and remote locations 18 | - ability for extensions to report their own metrics, as well as metrics, logs, and distributed tracing support -------------------------------------------------------------------------------- /internal/outputs/format/interfaces.go: -------------------------------------------------------------------------------- 1 | package format 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/sinkingpoint/clogger/internal/clogger" 8 | ) 9 | 10 | type Formatter interface { 11 | Format(m *clogger.Message) ([]byte, error) 12 | } 13 | 14 | func GetFormatterFromString(s string, args map[string]string) (Formatter, error) { 15 | switch s { 16 | case "json": 17 | var err error 18 | newlines := false 19 | if n, ok := args["newlines"]; ok { 20 | newlines, err = strconv.ParseBool(n) 21 | if err != nil { 22 | return nil, fmt.Errorf("invalid bool `%s` for newline delimiting in JSON output - expected true or false", n) 23 | } 24 | } 25 | 26 | return &JSONFormatter{ 27 | NewlineDelimited: newlines, 28 | }, nil 29 | case "console": 30 | var err error 31 | color := false 32 | if c, ok := args["color"]; ok { 33 | color, err = strconv.ParseBool(c) 34 | if err != nil { 35 | return nil, fmt.Errorf("invalid bool `%s` for color in Console output - expected true or false", c) 36 | } 37 | } 38 | 39 | return &ConsoleFormatter{ 40 | color, 41 | }, nil 42 | } 43 | 44 | return nil, fmt.Errorf("no formatter named `%s` found", s) 45 | } 46 | -------------------------------------------------------------------------------- /internal/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | "github.com/prometheus/client_golang/prometheus/promhttp" 8 | ) 9 | 10 | var ( 11 | MessagesProcessed = prometheus.NewCounterVec(prometheus.CounterOpts{ 12 | Namespace: "clogger", 13 | Name: "messages_processed", 14 | Help: "A Counter that represents how many messages have passed through the given step in the pipeline", 15 | }, []string{ 16 | "step_name", 17 | "step_type", 18 | }) 19 | 20 | FilterDropped = prometheus.NewCounterVec(prometheus.CounterOpts{ 21 | Namespace: "clogger", 22 | Name: "filter_dropped", 23 | Help: "The number of messages dropped by the given filter", 24 | }, []string{ 25 | "step_name", 26 | }) 27 | 28 | OutputState = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 29 | Namespace: "clogger", 30 | Name: "output_state", 31 | Help: "A Boolean that is 1 when the given output step is in the given state, and zero otherwise", 32 | }, []string{ 33 | "step_name", 34 | "state", 35 | }) 36 | ) 37 | 38 | func InitMetrics(listenAddress string) { 39 | prometheus.MustRegister(MessagesProcessed, FilterDropped, OutputState) 40 | 41 | http.Handle("/metrics", promhttp.Handler()) 42 | go http.ListenAndServe(listenAddress, nil) 43 | } 44 | -------------------------------------------------------------------------------- /internal/outputs/format/console_formatter.go: -------------------------------------------------------------------------------- 1 | package format 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | "strings" 7 | 8 | "github.com/sinkingpoint/clogger/internal/clogger" 9 | ) 10 | 11 | const termReset = "\033[0m" 12 | const termGreen = "\033[32m" 13 | const
termCyan = "\033[36m" 14 | 15 | type ConsoleFormatter struct { 16 | color bool 17 | } 18 | 19 | func (j *ConsoleFormatter) ColorOutput(s interface{}, field string) string { 20 | sStr := fmt.Sprint(s) 21 | if !j.color || runtime.GOOS == "windows" { 22 | return sStr 23 | } 24 | 25 | switch field { 26 | case "key": 27 | return fmt.Sprintf("%s%s%s", termGreen, sStr, termReset) 28 | case "message": 29 | return fmt.Sprintf("%s%s%s", termCyan, sStr, termReset) 30 | default: 31 | return sStr 32 | } 33 | } 34 | 35 | func (j *ConsoleFormatter) Format(m *clogger.Message) ([]byte, error) { 36 | parts := make([]string, 0, len(m.ParsedFields)) 37 | 38 | // Hoist the message field to the front 39 | if msg, ok := m.ParsedFields[clogger.MESSAGE_FIELD]; ok { 40 | parts = append(parts, j.ColorOutput(msg, "message")) 41 | } 42 | 43 | for k, v := range m.ParsedFields { 44 | if k == clogger.MESSAGE_FIELD { 45 | continue 46 | } 47 | 48 | parts = append(parts, fmt.Sprintf("%s=%s", j.ColorOutput(k, "key"), j.ColorOutput(v, "value"))) 49 | } 50 | 51 | return []byte(strings.Join(parts, " ")), nil 52 | } 53 | -------------------------------------------------------------------------------- /internal/inputs/journald_test.go: -------------------------------------------------------------------------------- 1 | package inputs_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/golang/mock/gomock" 8 | "github.com/sinkingpoint/clogger/internal/clogger" 9 | "github.com/sinkingpoint/clogger/internal/inputs" 10 | "github.com/sinkingpoint/clogger/testutils/mock_inputs" 11 | ) 12 | 13 | func TestJournalDInput(t *testing.T) { 14 | ctrl := gomock.NewController(t) 15 | defer ctrl.Finish() 16 | 17 | mockJournalD := mock_inputs.NewMockJournalDReader(ctrl) 18 | mockJournalD.EXPECT().GetEntry(context.Background()).DoAndReturn(func(ctx context.Context) (clogger.Message, error) { 19 | return clogger.Message{ 20 | MonoTimestamp: 10, 21 | }, nil 22 | }).MinTimes(2) 23 | 24 | flushChan := make(clogger.MessageChannel) 25 | 26 | journalDInput, _ := inputs.NewJournalDInputWithReader(inputs.RecvConfig{}, mockJournalD) 27 | 28 | go func() { 29 | for { 30 | batch, _ := journalDInput.GetBatch(context.Background()) 31 | flushChan <- batch 32 | } 33 | }() 34 | 35 | batch := []clogger.Message{} 36 | messages := <-flushChan 37 | message2 := <-flushChan 38 | batch = append(batch, messages.Messages...) 39 | batch = append(batch, message2.Messages...) 
40 | 41 | clogger.PutMessageBatch(messages) 42 | clogger.PutMessageBatch(message2) 43 | 44 | if len(batch) != 2 { 45 | t.Errorf("Expected to fetch two messages, got %d", len(batch)) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /internal/filters/ratelimit_filter_test.go: -------------------------------------------------------------------------------- 1 | package filters_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/sinkingpoint/clogger/internal/clogger" 9 | "github.com/sinkingpoint/clogger/internal/filters" 10 | ) 11 | 12 | func TestRateLimitFilter(t *testing.T) { 13 | filter := filters.NewRateLimitFilter(filters.RateLimitFilterConfig{ 14 | PartitionKey: "test", 15 | Rate: 1, 16 | }) 17 | 18 | if shouldDrop, _ := filter.Filter(context.Background(), &clogger.Message{ 19 | MonoTimestamp: time.Now().UnixNano(), 20 | ParsedFields: map[string]interface{}{ 21 | "test": "a", 22 | }, 23 | }); shouldDrop { 24 | t.Fatal("Message got filtered when it shouldn't have") 25 | } 26 | 27 | // Uses the same key as above so should fail 28 | if shouldDrop, _ := filter.Filter(context.Background(), &clogger.Message{ 29 | MonoTimestamp: time.Now().UnixNano(), 30 | ParsedFields: map[string]interface{}{ 31 | "test": "a", 32 | }, 33 | }); !shouldDrop { 34 | t.Fatal("Message didn't get filtered when it should have") 35 | } 36 | 37 | // Uses a different key, so should pass 38 | if shouldDrop, _ := filter.Filter(context.Background(), &clogger.Message{ 39 | MonoTimestamp: time.Now().UnixNano(), 40 | ParsedFields: map[string]interface{}{ 41 | "test": "b", 42 | }, 43 | }); shouldDrop { 44 | t.Fatal("Message got filtered when it shouldn't have") 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /internal/inputs/go_input.go: -------------------------------------------------------------------------------- 1 | package inputs 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/sinkingpoint/clogger/internal/clogger" 8 | "github.com/sinkingpoint/clogger/internal/tracing" 9 | ) 10 | 11 | type GoInput struct { 12 | c chan string 13 | } 14 | 15 | func NewGoInput() *GoInput { 16 | return &GoInput{ 17 | c: make(chan string), 18 | } 19 | } 20 | 21 | func (g *GoInput) Init(ctx context.Context) error { 22 | return nil 23 | } 24 | 25 | func (s *GoInput) Close(ctx context.Context) error { 26 | close(s.c) 27 | return nil 28 | } 29 | 30 | func (s *GoInput) Enqueue(msg string) { 31 | s.c <- msg 32 | } 33 | 34 | func (g *GoInput) GetBatch(ctx context.Context) (*clogger.MessageBatch, error) { 35 | _, span := tracing.GetTracer().Start(ctx, "GoInput.GetBatch") 36 | defer span.End() 37 | 38 | select { 39 | case <-ctx.Done(): 40 | return nil, nil 41 | case msg := <-g.c: 42 | numMessages := len(g.c) 43 | batch := clogger.GetMessageBatch(numMessages + 1) 44 | batch.Messages = append(batch.Messages, clogger.Message{ 45 | MonoTimestamp: time.Now().UnixNano(), 46 | ParsedFields: map[string]interface{}{ 47 | clogger.MESSAGE_FIELD: msg, 48 | }, 49 | }) 50 | 51 | for i := 0; i < numMessages; i++ { 52 | batch.Messages = append(batch.Messages, clogger.Message{ 53 | MonoTimestamp: time.Now().UnixNano(), 54 | ParsedFields: map[string]interface{}{ 55 | clogger.MESSAGE_FIELD: <-g.c, 56 | }, 57 | }) 58 | } 59 | 60 | return batch, nil 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /internal/outputs/sender_test.go:
-------------------------------------------------------------------------------- 1 | package outputs_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/golang/mock/gomock" 9 | "github.com/sinkingpoint/clogger/internal/clogger" 10 | "github.com/sinkingpoint/clogger/internal/outputs" 11 | "github.com/sinkingpoint/clogger/internal/outputs/format" 12 | "github.com/sinkingpoint/clogger/testutils/mock_outputs" 13 | ) 14 | 15 | // TestSenderFlushesOnFullBuffer tests that when the buffer is full, 16 | // if we queue more messages, then the buffer gets flushed first 17 | func TestSenderFlushesOnFullBuffer(t *testing.T) { 18 | ctrl := gomock.NewController(t) 19 | defer ctrl.Finish() 20 | 21 | mockOutput := mock_outputs.NewMockOutputter(ctrl) 22 | // We expect to flush the buffer exactly once 23 | mockOutput.EXPECT().FlushToOutput(gomock.Any(), gomock.Any()).Times(1) 24 | mockOutput.EXPECT().GetSendConfig().Return(outputs.SendConfig{ 25 | FlushInterval: 10 * time.Second, 26 | BatchSize: 2, 27 | Formatter: &format.JSONFormatter{}, 28 | }).Times(1) 29 | 30 | s := outputs.NewSender("test", mockOutput) 31 | 32 | batch := clogger.GetMessageBatch(2) 33 | batch.Messages = append(batch.Messages, clogger.NewMessage(), clogger.NewMessage()) 34 | 35 | // Fill up the queue 36 | s.QueueMessages(context.Background(), batch) 37 | 38 | batch = clogger.GetMessageBatch(1) 39 | batch.Messages = append(batch.Messages, clogger.NewMessage()) 40 | 41 | // Try and send another message, which should flush the buffer of the previous two messages 42 | s.QueueMessages(context.Background(), batch) 43 | } 44 | -------------------------------------------------------------------------------- /cmd/clogger/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "os/signal" 6 | "runtime" 7 | "syscall" 8 | 9 | "github.com/alecthomas/kong" 10 | "github.com/rs/zerolog/log" 11 | 12 | "github.com/sinkingpoint/clogger/cmd/clogger/build" 13 | "github.com/sinkingpoint/clogger/cmd/clogger/config" 14 | "github.com/sinkingpoint/clogger/internal/metrics" 15 | "github.com/sinkingpoint/clogger/internal/pipeline" 16 | ) 17 | 18 | func closeHandler(p *pipeline.Pipeline) { 19 | c := make(chan os.Signal) 20 | signal.Notify(c, os.Interrupt, syscall.SIGTERM) 21 | go func() { 22 | <-c 23 | log.Info().Msg("Got SIGTERM, cleanly shutting down pipeline") 24 | p.Kill() 25 | 26 | // Call Goexit instead of os.Exit to run `defer`s 27 | // https://github.com/golang/go/issues/38261#issuecomment-609448473 28 | runtime.Goexit() 29 | }() 30 | } 31 | 32 | func RunServer() { 33 | // tracing.InitTracing(tracing.TracingConfig{ 34 | // ServiceName: "clogger", 35 | // SamplingRate: 0.1, 36 | // }) 37 | 38 | metrics.InitMetrics(config.CLI.Server.MetricsAddress) 39 | 40 | pipeline, err := config.LoadConfigFile("config.dot") 41 | if err != nil { 42 | log.Fatal().Err(err).Msg("Failed to load config") 43 | } 44 | 45 | closeHandler(pipeline) 46 | pipeline.Run() 47 | pipeline.Wait() 48 | } 49 | 50 | func main() { 51 | log.Info().Str("version", build.GitHash).Msg("Started Clogger") 52 | 53 | defer func() { 54 | log.Info().Msg("Clogger exiting...") 55 | }() 56 | 57 | ctx := kong.Parse(&config.CLI) 58 | 59 | switch ctx.Command() { 60 | case "server": 61 | RunServer() 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /internal/pipeline/pipeline_benchmark_test.go: 
-------------------------------------------------------------------------------- 1 | package pipeline_test 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/sinkingpoint/clogger/internal/filters" 9 | "github.com/sinkingpoint/clogger/internal/inputs" 10 | "github.com/sinkingpoint/clogger/internal/outputs" 11 | "github.com/sinkingpoint/clogger/internal/outputs/format" 12 | "github.com/sinkingpoint/clogger/internal/pipeline" 13 | "github.com/sinkingpoint/clogger/internal/tracing" 14 | ) 15 | 16 | func BenchmarkPipeline(b *testing.B) { 17 | b.ReportAllocs() 18 | 19 | tracing.InitTracing(tracing.TracingConfig{ 20 | ServiceName: "clogger", 21 | SamplingRate: 1, 22 | }) 23 | 24 | input := inputs.NewGoInput() 25 | outputter := outputs.DevNullOutput{ 26 | SendConfig: outputs.SendConfig{ 27 | FlushInterval: time.Second, 28 | BatchSize: 10, 29 | Formatter: &format.ConsoleFormatter{}, 30 | }, 31 | } 32 | filter, err := filters.NewTengoFilterFromString([]byte(` 33 | shouldDrop := false`)) 34 | 35 | if err != nil { 36 | b.Fatal(err) 37 | } 38 | 39 | pipeline := pipeline.NewPipeline(map[string]inputs.Inputter{ 40 | "test_input": input, 41 | }, map[string]outputs.Outputter{ 42 | "test_output": &outputter, 43 | }, map[string]filters.Filter{ 44 | "test_filter": filter, 45 | }, map[string][]pipeline.Link{ 46 | "test_input": {pipeline.NewLink("test_filter")}, 47 | "test_filter": {pipeline.NewLink("test_output")}, 48 | }) 49 | 50 | pipeline.Run() 51 | 52 | for i := 0; i < b.N; i++ { 53 | input.Enqueue(fmt.Sprintf("input %d", i)) 54 | } 55 | 56 | pipeline.Kill() 57 | } 58 | -------------------------------------------------------------------------------- /internal/clogger/messages.go: -------------------------------------------------------------------------------- 1 | package clogger 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | const DEFAULT_BATCH_SIZE = 100 9 | const DEFAULT_FLUSH_DURATION = 10 * time.Millisecond 10 | const MESSAGE_FIELD = "message" 11 | 12 | type MessageChannel = chan *MessageBatch 13 | type MessageBatch struct { 14 | Messages []Message 15 | } 16 | 17 | type Message struct { 18 | MonoTimestamp int64 19 | ParsedFields map[string]interface{} 20 | } 21 | 22 | func NewMessage() Message { 23 | return Message{ 24 | MonoTimestamp: time.Now().UnixNano(), 25 | ParsedFields: make(map[string]interface{}), 26 | } 27 | } 28 | 29 | func (m *Message) Reset() { 30 | for k := range m.ParsedFields { 31 | delete(m.ParsedFields, k) 32 | } 33 | } 34 | 35 | var batchPool sync.Pool 36 | 37 | func GetMessageBatch(size int) (batch *MessageBatch) { 38 | b := batchPool.Get() 39 | if b == nil { 40 | b = &MessageBatch{ 41 | Messages: make([]Message, size), 42 | } 43 | } 44 | 45 | batch = b.(*MessageBatch) 46 | if cap(batch.Messages) < size { 47 | batch.Messages = append(batch.Messages[:cap(batch.Messages)], make([]Message, size-cap(batch.Messages))...) 
48 | } 49 | 50 | batch.Messages = batch.Messages[:0] 51 | 52 | return batch 53 | } 54 | 55 | func SizeOneBatch(m Message) *MessageBatch { 56 | batch := GetMessageBatch(1) 57 | batch.Messages = append(batch.Messages, m) 58 | 59 | return batch 60 | } 61 | 62 | func CloneBatch(m *MessageBatch) *MessageBatch { 63 | batch := GetMessageBatch(len(m.Messages)) 64 | for _, msg := range m.Messages { 65 | batch.Messages = append(batch.Messages, msg) 66 | } 67 | 68 | return batch 69 | } 70 | 71 | func PutMessageBatch(m *MessageBatch) { 72 | batchPool.Put(m) 73 | } 74 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/sinkingpoint/clogger 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/rs/zerolog v1.26.0 7 | go.opentelemetry.io/contrib/propagators v0.21.0 8 | go.opentelemetry.io/otel v1.3.0 9 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 10 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 11 | go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC1 12 | go.opentelemetry.io/otel/sdk v1.3.0 13 | go.opentelemetry.io/otel/trace v1.3.0 14 | ) 15 | 16 | require ( 17 | github.com/beorn7/perks v1.0.1 // indirect 18 | github.com/cenkalti/backoff/v4 v4.1.2 // indirect 19 | github.com/cespare/xxhash/v2 v2.1.1 // indirect 20 | github.com/golang/protobuf v1.5.2 // indirect 21 | github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect 22 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 23 | github.com/pkg/errors v0.9.1 // indirect 24 | github.com/prometheus/client_model v0.2.0 // indirect 25 | github.com/prometheus/common v0.26.0 // indirect 26 | github.com/prometheus/procfs v0.6.0 // indirect 27 | go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 // indirect 28 | go.opentelemetry.io/proto/otlp v0.11.0 // indirect 29 | golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect 30 | golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect 31 | golang.org/x/text v0.3.6 // indirect 32 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect 33 | google.golang.org/grpc v1.42.0 // indirect 34 | google.golang.org/protobuf v1.27.1 // indirect 35 | ) 36 | 37 | require ( 38 | github.com/alecthomas/kong v0.2.22 39 | github.com/awalterschulze/gographviz v2.0.3+incompatible 40 | github.com/coreos/go-systemd/v22 v22.3.2 41 | github.com/d5/tengo/v2 v2.10.0 42 | github.com/go-logr/logr v1.2.1 // indirect 43 | github.com/go-logr/stdr v1.2.0 // indirect 44 | github.com/golang/mock v1.6.0 45 | github.com/prometheus/client_golang v1.11.0 46 | ) 47 | -------------------------------------------------------------------------------- /internal/clogger/tls.go: -------------------------------------------------------------------------------- 1 | package clogger 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "fmt" 7 | "io/ioutil" 8 | "net" 9 | ) 10 | 11 | // TLSConfig is a quick config that contains various 12 | // configs that can be used to construct a TLS server 13 | type TLSConfig struct { 14 | cert *tls.Certificate 15 | caCerts *x509.CertPool 16 | } 17 | 18 | func NewTLSConfig(caFile, certPath, keyPath string) (conf TLSConfig, err error) { 19 | if certPath == "" && keyPath != "" { 20 | return conf, fmt.Errorf("invalid cert path - expected both key path _and_ cert path") 21 | } else if certPath != "" && keyPath == "" { 22 | return conf, fmt.Errorf("invalid key path - expected 
both key path _and_ cert path") 23 | } else if certPath != "" && keyPath != "" { 24 | cert, err := tls.LoadX509KeyPair(certPath, keyPath) 25 | if err != nil { 26 | return conf, err 27 | } 28 | 29 | conf.cert = &cert 30 | } 31 | 32 | if caFile != "" { 33 | cert, err := ioutil.ReadFile(caFile) 34 | if err != nil { 35 | return conf, err 36 | } 37 | conf.caCerts = x509.NewCertPool() 38 | conf.caCerts.AppendCertsFromPEM(cert) 39 | } 40 | 41 | return conf, err 42 | } 43 | 44 | func NewTLSConfigFromRaw(raw map[string]string) (TLSConfig, error) { 45 | caFile := raw["cafile"] 46 | certPath := raw["cert"] 47 | keyPath := raw["key"] 48 | 49 | return NewTLSConfig(caFile, certPath, keyPath) 50 | } 51 | 52 | func (t *TLSConfig) isEnabled() bool { 53 | return t.cert != nil || t.caCerts != nil 54 | } 55 | 56 | // WrapListener returns the given listener wrapped by the TLS config 57 | // If this TLS config is empty, this just returns the given listener 58 | func (t *TLSConfig) WrapListener(n net.Listener) net.Listener { 59 | if !t.isEnabled() { 60 | return n 61 | } 62 | 63 | conf := tls.Config{} 64 | 65 | if t.cert != nil { 66 | conf.Certificates = []tls.Certificate{*t.cert} 67 | } 68 | 69 | if t.caCerts != nil { 70 | conf.ClientCAs = t.caCerts 71 | } 72 | 73 | return tls.NewListener(n, &conf) 74 | } 75 | -------------------------------------------------------------------------------- /internal/outputs/stdout.go: -------------------------------------------------------------------------------- 1 | package outputs 2 | 3 | import ( 4 | "context" 5 | "os" 6 | 7 | "github.com/sinkingpoint/clogger/internal/clogger" 8 | "github.com/sinkingpoint/clogger/internal/tracing" 9 | "go.opentelemetry.io/otel/attribute" 10 | ) 11 | 12 | // StdOutputterConfig is a shim around SendConfig for now 13 | // mainly so that we can extend it in the future if necessary 14 | type StdOutputterConfig struct { 15 | SendConfig 16 | } 17 | 18 | // StdOutputter is an Outputter that takes messages from the input stream 19 | // and pushes them to stdout (fd 1) 20 | type StdOutputter struct { 21 | SendConfig 22 | } 23 | 24 | // NewStdOutputter constructs a new StdOutputter from the given Config 25 | func NewStdOutputter(conf StdOutputterConfig) (*StdOutputter, error) { 26 | return &StdOutputter{ 27 | SendConfig: conf.SendConfig, 28 | }, nil 29 | } 30 | 31 | func (s *StdOutputter) GetSendConfig() SendConfig { 32 | return s.SendConfig 33 | } 34 | 35 | func (s *StdOutputter) Close(ctx context.Context) error { 36 | return nil 37 | } 38 | 39 | func (s *StdOutputter) FlushToOutput(ctx context.Context, messages *clogger.MessageBatch) (OutputResult, error) { 40 | _, span := tracing.GetTracer().Start(ctx, "StdOutputter.FlushToOutput") 41 | defer span.End() 42 | 43 | span.SetAttributes(attribute.Int("batch_size", len(messages.Messages))) 44 | 45 | var firstError error 46 | 47 | for _, msg := range messages.Messages { 48 | // Add in the timestamp so that it gets pushed 49 | msg.ParsedFields["auth_timestamp"] = msg.MonoTimestamp 50 | s, err := s.Formatter.Format(&msg) 51 | if err != nil { 52 | if firstError == nil { 53 | firstError = err 54 | } 55 | 56 | continue 57 | } 58 | 59 | // TODO: Pool these byte arrays 60 | os.Stdout.Write(s) 61 | os.Stdout.Write([]byte("\n")) 62 | } 63 | 64 | // OUTPUT_SUCCESS here so that we don't retry - it's likely that the errors are bad data, or a bug in the formatter 65 | // either way, retrying would be pointless 66 | return OUTPUT_SUCCESS, firstError 67 | } 68 |
-------------------------------------------------------------------------------- /internal/outputs/registry.go: -------------------------------------------------------------------------------- 1 | package outputs 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | type outputterConstructor = func(rawConf interface{}) (Outputter, error) 8 | type configConstructor = func(map[string]string) (interface{}, error) 9 | 10 | var outputsRegistry = newRegistry() 11 | 12 | func init() { 13 | outputsRegistry.Register("stdout", func(rawConf map[string]string) (interface{}, error) { 14 | conf, err := NewSendConfigFromRaw(rawConf) 15 | if err != nil { 16 | return nil, err 17 | } 18 | 19 | return StdOutputterConfig{ 20 | SendConfig: conf, 21 | }, nil 22 | }, func(rawConf interface{}) (Outputter, error) { 23 | conf, ok := rawConf.(StdOutputterConfig) 24 | if !ok { 25 | return nil, fmt.Errorf("invalid config passed to StdOutputter") 26 | } 27 | 28 | return NewStdOutputter(conf) 29 | }) 30 | } 31 | 32 | type outputterRegistry struct { 33 | constructorRegistry map[string]outputterConstructor 34 | configRegistry map[string]configConstructor 35 | } 36 | 37 | func newRegistry() outputterRegistry { 38 | return outputterRegistry{ 39 | constructorRegistry: make(map[string]outputterConstructor), 40 | configRegistry: make(map[string]configConstructor), 41 | } 42 | } 43 | 44 | func (r *outputterRegistry) Register(name string, configGen configConstructor, constructor outputterConstructor) { 45 | r.constructorRegistry[name] = constructor 46 | r.configRegistry[name] = configGen 47 | } 48 | 49 | func HasConstructorFor(s string) bool { 50 | _, ok := outputsRegistry.configRegistry[s] 51 | return ok 52 | } 53 | 54 | func Construct(name string, config map[string]string) (Outputter, error) { 55 | if configMaker, ok := outputsRegistry.configRegistry[name]; ok { 56 | config, err := configMaker(config) 57 | if err != nil { 58 | return nil, err 59 | } 60 | 61 | if inputMaker, ok := outputsRegistry.constructorRegistry[name]; ok { 62 | return inputMaker(config) 63 | } else { 64 | return nil, fmt.Errorf("failed to find outputter `%s`", name) 65 | } 66 | } else { 67 | return nil, fmt.Errorf("failed to find outputter `%s`", name) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /testutils/mock_inputs/journald.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: ./internal/inputs/journald.go 3 | 4 | // Package mock_inputs is a generated GoMock package. 5 | package mock_inputs 6 | 7 | import ( 8 | context "context" 9 | reflect "reflect" 10 | 11 | gomock "github.com/golang/mock/gomock" 12 | clogger "github.com/sinkingpoint/clogger/internal/clogger" 13 | ) 14 | 15 | // MockJournalDReader is a mock of JournalDReader interface. 16 | type MockJournalDReader struct { 17 | ctrl *gomock.Controller 18 | recorder *MockJournalDReaderMockRecorder 19 | } 20 | 21 | // MockJournalDReaderMockRecorder is the mock recorder for MockJournalDReader. 22 | type MockJournalDReaderMockRecorder struct { 23 | mock *MockJournalDReader 24 | } 25 | 26 | // NewMockJournalDReader creates a new mock instance. 27 | func NewMockJournalDReader(ctrl *gomock.Controller) *MockJournalDReader { 28 | mock := &MockJournalDReader{ctrl: ctrl} 29 | mock.recorder = &MockJournalDReaderMockRecorder{mock} 30 | return mock 31 | } 32 | 33 | // EXPECT returns an object that allows the caller to indicate expected use. 
34 | func (m *MockJournalDReader) EXPECT() *MockJournalDReaderMockRecorder { 35 | return m.recorder 36 | } 37 | 38 | // Close mocks base method. 39 | func (m *MockJournalDReader) Close() { 40 | m.ctrl.T.Helper() 41 | m.ctrl.Call(m, "Close") 42 | } 43 | 44 | // Close indicates an expected call of Close. 45 | func (mr *MockJournalDReaderMockRecorder) Close() *gomock.Call { 46 | mr.mock.ctrl.T.Helper() 47 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockJournalDReader)(nil).Close)) 48 | } 49 | 50 | // GetEntry mocks base method. 51 | func (m *MockJournalDReader) GetEntry(ctx context.Context) (clogger.Message, error) { 52 | m.ctrl.T.Helper() 53 | ret := m.ctrl.Call(m, "GetEntry", ctx) 54 | ret0, _ := ret[0].(clogger.Message) 55 | ret1, _ := ret[1].(error) 56 | return ret0, ret1 57 | } 58 | 59 | // GetEntry indicates an expected call of GetEntry. 60 | func (mr *MockJournalDReaderMockRecorder) GetEntry(ctx interface{}) *gomock.Call { 61 | mr.mock.ctrl.T.Helper() 62 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEntry", reflect.TypeOf((*MockJournalDReader)(nil).GetEntry), ctx) 63 | } 64 | -------------------------------------------------------------------------------- /internal/pipeline/pipeline_test.go: -------------------------------------------------------------------------------- 1 | package pipeline_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/golang/mock/gomock" 9 | "github.com/sinkingpoint/clogger/internal/clogger" 10 | "github.com/sinkingpoint/clogger/internal/filters" 11 | "github.com/sinkingpoint/clogger/internal/inputs" 12 | "github.com/sinkingpoint/clogger/internal/outputs" 13 | "github.com/sinkingpoint/clogger/internal/pipeline" 14 | "github.com/sinkingpoint/clogger/testutils/mock_inputs" 15 | "github.com/sinkingpoint/clogger/testutils/mock_outputs" 16 | ) 17 | 18 | func TestPipeline(t *testing.T) { 19 | ctrl := gomock.NewController(t) 20 | defer ctrl.Finish() 21 | 22 | mockInput := mock_inputs.NewMockInputter(ctrl) 23 | mockInput.EXPECT().Close(gomock.Any()).Times(1) 24 | mockInput.EXPECT().Init(gomock.Any()).Times(1) 25 | mockInput.EXPECT().GetBatch(gomock.Any()).DoAndReturn(func(ctx context.Context) (*clogger.MessageBatch, error) { 26 | batch := clogger.GetMessageBatch(3) 27 | batch.Messages = append(batch.Messages, []clogger.Message{ 28 | { 29 | MonoTimestamp: 0, 30 | }, 31 | { 32 | MonoTimestamp: 1, 33 | }, 34 | { 35 | MonoTimestamp: 2, 36 | }, 37 | }...) 
38 | 39 | return batch, nil 40 | }).MaxTimes(1) 41 | 42 | mockOutput := mock_outputs.NewMockOutputter(ctrl) 43 | mockOutput.EXPECT().GetSendConfig().Return(outputs.SendConfig{ 44 | FlushInterval: time.Millisecond * 100, 45 | BatchSize: 3, 46 | }).Times(1) 47 | 48 | mockOutput.EXPECT().Close(gomock.Any()).Times(1) 49 | 50 | mockOutput.EXPECT().FlushToOutput(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, batch *clogger.MessageBatch) (outputs.OutputResult, error) { 51 | if len(batch.Messages) != 3 { 52 | t.Errorf("Buffer wasn't completely flushed - expected 3 messages, got %d", len(batch.Messages)) 53 | } 54 | return outputs.OUTPUT_SUCCESS, nil 55 | }).Times(1) 56 | 57 | pipeline := pipeline.NewPipeline(map[string]inputs.Inputter{ 58 | "test_input": mockInput, 59 | }, map[string]outputs.Outputter{ 60 | "test_output": mockOutput, 61 | }, map[string]filters.Filter{}, map[string][]pipeline.Link{ 62 | "test_input": {pipeline.NewLink("test_output")}, 63 | }) 64 | 65 | pipeline.Run() 66 | pipeline.Kill() 67 | } 68 | -------------------------------------------------------------------------------- /internal/outputs/file_output.go: -------------------------------------------------------------------------------- 1 | package outputs 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/rs/zerolog/log" 9 | "github.com/sinkingpoint/clogger/internal/clogger" 10 | ) 11 | 12 | type FileOutputConfig struct { 13 | SendConfig 14 | Path string 15 | } 16 | 17 | type FileOutput struct { 18 | SendConfig 19 | file *os.File 20 | } 21 | 22 | func newFileOutputConfigFromRaw(rawConf map[string]string) (FileOutputConfig, error) { 23 | conf, err := NewSendConfigFromRaw(rawConf) 24 | if err != nil { 25 | return FileOutputConfig{}, err 26 | } 27 | 28 | if path, ok := rawConf["path"]; ok { 29 | return FileOutputConfig{ 30 | SendConfig: conf, 31 | Path: path, 32 | }, nil 33 | } 34 | 35 | return FileOutputConfig{}, fmt.Errorf("missing `path` required for FileOutput") 36 | } 37 | 38 | func NewFileOutput(conf FileOutputConfig) (*FileOutput, error) { 39 | file, err := os.OpenFile(conf.Path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666) 40 | 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | return &FileOutput{ 46 | SendConfig: conf.SendConfig, 47 | file: file, 48 | }, nil 49 | } 50 | 51 | func (f *FileOutput) Close(ctx context.Context) error { 52 | return f.file.Close() 53 | } 54 | 55 | func (f *FileOutput) GetSendConfig() SendConfig { 56 | return f.SendConfig 57 | } 58 | 59 | func (f *FileOutput) FlushToOutput(ctx context.Context, messages *clogger.MessageBatch) (OutputResult, error) { 60 | for _, msg := range messages.Messages { 61 | data, err := f.SendConfig.Formatter.Format(&msg) 62 | if err != nil { 63 | log.Warn().Err(err).Msg("Failed to format message") 64 | continue 65 | } 66 | 67 | _, err = f.file.Write(data) 68 | if err != nil { 69 | log.Warn().Err(err).Msg("Failed to write message") 70 | continue 71 | } 72 | } 73 | 74 | return OUTPUT_SUCCESS, nil 75 | } 76 | 77 | func init() { 78 | outputsRegistry.Register("file", func(rawConf map[string]string) (interface{}, error) { 79 | conf, err := newFileOutputConfigFromRaw(rawConf) 80 | if err != nil { 81 | return nil, err 82 | } 83 | 84 | return conf, nil 85 | }, func(conf interface{}) (Outputter, error) { 86 | if c, ok := conf.(FileOutputConfig); ok { 87 | return NewFileOutput(c) 88 | } 89 | 90 | return nil, fmt.Errorf("invalid config passed to file output") 91 | }) 92 | } 93 |
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Clogger 2 | 3 | Clogger is a WIP version of my idealized logging system. In many ways, it is similar to [syslog-ng](https://github.com/syslog-ng/syslog-ng), [FluentBit](https://fluentbit.io/), or [LogStash](https://www.elastic.co/logstash/), but tailored with features that I want in a logging system. 4 | 5 | ## Features 6 | 7 | Some of the notable features that already exist: 8 | 9 | - Arbitrary filters, written in [Tengo](http://github.com/d5/tengo) 10 | - Deep Observability - Clogger comes with Metrics, Logs, and Tracing out of the box to help debug and monitor pipelines 11 | - Buffer locations - In the case that an output destination is down, buffer outputs can be configured as an alternative location to send messages to 12 | 13 | ## Configuration 14 | 15 | Any valid clogger configuration is also a valid [GraphViz DOT](https://graphviz.org/doc/info/lang.html) file, meaning that you can directly render out your configurations into diagrams of your pipeline. 16 | 17 | Inputs, Outputs, and Filters all form nodes in the graph, with edges being the pipes between them. Properties of a node are defined in the DOT attributes, e.g. to construct a Unix Socket Input (an input that receives data over a UNIX socket), you can use the following (note that attributes with special chars have to be quoted): 18 | 19 | ``` 20 | MyInput [type=unix listen="/run/clogger.sock"] 21 | ``` 22 | 23 | The one mandatory attribute on each node is the `type` attribute - this defines what kind of input, output, or filter it is. Beyond that, each node type can define its own attributes (such as the above Unix input defining a `listen` attribute to determine where to place the socket). 24 | 25 | You could also construct a StdOutput (An output that sends messages to the local stdout) similarly: 26 | 27 | ``` 28 | MyOutput [type=stdout format=console color=true] 29 | ``` 30 | 31 | Note that we've specified that we want to output a Colored Console output instead of the default JSON. 
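To give a rough idea of the difference between the two (an illustrative sketch based on the console and JSON formatters in this repo - the exact field ordering of the console output isn't guaranteed), a message with the fields `message="hello world"` and `app="demo"` comes out roughly like this:

```
console: hello world app=demo
json:    {"app":"demo","message":"hello world"}
```

The console formatter hoists the `message` field to the front and prints the remaining fields as space-separated `key=value` pairs (colored when `color=true`), while the JSON formatter just marshals all of the parsed fields of each message.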
32 | 33 | We can string them together into a complete config like so: 34 | 35 | ``` 36 | digraph pipeline { 37 | MyInput [type=unix listen="/run/clogger/clogger.sock"] 38 | MyOutput [type=stdout format=console color=true] 39 | 40 | MyInput -> MyOutput 41 | } 42 | ``` 43 | 44 | Which creates a Clogger instance that reads data from a Unix socket and writes it to the console 45 | 46 | -------------------------------------------------------------------------------- /internal/filters/tengo_filter.go: -------------------------------------------------------------------------------- 1 | package filters 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/ioutil" 7 | 8 | "github.com/d5/tengo/v2" 9 | "github.com/sinkingpoint/clogger/internal/clogger" 10 | "github.com/sinkingpoint/clogger/internal/tracing" 11 | ) 12 | 13 | type TengoFilterConfig struct { 14 | Path string 15 | } 16 | 17 | type TengoFilter struct { 18 | compiled *tengo.Compiled 19 | failOpen bool 20 | } 21 | 22 | func NewTengoFilterFromString(s []byte) (*TengoFilter, error) { 23 | script := tengo.NewScript(s) 24 | script.Add("message", nil) 25 | 26 | compiled, err := script.Compile() 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | return &TengoFilter{ 32 | compiled: compiled, 33 | }, nil 34 | } 35 | 36 | func NewTengoFilterFromConf(conf TengoFilterConfig) (*TengoFilter, error) { 37 | bytes, err := ioutil.ReadFile(conf.Path) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | return NewTengoFilterFromString(bytes) 43 | } 44 | 45 | func (t *TengoFilter) Filter(ctx context.Context, msg *clogger.Message) (shouldDrop bool, err error) { 46 | ctx, span := tracing.GetTracer().Start(ctx, "TengoFilter.Filter") 47 | defer span.End() 48 | 49 | if err := t.compiled.Set("message", msg.ParsedFields); err != nil { 50 | return t.failOpen, err 51 | } 52 | 53 | _, exeSpan := tracing.GetTracer().Start(ctx, "TengoFilter.Filter") 54 | if err := t.compiled.Run(); err != nil { 55 | return t.failOpen, err 56 | } 57 | exeSpan.End() 58 | 59 | if message := t.compiled.Get("message").Map(); message != nil { 60 | msg.ParsedFields = message 61 | } 62 | 63 | shouldDrop = !t.failOpen 64 | if t.compiled.IsDefined("shouldDrop") { 65 | shouldDrop = t.compiled.Get("shouldDrop").Bool() 66 | } 67 | err = t.compiled.Get("err").Error() 68 | 69 | return shouldDrop, err 70 | } 71 | 72 | func init() { 73 | filtersRegistry.Register("tengo", func(rawConf map[string]string) (interface{}, error) { 74 | if path, ok := rawConf["file"]; ok { 75 | return TengoFilterConfig{ 76 | Path: path, 77 | }, nil 78 | } else { 79 | return nil, fmt.Errorf("missing required configuration `file` for Tengo filter") 80 | } 81 | }, func(rawConf interface{}) (Filter, error) { 82 | if conf, ok := rawConf.(TengoFilterConfig); ok { 83 | return NewTengoFilterFromConf(conf) 84 | } else { 85 | return nil, fmt.Errorf("BUG: invalid type for Tengo filter configuration (expected TengoFilterConfig)") 86 | } 87 | }) 88 | } 89 | -------------------------------------------------------------------------------- /internal/filters/registry.go: -------------------------------------------------------------------------------- 1 | package filters 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | // configConstructor takes the raw config map and constructs a relevant structured config 9 | // that will be used to construct a Filter in an filterConstructor 10 | type configConstructor = func(rawConf map[string]string) (interface{}, error) 11 | 12 | // filterConstructor is a function that takes a config 
(outputted from a configConstructor) 13 | // and returns a filter generated from that config 14 | type filterConstructor = func(rawConf interface{}) (Filter, error) 15 | 16 | var filtersRegistry = newRegistry() 17 | 18 | // filterRegistry is a registry of functions that can be used to construct Filters 19 | // from string configs 20 | type filterRegistry struct { 21 | constructorRegistry map[string]filterConstructor 22 | configRegistry map[string]configConstructor 23 | } 24 | 25 | func newRegistry() filterRegistry { 26 | return filterRegistry{ 27 | constructorRegistry: make(map[string]filterConstructor), 28 | configRegistry: make(map[string]configConstructor), 29 | } 30 | } 31 | 32 | // Register is a convenience method that registers the given constructors against the name 33 | // so that we can construct things with those constructors 34 | func (r *filterRegistry) Register(name string, configGen configConstructor, constructor filterConstructor) { 35 | r.constructorRegistry[name] = constructor 36 | r.configRegistry[name] = configGen 37 | } 38 | 39 | // Construct constructs a Filter from the given name and config map, 40 | // returning an error if the name or config is invalid 41 | func Construct(name string, config map[string]string) (Filter, error) { 42 | name = strings.ToLower(name) 43 | if configMaker, ok := filtersRegistry.configRegistry[name]; ok { 44 | // Construction is in two steps, because this makes the code a bit cleaner 45 | 46 | // First, parse the unstructured config into a proper struct 47 | // This allows us to do all the validation up front and not in the constructor of the actual object 48 | config, err := configMaker(config) 49 | if err != nil { 50 | return nil, err 51 | } 52 | 53 | // Second, use that config struct to construct the actual filter 54 | if filterMaker, ok := filtersRegistry.constructorRegistry[name]; ok { 55 | return filterMaker(config) 56 | } else { 57 | return nil, fmt.Errorf("failed to find filter `%s`", name) 58 | } 59 | } else { 60 | return nil, fmt.Errorf("failed to find filter `%s`", name) 61 | } 62 | } 63 | 64 | func HasConstructorFor(name string) bool { 65 | _, ok := filtersRegistry.configRegistry[name] 66 | return ok 67 | } 68 | -------------------------------------------------------------------------------- /internal/inputs/registry.go: -------------------------------------------------------------------------------- 1 | package inputs 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | // configConstructor takes the raw config map and constructs a relevant structured config 9 | // that will be used to construct an Inputter in an inputterConstructor 10 | type configConstructor = func(rawConf map[string]string) (interface{}, error) 11 | 12 | // inputterConstructor is a function that takes a config (outputted from a configConstructor) 13 | // and returns an inputter generated from that config 14 | type inputterConstructor = func(rawConf interface{}) (Inputter, error) 15 | 16 | var inputsRegistry = newRegistry() 17 | 18 | // inputterRegistry is a registry of functions that can be used to construct Inputters 19 | // from string configs 20 | type inputterRegistry struct { 21 | constructorRegistry map[string]inputterConstructor 22 | configRegistry map[string]configConstructor 23 | } 24 | 25 | func newRegistry() inputterRegistry { 26 | return inputterRegistry{ 27 | constructorRegistry: make(map[string]inputterConstructor), 28 | configRegistry: make(map[string]configConstructor), 29 | } 30 | } 31 | 32 | // Register is a convenience method that 
registers the given constructors against the name 33 | // so that we can construct things with those constructors 34 | func (r *inputterRegistry) Register(name string, configGen configConstructor, constructor inputterConstructor) { 35 | r.constructorRegistry[name] = constructor 36 | r.configRegistry[name] = configGen 37 | } 38 | 39 | func HasConstructorFor(name string) bool { 40 | _, ok := inputsRegistry.configRegistry[name] 41 | return ok 42 | } 43 | 44 | // Construct constructs an Inputter from the given name and config map, 45 | // returning an error if the name or config is invalid 46 | func Construct(name string, config map[string]string) (Inputter, error) { 47 | name = strings.ToLower(name) 48 | if configMaker, ok := inputsRegistry.configRegistry[name]; ok { 49 | // Construction is in two steps, because this makes the code a bit cleaner 50 | 51 | // First, parse the unstructured config into a proper struct 52 | // This allows us to do all the validation up front and not in the constructor of the actual object 53 | config, err := configMaker(config) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | // Second, use that config struct to construct the actual inputter 59 | if inputMaker, ok := inputsRegistry.constructorRegistry[name]; ok { 60 | return inputMaker(config) 61 | } else { 62 | return nil, fmt.Errorf("failed to find inputter `%s`", name) 63 | } 64 | } else { 65 | return nil, fmt.Errorf("failed to find inputter `%s`", name) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /testutils/mock_inputs/inputter.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: ./internal/inputs/interfaces.go 3 | 4 | // Package mock_inputs is a generated GoMock package. 5 | package mock_inputs 6 | 7 | import ( 8 | context "context" 9 | reflect "reflect" 10 | 11 | gomock "github.com/golang/mock/gomock" 12 | clogger "github.com/sinkingpoint/clogger/internal/clogger" 13 | ) 14 | 15 | // MockInputter is a mock of Inputter interface. 16 | type MockInputter struct { 17 | ctrl *gomock.Controller 18 | recorder *MockInputterMockRecorder 19 | } 20 | 21 | // MockInputterMockRecorder is the mock recorder for MockInputter. 22 | type MockInputterMockRecorder struct { 23 | mock *MockInputter 24 | } 25 | 26 | // NewMockInputter creates a new mock instance. 27 | func NewMockInputter(ctrl *gomock.Controller) *MockInputter { 28 | mock := &MockInputter{ctrl: ctrl} 29 | mock.recorder = &MockInputterMockRecorder{mock} 30 | return mock 31 | } 32 | 33 | // EXPECT returns an object that allows the caller to indicate expected use. 34 | func (m *MockInputter) EXPECT() *MockInputterMockRecorder { 35 | return m.recorder 36 | } 37 | 38 | // Close mocks base method. 39 | func (m *MockInputter) Close(ctx context.Context) error { 40 | m.ctrl.T.Helper() 41 | ret := m.ctrl.Call(m, "Close", ctx) 42 | ret0, _ := ret[0].(error) 43 | return ret0 44 | } 45 | 46 | // Close indicates an expected call of Close. 47 | func (mr *MockInputterMockRecorder) Close(ctx interface{}) *gomock.Call { 48 | mr.mock.ctrl.T.Helper() 49 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockInputter)(nil).Close), ctx) 50 | } 51 | 52 | // GetBatch mocks base method. 
53 | func (m *MockInputter) GetBatch(ctx context.Context) (*clogger.MessageBatch, error) { 54 | m.ctrl.T.Helper() 55 | ret := m.ctrl.Call(m, "GetBatch", ctx) 56 | ret0, _ := ret[0].(*clogger.MessageBatch) 57 | ret1, _ := ret[1].(error) 58 | return ret0, ret1 59 | } 60 | 61 | // GetBatch indicates an expected call of GetBatch. 62 | func (mr *MockInputterMockRecorder) GetBatch(ctx interface{}) *gomock.Call { 63 | mr.mock.ctrl.T.Helper() 64 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBatch", reflect.TypeOf((*MockInputter)(nil).GetBatch), ctx) 65 | } 66 | 67 | // Init mocks base method. 68 | func (m *MockInputter) Init(ctx context.Context) error { 69 | m.ctrl.T.Helper() 70 | ret := m.ctrl.Call(m, "Init", ctx) 71 | ret0, _ := ret[0].(error) 72 | return ret0 73 | } 74 | 75 | // Init indicates an expected call of Init. 76 | func (mr *MockInputterMockRecorder) Init(ctx interface{}) *gomock.Call { 77 | mr.mock.ctrl.T.Helper() 78 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockInputter)(nil).Init), ctx) 79 | } 80 | -------------------------------------------------------------------------------- /testutils/mock_outputs/outputter.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: ./internal/outputs/interfaces.go 3 | 4 | // Package mock_outputs is a generated GoMock package. 5 | package mock_outputs 6 | 7 | import ( 8 | context "context" 9 | reflect "reflect" 10 | 11 | gomock "github.com/golang/mock/gomock" 12 | clogger "github.com/sinkingpoint/clogger/internal/clogger" 13 | outputs "github.com/sinkingpoint/clogger/internal/outputs" 14 | ) 15 | 16 | // MockOutputter is a mock of Outputter interface. 17 | type MockOutputter struct { 18 | ctrl *gomock.Controller 19 | recorder *MockOutputterMockRecorder 20 | } 21 | 22 | // MockOutputterMockRecorder is the mock recorder for MockOutputter. 23 | type MockOutputterMockRecorder struct { 24 | mock *MockOutputter 25 | } 26 | 27 | // NewMockOutputter creates a new mock instance. 28 | func NewMockOutputter(ctrl *gomock.Controller) *MockOutputter { 29 | mock := &MockOutputter{ctrl: ctrl} 30 | mock.recorder = &MockOutputterMockRecorder{mock} 31 | return mock 32 | } 33 | 34 | // EXPECT returns an object that allows the caller to indicate expected use. 35 | func (m *MockOutputter) EXPECT() *MockOutputterMockRecorder { 36 | return m.recorder 37 | } 38 | 39 | // Close mocks base method. 40 | func (m *MockOutputter) Close(ctx context.Context) error { 41 | m.ctrl.T.Helper() 42 | ret := m.ctrl.Call(m, "Close", ctx) 43 | ret0, _ := ret[0].(error) 44 | return ret0 45 | } 46 | 47 | // Close indicates an expected call of Close. 48 | func (mr *MockOutputterMockRecorder) Close(ctx interface{}) *gomock.Call { 49 | mr.mock.ctrl.T.Helper() 50 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockOutputter)(nil).Close), ctx) 51 | } 52 | 53 | // FlushToOutput mocks base method. 54 | func (m *MockOutputter) FlushToOutput(ctx context.Context, messages *clogger.MessageBatch) (outputs.OutputResult, error) { 55 | m.ctrl.T.Helper() 56 | ret := m.ctrl.Call(m, "FlushToOutput", ctx, messages) 57 | ret0, _ := ret[0].(outputs.OutputResult) 58 | ret1, _ := ret[1].(error) 59 | return ret0, ret1 60 | } 61 | 62 | // FlushToOutput indicates an expected call of FlushToOutput. 
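// Illustrative, hand-written sketch (not MockGen output): a test can stub the
// flush result to exercise the retry handling in internal/outputs, e.g.
//
//	out := mock_outputs.NewMockOutputter(ctrl)
//	out.EXPECT().
//		FlushToOutput(gomock.Any(), gomock.Any()).
//		Return(outputs.OUTPUT_TRANSIENT_FAILURE, nil)
//
// where ctrl is a *gomock.Controller; the matchers and return values are placeholders.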
63 | func (mr *MockOutputterMockRecorder) FlushToOutput(ctx, messages interface{}) *gomock.Call { 64 | mr.mock.ctrl.T.Helper() 65 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlushToOutput", reflect.TypeOf((*MockOutputter)(nil).FlushToOutput), ctx, messages) 66 | } 67 | 68 | // GetSendConfig mocks base method. 69 | func (m *MockOutputter) GetSendConfig() outputs.SendConfig { 70 | m.ctrl.T.Helper() 71 | ret := m.ctrl.Call(m, "GetSendConfig") 72 | ret0, _ := ret[0].(outputs.SendConfig) 73 | return ret0 74 | } 75 | 76 | // GetSendConfig indicates an expected call of GetSendConfig. 77 | func (mr *MockOutputterMockRecorder) GetSendConfig() *gomock.Call { 78 | mr.mock.ctrl.T.Helper() 79 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSendConfig", reflect.TypeOf((*MockOutputter)(nil).GetSendConfig)) 80 | } 81 | -------------------------------------------------------------------------------- /internal/tracing/api.go: -------------------------------------------------------------------------------- 1 | package tracing 2 | 3 | import ( 4 | "context" 5 | "os" 6 | 7 | "github.com/rs/zerolog/log" 8 | "go.opentelemetry.io/contrib/propagators/b3" 9 | "go.opentelemetry.io/otel" 10 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace" 11 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" 12 | "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" 13 | "go.opentelemetry.io/otel/propagation" 14 | "go.opentelemetry.io/otel/sdk/resource" 15 | tracesdk "go.opentelemetry.io/otel/sdk/trace" 16 | semconv "go.opentelemetry.io/otel/semconv/v1.4.0" 17 | "go.opentelemetry.io/otel/trace" 18 | ) 19 | 20 | const TRACING_INSTRUMENTATION_NAME = "clogger" 21 | 22 | func GetTracer() trace.Tracer { 23 | return otel.Tracer(TRACING_INSTRUMENTATION_NAME) 24 | } 25 | 26 | type TracingConfig struct { 27 | // The name of the service that is doing tracing 28 | ServiceName string 29 | 30 | // If true, only log spans, don't send them off (Default: false) 31 | Debug bool 32 | 33 | // The rate at which to sample (0 - 1), where 0 (default) is no sampling (send no spans) 34 | // and 1 is send all the spans 35 | SamplingRate float64 36 | 37 | // The span propagation format (Defaults to B2) 38 | Propagator propagation.TextMapPropagator 39 | } 40 | 41 | func InitTracing(config TracingConfig) *tracesdk.TracerProvider { 42 | var sampler tracesdk.Sampler 43 | 44 | // Treat > 1 as 1 and < 0 as 0 45 | if config.SamplingRate >= 1 { 46 | sampler = tracesdk.AlwaysSample() 47 | } else if config.SamplingRate <= 0 { 48 | sampler = tracesdk.NeverSample() 49 | } else { 50 | sampler = tracesdk.TraceIDRatioBased(config.SamplingRate) 51 | } 52 | 53 | // Default the propagator to B3 54 | if config.Propagator == nil { 55 | config.Propagator = b3.B3{} 56 | } 57 | 58 | otel.SetTextMapPropagator(config.Propagator) 59 | 60 | apiKey := os.Getenv("HONEYCOMB_API_KEY") 61 | dataset := os.Getenv("HONEYCOMB_DATASET") 62 | otlpEndpoint := os.Getenv("OTLP_ENDPOINT") 63 | var exporter tracesdk.SpanExporter 64 | var err error 65 | if apiKey != "" && dataset != "" && !config.Debug { 66 | log.Info().Bool("debug", config.Debug).Msg("Initializing honeycomb tracing") 67 | client := otlptracehttp.NewClient( 68 | otlptracehttp.WithHeaders( 69 | map[string]string{ 70 | "x-honeycomb-team": apiKey, 71 | "x-honeycomb-dataset": dataset, 72 | }, 73 | ), 74 | otlptracehttp.WithEndpoint("api.honeycomb.io:443"), 75 | ) 76 | exporter, err = otlptrace.New(context.Background(), client) 77 | } else if otlpEndpoint != "" && !config.Debug { 78 | 
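// No Honeycomb credentials were supplied, but OTLP_ENDPOINT is set: export
// spans to that address over plain (non-TLS) OTLP/HTTP.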
log.Info().Bool("debug", config.Debug).Msg("Initializing generic otlp tracing") 79 | client := otlptracehttp.NewClient( 80 | otlptracehttp.WithEndpoint(otlpEndpoint), 81 | otlptracehttp.WithInsecure(), 82 | ) 83 | exporter, err = otlptrace.New(context.Background(), client) 84 | } else { 85 | log.Info().Bool("debug", config.Debug).Msg("Initializing stdout tracing") 86 | exporter, err = stdouttrace.New(stdouttrace.WithPrettyPrint()) 87 | } 88 | 89 | if err != nil { 90 | log.Info().Bool("debug", config.Debug).Err(err).Msg("Error initializing tracing") 91 | } 92 | 93 | tp := tracesdk.NewTracerProvider( 94 | tracesdk.WithBatcher(exporter), 95 | tracesdk.WithSampler(sampler), 96 | tracesdk.WithResource(resource.NewWithAttributes( 97 | semconv.SchemaURL, 98 | semconv.ServiceNameKey.String(config.ServiceName), 99 | )), 100 | ) 101 | 102 | otel.SetTracerProvider(tp) 103 | 104 | return tp 105 | } 106 | -------------------------------------------------------------------------------- /cmd/clogger/config/graph.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | type node struct { 9 | name string 10 | attrs map[string]string 11 | } 12 | 13 | type edge struct { 14 | from string 15 | to string 16 | attrs map[string]string 17 | } 18 | 19 | type ConfigGraph struct { 20 | name string 21 | attrs map[string]string 22 | subGraphs map[string]ConfigGraph 23 | nodes map[string]node 24 | edges []edge 25 | } 26 | 27 | func newConfigGraph() ConfigGraph { 28 | return ConfigGraph{ 29 | name: "", 30 | attrs: make(map[string]string), 31 | subGraphs: make(map[string]ConfigGraph), 32 | nodes: make(map[string]node), 33 | edges: make([]edge, 0, 10), 34 | } 35 | } 36 | 37 | func (c *ConfigGraph) SetStrict(strict bool) error { 38 | return nil 39 | } 40 | 41 | func (c *ConfigGraph) SetDir(directed bool) error { 42 | return nil 43 | } 44 | 45 | func (c *ConfigGraph) SetName(name string) error { 46 | c.name = name 47 | return nil 48 | } 49 | 50 | func (c *ConfigGraph) AddPortEdge(src, srcPort, dst, dstPort string, directed bool, attrs map[string]string) error { 51 | return c.AddEdge(src, dst, directed, attrs) 52 | } 53 | 54 | func (c *ConfigGraph) AddEdge(src, dst string, directed bool, attrs map[string]string) error { 55 | if !directed { 56 | return fmt.Errorf("edges in the Config Graph must be directed") 57 | } 58 | 59 | c.edges = append(c.edges, edge{ 60 | from: src, 61 | to: dst, 62 | attrs: attrs, 63 | }) 64 | 65 | return nil 66 | } 67 | 68 | func (c *ConfigGraph) AddNode(parentGraph string, name string, attrs map[string]string) error { 69 | if parentGraph == c.name { 70 | if _, ok := c.nodes[name]; ok { 71 | return fmt.Errorf("config graph already contains a node called `%s`", name) 72 | } 73 | 74 | for i := range attrs { 75 | attrs[i] = strings.Trim(attrs[i], "\"") 76 | } 77 | 78 | c.nodes[name] = node{ 79 | name, 80 | attrs, 81 | } 82 | } else { 83 | // Node must be in a subgraph 84 | // NOTE: This only supports one level of nesting 85 | if sub, ok := c.subGraphs[parentGraph]; ok { 86 | sub.AddNode(parentGraph, name, attrs) 87 | } else { 88 | return fmt.Errorf("failed to find subgraph `%s` to add node", parentGraph) 89 | } 90 | } 91 | 92 | return nil 93 | } 94 | 95 | func (c *ConfigGraph) AddAttr(parentGraph string, field, value string) error { 96 | if parentGraph == c.name { 97 | if _, ok := c.attrs[field]; ok { 98 | return fmt.Errorf("graph already has an attribute `%s`", field) 99 | } 100 | 101 | value := 
strings.Trim(value, "\"") 102 | 103 | c.attrs[field] = value 104 | } else { 105 | if sub, ok := c.subGraphs[parentGraph]; ok { 106 | sub.AddAttr(parentGraph, field, value) 107 | } else { 108 | return fmt.Errorf("failed to find subgraph `%s` to add node", parentGraph) 109 | } 110 | } 111 | return nil 112 | } 113 | 114 | func (c *ConfigGraph) AddSubGraph(parentGraph string, name string, attrs map[string]string) error { 115 | if parentGraph == c.name { 116 | if _, ok := c.attrs[name]; ok { 117 | return fmt.Errorf("graph already has an subgraph `%s`", name) 118 | } 119 | 120 | graph := newConfigGraph() 121 | graph.name = name 122 | graph.attrs = attrs 123 | 124 | c.subGraphs[name] = graph 125 | } else { 126 | // We only support one level of nesting for now, error if we're trying to add a subgraph to a subgraph 127 | return fmt.Errorf("clogger only supports one level of subgraph nesting in configs") 128 | } 129 | return nil 130 | } 131 | 132 | func (c *ConfigGraph) String() string { 133 | return "" 134 | } 135 | -------------------------------------------------------------------------------- /internal/filters/ratelimit_filter.go: -------------------------------------------------------------------------------- 1 | package filters 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | "sync" 8 | "time" 9 | 10 | "github.com/sinkingpoint/clogger/internal/clogger" 11 | ) 12 | 13 | const MICROSECS_PER_SEC = 1_000_000 14 | 15 | type TokenBucket struct { 16 | tokens int 17 | rate int 18 | tokensLock sync.Mutex 19 | lastCheck time.Time 20 | } 21 | 22 | func (t *TokenBucket) AddNewTokens() { 23 | t.tokensLock.Lock() 24 | defer t.tokensLock.Unlock() 25 | 26 | uSecsSinceLastCheck := time.Since(t.lastCheck).Microseconds() 27 | newTokens := uSecsSinceLastCheck * int64(t.rate) / MICROSECS_PER_SEC 28 | 29 | if newTokens > 0 { 30 | t.tokens += int(newTokens) 31 | 32 | if t.tokens > t.rate { 33 | t.tokens = t.rate 34 | } 35 | 36 | t.lastCheck = time.Now() 37 | } 38 | } 39 | 40 | func (t *TokenBucket) TryConsumeTokens(numTokens int) (withinRatelimit bool) { 41 | t.tokensLock.Lock() 42 | defer t.tokensLock.Unlock() 43 | 44 | if t.tokens >= numTokens { 45 | t.tokens -= numTokens 46 | return true 47 | } 48 | 49 | return false 50 | } 51 | 52 | func NewTokenBucket(rate int, startFull bool) *TokenBucket { 53 | tokens := 0 54 | if startFull { 55 | tokens = rate 56 | } 57 | 58 | return &TokenBucket{ 59 | tokens: tokens, 60 | rate: rate, 61 | tokensLock: sync.Mutex{}, 62 | lastCheck: time.Now(), 63 | } 64 | } 65 | 66 | type RateLimitFilterConfig struct { 67 | PartitionKey string 68 | Rate int 69 | } 70 | 71 | func NewRateLimitFilterConfigFromRaw(raw map[string]string) (RateLimitFilterConfig, error) { 72 | var partitionKey string 73 | if key, ok := raw["partition_key"]; ok { 74 | partitionKey = key 75 | } else { 76 | return RateLimitFilterConfig{}, fmt.Errorf("missing `partition_key` in RateLimitFilter") 77 | } 78 | 79 | var rate int 80 | if rateStr, ok := raw["rate"]; ok { 81 | if val, err := strconv.Atoi(rateStr); err == nil { 82 | if val <= 0 { 83 | return RateLimitFilterConfig{}, fmt.Errorf("invalid rate in RateLimitFilter - expected a positive int, got %d", val) 84 | } 85 | rate = val 86 | } else { 87 | return RateLimitFilterConfig{}, fmt.Errorf("invalid rate in RateLimitFilter - expected an int, got `%s`", rateStr) 88 | } 89 | } else { 90 | return RateLimitFilterConfig{}, fmt.Errorf("missing `rate` in RateLimitFilter") 91 | } 92 | 93 | return RateLimitFilterConfig{ 94 | Rate: rate, 95 | PartitionKey: 
partitionKey, 96 | }, nil 97 | } 98 | 99 | type RateLimitFilter struct { 100 | RateLimitFilterConfig 101 | buckets map[string]*TokenBucket 102 | bucketsLock sync.Mutex 103 | } 104 | 105 | func NewRateLimitFilter(conf RateLimitFilterConfig) *RateLimitFilter { 106 | return &RateLimitFilter{ 107 | RateLimitFilterConfig: conf, 108 | buckets: make(map[string]*TokenBucket), 109 | bucketsLock: sync.Mutex{}, 110 | } 111 | } 112 | 113 | func (r *RateLimitFilter) Filter(ctx context.Context, msg *clogger.Message) (shouldDrop bool, err error) { 114 | key := fmt.Sprint(msg.ParsedFields[r.PartitionKey]) 115 | r.bucketsLock.Lock() 116 | defer r.bucketsLock.Unlock() 117 | 118 | tokenBucket := r.buckets[key] 119 | if tokenBucket == nil { 120 | r.buckets[key] = NewTokenBucket(r.Rate, true) 121 | tokenBucket = r.buckets[key] 122 | } 123 | tokenBucket.AddNewTokens() 124 | hadTokenForMsg := tokenBucket.TryConsumeTokens(1) 125 | 126 | return !hadTokenForMsg, nil 127 | } 128 | 129 | func init() { 130 | filtersRegistry.Register("ratelimit", func(rawConf map[string]string) (interface{}, error) { 131 | return NewRateLimitFilterConfigFromRaw(rawConf) 132 | }, func(rawConf interface{}) (Filter, error) { 133 | if conf, ok := rawConf.(RateLimitFilterConfig); ok { 134 | return NewRateLimitFilter(conf), nil 135 | } else { 136 | return nil, fmt.Errorf("BUG: invalid type for RateLimit filter configuration (expected RateLimitFilterConfig)") 137 | } 138 | }) 139 | } 140 | -------------------------------------------------------------------------------- /internal/outputs/interfaces.go: -------------------------------------------------------------------------------- 1 | package outputs 2 | 3 | import ( 4 | "context" 5 | "strconv" 6 | "time" 7 | 8 | "github.com/rs/zerolog/log" 9 | "github.com/sinkingpoint/clogger/internal/clogger" 10 | "github.com/sinkingpoint/clogger/internal/outputs/format" 11 | ) 12 | 13 | const DEFAULT_BATCH_SIZE = 1000 14 | const DEFAULT_FLUSH_INTERVAL = time.Millisecond * 100 15 | 16 | type OutputResult int 17 | 18 | const ( 19 | // OUTPUT_SUCCESS indicates that the data was sucessfully sent to the output 20 | OUTPUT_SUCCESS OutputResult = iota 21 | 22 | // OUTPUT_TRANSIENT_FAILURE indicates that we failed to send data to the output, but should retry (with exponential backoff) 23 | OUTPUT_TRANSIENT_FAILURE 24 | 25 | // OUTPUT_LONG_FAILURE indicates that we failed to send data to the output and we should 26 | // buffer it to the buffer destination, if configured - this failure is likely to take a while to resolve 27 | OUTPUT_LONG_FAILURE 28 | ) 29 | 30 | // allOutputs is a convenience map to take results and return their strings 31 | // so that we can iterate all the possible OutputResults for metrics 32 | var allOutputs = map[OutputResult]string{ 33 | OUTPUT_SUCCESS: OUTPUT_SUCCESS.ToString(), 34 | OUTPUT_TRANSIENT_FAILURE: OUTPUT_TRANSIENT_FAILURE.ToString(), 35 | OUTPUT_LONG_FAILURE: OUTPUT_LONG_FAILURE.ToString(), 36 | } 37 | 38 | func (o OutputResult) ToString() string { 39 | switch o { 40 | case OUTPUT_SUCCESS: 41 | return "success" 42 | case OUTPUT_TRANSIENT_FAILURE: 43 | return "transient_failure" 44 | case OUTPUT_LONG_FAILURE: 45 | return "long_failure" 46 | } 47 | 48 | log.Fatal().Int("output_result", int(o)).Msg("Missing implementation of `ToString` for OutputResult") 49 | return "NOT_IMPLEMENTED" 50 | } 51 | 52 | // SendConfig is a config that specifies the base fields 53 | // for all outputs 54 | type SendConfig struct { 55 | // FlushInterval is the maximum time to buffer messages before 
outputting 56 | FlushInterval time.Duration 57 | 58 | // BatchSize is the maximum number of messages to store in the buffer before outputting 59 | BatchSize int 60 | 61 | // Formatter is the method that converts Messages into byte streams to be piped downstream 62 | Formatter format.Formatter 63 | } 64 | 65 | // NewSendConfigFromRaw is a convenience method to construct SendConfigs from raw configs 66 | // that might have been loaded from things like the config file 67 | func NewSendConfigFromRaw(rawConf map[string]string) (SendConfig, error) { 68 | conf := SendConfig{ 69 | FlushInterval: DEFAULT_FLUSH_INTERVAL, 70 | BatchSize: DEFAULT_BATCH_SIZE, 71 | Formatter: &format.JSONFormatter{}, 72 | } 73 | 74 | var err error 75 | if s, ok := rawConf["flush_interval"]; ok { 76 | conf.FlushInterval, err = time.ParseDuration(s) 77 | if err != nil { 78 | return SendConfig{}, err 79 | } 80 | } 81 | 82 | if s, ok := rawConf["batch_size"]; ok { 83 | conf.BatchSize, err = strconv.Atoi(s) 84 | if err != nil { 85 | return SendConfig{}, err 86 | } 87 | } 88 | 89 | if s, ok := rawConf["format"]; ok { 90 | conf.Formatter, err = format.GetFormatterFromString(s, rawConf) 91 | if err != nil { 92 | return SendConfig{}, err 93 | } 94 | } 95 | 96 | return conf, nil 97 | } 98 | 99 | // An Outputter is a thing that can take messages and push them somewhere else 100 | type Outputter interface { 101 | // GetSendConfig returns the base send config of this Outputter 102 | GetSendConfig() SendConfig 103 | 104 | // FlushToOutput takes a buffer of messages, and pushes them somewhere 105 | FlushToOutput(ctx context.Context, messages *clogger.MessageBatch) (OutputResult, error) 106 | 107 | Close(ctx context.Context) error 108 | } 109 | 110 | // StartOutputter starts up a go routine that handles all the input to the given output + buffering etc 111 | func StartOutputter(name string, inputChan clogger.MessageChannel, send Outputter, bufferChannel clogger.MessageChannel) { 112 | s := NewSender(name, send) 113 | s.BufferChannel = bufferChannel 114 | ticker := time.NewTicker(s.FlushInterval) 115 | outer: 116 | for { 117 | select { 118 | case <-ticker.C: 119 | s.Flush(context.Background(), false) 120 | case batch, ok := <-inputChan: 121 | if !ok { 122 | break outer 123 | } 124 | s.QueueMessages(context.Background(), batch) 125 | } 126 | } 127 | 128 | s.Flush(context.Background(), true) 129 | s.sender.Close(context.Background()) 130 | } 131 | -------------------------------------------------------------------------------- /internal/outputs/socket.go: -------------------------------------------------------------------------------- 1 | package outputs 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "net" 8 | "os" 9 | 10 | "github.com/rs/zerolog/log" 11 | "github.com/sinkingpoint/clogger/internal/clogger" 12 | "github.com/sinkingpoint/clogger/internal/inputs" 13 | ) 14 | 15 | type SocketOutputType int 16 | 17 | const ( 18 | UNIX_SOCKET_OUTPUT SocketOutputType = iota 19 | TCP_SOCKET_OUTPUT 20 | ) 21 | 22 | func (s SocketOutputType) ToString() string { 23 | switch s { 24 | case UNIX_SOCKET_OUTPUT: 25 | return "unix" 26 | case TCP_SOCKET_OUTPUT: 27 | return "tcp" 28 | } 29 | 30 | log.Panic().Int("type", int(s)).Msg("BUG: Unimplemented ToString for a SocketOutputType") 31 | return "unreachable" 32 | } 33 | 34 | type SocketOutputConfig struct { 35 | SendConfig 36 | ListenAddr string 37 | TLS *clogger.TLSConfig 38 | Type SocketOutputType 39 | } 40 | 41 | func parseSocketConfigFromRaw(rawConf map[string]string, ty SocketOutputType) 
(SocketOutputConfig, error) { 42 | var err error 43 | var destination string 44 | 45 | conf, err := NewSendConfigFromRaw(rawConf) 46 | if err != nil { 47 | return SocketOutputConfig{}, err 48 | } 49 | 50 | if path, ok := rawConf["destination"]; ok { 51 | destination = path 52 | } else { 53 | switch ty { 54 | case UNIX_SOCKET_OUTPUT: 55 | destination = inputs.DEFAULT_SOCKET_PATH 56 | case TCP_SOCKET_OUTPUT: 57 | destination = inputs.DEFAULT_LISTEN_ADDR 58 | } 59 | } 60 | 61 | tls, err := clogger.NewTLSConfigFromRaw(rawConf) 62 | if err != nil { 63 | return SocketOutputConfig{}, err 64 | } 65 | 66 | return SocketOutputConfig{ 67 | SendConfig: conf, 68 | ListenAddr: destination, 69 | Type: ty, 70 | TLS: &tls, 71 | }, nil 72 | } 73 | 74 | type socketOutput struct { 75 | conf SocketOutputConfig 76 | conn net.Conn 77 | } 78 | 79 | func NewSocketOutput(c SocketOutputConfig) *socketOutput { 80 | return &socketOutput{ 81 | conf: c, 82 | } 83 | } 84 | 85 | func (s *socketOutput) reconnect() bool { 86 | network := s.conf.Type.ToString() 87 | conn, err := net.Dial(network, s.conf.ListenAddr) 88 | if err != nil { 89 | s.conn = nil 90 | return false 91 | } 92 | 93 | s.conn = conn 94 | return true 95 | } 96 | 97 | func (s *socketOutput) Close(ctx context.Context) error { 98 | if s.conn != nil { 99 | err := s.conn.Close() 100 | s.conn = nil 101 | return err 102 | } 103 | 104 | return nil 105 | } 106 | 107 | func (s *socketOutput) GetSendConfig() SendConfig { 108 | return s.conf.SendConfig 109 | } 110 | 111 | func (s *socketOutput) FlushToOutput(ctx context.Context, messages *clogger.MessageBatch) (OutputResult, error) { 112 | if s.conn == nil { 113 | if !s.reconnect() { 114 | return OUTPUT_TRANSIENT_FAILURE, nil 115 | } 116 | } 117 | 118 | dataBuffer := []byte{} 119 | 120 | for _, msg := range messages.Messages { 121 | data, err := s.conf.Formatter.Format(&msg) 122 | if err != nil { 123 | log.Warn().Err(err).Msg("Failed to format message") 124 | continue 125 | } 126 | 127 | dataBuffer = append(dataBuffer, data...) 
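// Formatted messages accumulate into a single buffer that is written with one
// Write call after the loop; any per-record separator (e.g. a trailing
// newline) is expected to come from the configured Formatter.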
128 | } 129 | 130 | _, err := s.conn.Write(dataBuffer) 131 | if err != nil { 132 | s.conn = nil 133 | return OUTPUT_TRANSIENT_FAILURE, nil 134 | } 135 | 136 | return OUTPUT_SUCCESS, nil 137 | } 138 | 139 | func init() { 140 | outputsRegistry.Register("unix", func(rawConf map[string]string) (interface{}, error) { 141 | conf, err := parseSocketConfigFromRaw(rawConf, UNIX_SOCKET_OUTPUT) 142 | if err != nil { 143 | return nil, err 144 | } 145 | 146 | return conf, nil 147 | }, func(conf interface{}) (Outputter, error) { 148 | if c, ok := conf.(SocketOutputConfig); ok { 149 | 150 | if _, err := os.Stat(c.ListenAddr); !errors.Is(err, os.ErrNotExist) { 151 | // Delete the existing socket so we can remake it 152 | log.Info().Str("socket_path", c.ListenAddr).Msg("Cleaning up left behind socket") 153 | if err = os.Remove(c.ListenAddr); err != nil { 154 | return nil, err 155 | } 156 | } 157 | 158 | return NewSocketOutput(c), nil 159 | } 160 | 161 | return nil, fmt.Errorf("invalid config passed to socket input") 162 | }) 163 | 164 | outputsRegistry.Register("tcp", func(rawConf map[string]string) (interface{}, error) { 165 | conf, err := parseSocketConfigFromRaw(rawConf, TCP_SOCKET_OUTPUT) 166 | if err != nil { 167 | return nil, err 168 | } 169 | 170 | return conf, nil 171 | }, func(conf interface{}) (Outputter, error) { 172 | if c, ok := conf.(SocketOutputConfig); ok { 173 | return NewSocketOutput(c), nil 174 | } 175 | 176 | return nil, fmt.Errorf("invalid config passed to socket input") 177 | }) 178 | } 179 | -------------------------------------------------------------------------------- /cmd/clogger/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | 7 | "github.com/awalterschulze/gographviz" 8 | "github.com/rs/zerolog/log" 9 | "github.com/sinkingpoint/clogger/internal/filters" 10 | "github.com/sinkingpoint/clogger/internal/inputs" 11 | "github.com/sinkingpoint/clogger/internal/outputs" 12 | "github.com/sinkingpoint/clogger/internal/pipeline" 13 | ) 14 | 15 | func LoadConfigFile(path string) (*pipeline.Pipeline, error) { 16 | body, err := ioutil.ReadFile(path) 17 | if err != nil { 18 | return nil, err 19 | } 20 | 21 | graphAst, err := gographviz.ParseString(string(body)) 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | configGraph := newConfigGraph() 27 | if err := gographviz.Analyse(graphAst, &configGraph); err != nil { 28 | return nil, err 29 | } 30 | 31 | return configGraph.ToPipeline() 32 | } 33 | 34 | func (c *ConfigGraph) ToPipeline() (*pipeline.Pipeline, error) { 35 | if len(c.edges) == 0 { 36 | log.Warn().Msg("No connectors in this pipeline. 
It wont do anything") 37 | } 38 | 39 | inputsMemoize := make(map[string]inputs.Inputter) 40 | outputsMemoize := make(map[string]outputs.Outputter) 41 | filtersMemoize := make(map[string]filters.Filter) 42 | pipes := make(map[string][]pipeline.Link) 43 | for i := range c.edges { 44 | edge := c.edges[i] 45 | 46 | _, hasInput := inputsMemoize[edge.from] 47 | _, hasFilter := filtersMemoize[edge.from] 48 | 49 | // TODO @sinkingpoint: This is full of duplication and should be refactored 50 | ty := edge.attrs["type"] 51 | if ty == "Buffer" { 52 | if _, ok := outputsMemoize[edge.from]; !ok { 53 | fromData := c.nodes[edge.from] 54 | if ty, ok := fromData.attrs["type"]; ok { 55 | if outputs.HasConstructorFor(ty) { 56 | to, err := outputs.Construct(ty, fromData.attrs) 57 | if err != nil { 58 | return nil, err 59 | } 60 | 61 | outputsMemoize[fromData.name] = to 62 | } else { 63 | return nil, fmt.Errorf("no output type called `%s`", ty) 64 | } 65 | } else { 66 | return nil, fmt.Errorf("node `%s` is missing a `type` attribute", edge.from) 67 | } 68 | } 69 | 70 | toData := c.nodes[edge.to] 71 | if ty, ok := toData.attrs["type"]; ok { 72 | if outputs.HasConstructorFor(ty) { 73 | to, err := outputs.Construct(ty, toData.attrs) 74 | if err != nil { 75 | return nil, err 76 | } 77 | 78 | outputsMemoize[toData.name] = to 79 | } else { 80 | return nil, fmt.Errorf("no output type called `%s`", ty) 81 | } 82 | } else { 83 | return nil, fmt.Errorf("node `%s` is missing a `type` attribute", edge.from) 84 | } 85 | 86 | pipes[edge.from] = append(pipes[edge.from], pipeline.Link{ 87 | To: edge.to, 88 | Type: pipeline.LINK_TYPE_BUFFER, 89 | }) 90 | } else { 91 | if !hasFilter && !hasInput { 92 | fromData := c.nodes[edge.from] 93 | if ty, ok := fromData.attrs["type"]; ok { 94 | if inputs.HasConstructorFor(ty) { 95 | from, err := inputs.Construct(ty, fromData.attrs) 96 | if err != nil { 97 | return nil, err 98 | } 99 | 100 | inputsMemoize[fromData.name] = from 101 | } else if filters.HasConstructorFor(ty) { 102 | from, err := filters.Construct(ty, fromData.attrs) 103 | if err != nil { 104 | return nil, err 105 | } 106 | 107 | filtersMemoize[fromData.name] = from 108 | } else { 109 | return nil, fmt.Errorf("no such type type `%s`", ty) 110 | } 111 | } else { 112 | return nil, fmt.Errorf("node `%s` is missing a `type` attribute", edge.from) 113 | } 114 | } 115 | 116 | if _, ok := outputsMemoize[edge.to]; !ok { 117 | toData := c.nodes[edge.to] 118 | if ty, ok := toData.attrs["type"]; ok { 119 | if outputs.HasConstructorFor(ty) { 120 | to, err := outputs.Construct(ty, toData.attrs) 121 | if err != nil { 122 | return nil, err 123 | } 124 | 125 | outputsMemoize[toData.name] = to 126 | } else if filters.HasConstructorFor(ty) { 127 | to, err := filters.Construct(ty, toData.attrs) 128 | if err != nil { 129 | return nil, err 130 | } 131 | 132 | filtersMemoize[toData.name] = to 133 | } else { 134 | return nil, fmt.Errorf("no output or filter type called `%s`", ty) 135 | } 136 | } else { 137 | return nil, fmt.Errorf("node `%s` is missing a `type` attribute", edge.from) 138 | } 139 | } 140 | 141 | pipes[edge.from] = append(pipes[edge.from], pipeline.Link{ 142 | To: edge.to, 143 | Type: pipeline.LINK_TYPE_NORMAL, 144 | }) 145 | } 146 | } 147 | 148 | return pipeline.NewPipeline(inputsMemoize, outputsMemoize, filtersMemoize, pipes), nil 149 | } 150 | -------------------------------------------------------------------------------- /internal/inputs/journald.go: -------------------------------------------------------------------------------- 
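// Hand-written usage sketch (hypothetical, not taken from cmd/clogger/main.go):
// the graph-to-pipeline translation in cmd/clogger/config is typically driven
// along the lines of
//
//	pipe, err := config.LoadConfigFile("config.dot")
//	if err != nil {
//		log.Fatal().Err(err).Msg("failed to load config")
//	}
//	pipe.Run()
//	pipe.Wait()
//
// where Run and Wait come from internal/pipeline and the path is a placeholder.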
1 | package inputs 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/coreos/go-systemd/v22/sdjournal" 8 | "github.com/sinkingpoint/clogger/internal/clogger" 9 | "github.com/sinkingpoint/clogger/internal/tracing" 10 | ) 11 | 12 | // JournalDReader is an interface that reads messages off the JournalD stream 13 | type JournalDReader interface { 14 | // GetEntry reads a single message off of the end of the queue 15 | GetEntry(ctx context.Context) (clogger.Message, error) 16 | 17 | // Close is provided to clean up any sockets or anything when we exit 18 | Close() 19 | } 20 | 21 | // coreOSJournalDReader is the only journalDReader that we provide at the moment 22 | // it uses github.com/coreos/go-systemd to read off of the JournalD stream 23 | type coreOSJournalDReader struct { 24 | reader *sdjournal.Journal 25 | } 26 | 27 | // newCoreOSJournalDReader attempts to open a new reader on the journalD stream 28 | // erroring if we fail (e.g. if we're not on a systemd machine) 29 | func newCoreOSJournalDReader() (*coreOSJournalDReader, error) { 30 | reader, err := sdjournal.NewJournal() 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | // SeekTail to push us to the end of the queue so that we only get new messages 36 | // and don't double count old ones 37 | err = reader.SeekTail() 38 | if err != nil { 39 | reader.Close() 40 | return nil, err 41 | } 42 | 43 | return &coreOSJournalDReader{ 44 | reader: reader, 45 | }, nil 46 | } 47 | 48 | func (c *coreOSJournalDReader) Close() { 49 | c.reader.Close() 50 | } 51 | 52 | func (c *coreOSJournalDReader) GetEntry(ctx context.Context) (clogger.Message, error) { 53 | _, span := tracing.GetTracer().Start(ctx, "CoreOSJournalDReader.GetEntry") 54 | defer span.End() 55 | 56 | // Attempt to progress to the next thing in the queue 57 | i, err := c.reader.Next() 58 | 59 | if err != nil { 60 | return clogger.Message{}, err 61 | } 62 | 63 | // If there wasn't anything for us to read, just wait until there is 64 | for i <= 0 { 65 | // Indefinitely wait until we have a new message 66 | // Note: This blocks. Need to work out how to interrupt it 67 | c.reader.Wait(sdjournal.IndefiniteWait) 68 | i, err = c.reader.Next() 69 | 70 | if err != nil { 71 | return clogger.Message{}, err 72 | } 73 | } 74 | span.AddEvent("Finished Waiting") 75 | 76 | // Read the new message 77 | entry, err := c.reader.GetEntry() 78 | if err != nil { 79 | return clogger.Message{}, err 80 | } 81 | 82 | // Eugh. 
Turn the map[string]string into a map[string]interface{} 83 | // Should find a better way to do this that doesn't require a whole reallocation of the map 84 | m2 := make(map[string]interface{}, len(entry.Fields)) 85 | for k, v := range entry.Fields { 86 | m2[k] = v 87 | } 88 | 89 | return clogger.Message{ 90 | MonoTimestamp: int64(entry.MonotonicTimestamp * 1000), 91 | ParsedFields: m2, 92 | }, nil 93 | } 94 | 95 | // JournalDInput is an Input that reads off of the JournalD stream 96 | type JournalDInput struct { 97 | RecvConfig 98 | reader JournalDReader 99 | } 100 | 101 | // NewJournalDInput constructs a JournalDInput with the given RecvConf 102 | // defaulting to the CoreOSJournalDReader 103 | func NewJournalDInput(conf RecvConfig) (*JournalDInput, error) { 104 | reader, err := newCoreOSJournalDReader() 105 | if err != nil { 106 | return nil, err 107 | } 108 | 109 | return &JournalDInput{ 110 | RecvConfig: conf, 111 | reader: reader, 112 | }, nil 113 | } 114 | 115 | // Constructs a JournalDInput with the given reader, incase we have any others 116 | // in the future 117 | func NewJournalDInputWithReader(conf RecvConfig, reader JournalDReader) (*JournalDInput, error) { 118 | return &JournalDInput{ 119 | RecvConfig: conf, 120 | reader: reader, 121 | }, nil 122 | } 123 | 124 | func (j *JournalDInput) Init(ctx context.Context) error { 125 | return nil 126 | } 127 | 128 | // Gets a single batch of messages off of the JournalD stream 129 | func (j *JournalDInput) GetBatch(ctx context.Context) (*clogger.MessageBatch, error) { 130 | msg, err := j.reader.GetEntry(ctx) 131 | 132 | if err != nil { 133 | return nil, err 134 | } 135 | 136 | if textMsg, ok := msg.ParsedFields["MESSAGE"]; ok { 137 | // Normalise JournalD Formatted message field to our one 138 | msg.ParsedFields[clogger.MESSAGE_FIELD] = textMsg 139 | delete(msg.ParsedFields, "MESSAGE") 140 | } 141 | 142 | return clogger.SizeOneBatch(msg), nil 143 | } 144 | 145 | func (j *JournalDInput) Close(ctx context.Context) error { 146 | j.reader.Close() 147 | 148 | return nil 149 | } 150 | 151 | func init() { 152 | // JournalDInput that reads data from the journald stream 153 | inputsRegistry.Register("journald", func(rawConf map[string]string) (interface{}, error) { 154 | conf := NewRecvConfig() 155 | 156 | return conf, nil 157 | }, func(conf interface{}) (Inputter, error) { 158 | if c, ok := conf.(RecvConfig); ok { 159 | return NewJournalDInput(c) 160 | } 161 | 162 | return nil, fmt.Errorf("invalid config passed to journald input") 163 | }) 164 | } 165 | -------------------------------------------------------------------------------- /internal/inputs/socket.go: -------------------------------------------------------------------------------- 1 | package inputs 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "net" 8 | "os" 9 | "sync" 10 | 11 | "github.com/rs/zerolog/log" 12 | "github.com/sinkingpoint/clogger/internal/clogger" 13 | "github.com/sinkingpoint/clogger/internal/inputs/parse" 14 | "github.com/sinkingpoint/clogger/internal/tracing" 15 | "go.opentelemetry.io/otel/attribute" 16 | ) 17 | 18 | type SocketInputType int 19 | 20 | const ( 21 | UNIX_SOCKET_INPUT SocketInputType = iota 22 | TCP_SOCKET_INPUT 23 | ) 24 | 25 | const DEFAULT_SOCKET_PATH = "/run/clogger/clogger.sock" 26 | const DEFAULT_LISTEN_ADDR = "localhost:4279" 27 | 28 | type SocketInputConfig struct { 29 | RecvConfig 30 | ListenAddr string 31 | TLS *clogger.TLSConfig 32 | Type SocketInputType 33 | Parser parse.InputParser 34 | } 35 | 36 | func 
parseSocketConfigFromRaw(conf map[string]string, ty SocketInputType) (SocketInputConfig, error) { 37 | var err error 38 | var socketListen string 39 | if path, ok := conf["listen"]; ok { 40 | socketListen = path 41 | } else { 42 | switch ty { 43 | case UNIX_SOCKET_INPUT: 44 | socketListen = DEFAULT_SOCKET_PATH 45 | case TCP_SOCKET_INPUT: 46 | socketListen = DEFAULT_LISTEN_ADDR 47 | } 48 | } 49 | 50 | parser, _ := parse.GetParserFromString("newline", conf) 51 | if parserName, ok := conf["parser"]; ok { 52 | parser, err = parse.GetParserFromString(parserName, conf) 53 | if err != nil { 54 | return SocketInputConfig{}, err 55 | } 56 | } 57 | 58 | tls, err := clogger.NewTLSConfigFromRaw(conf) 59 | if err != nil { 60 | return SocketInputConfig{}, err 61 | } 62 | 63 | return SocketInputConfig{ 64 | RecvConfig: NewRecvConfig(), 65 | ListenAddr: socketListen, 66 | Type: ty, 67 | Parser: parser, 68 | TLS: &tls, 69 | }, nil 70 | } 71 | 72 | type socketInput struct { 73 | conf SocketInputConfig 74 | internalChan chan clogger.Message 75 | listener net.Listener 76 | wg sync.WaitGroup 77 | } 78 | 79 | func NewSocketInput(c SocketInputConfig) *socketInput { 80 | return &socketInput{ 81 | conf: c, 82 | internalChan: make(chan clogger.Message, 10), 83 | wg: sync.WaitGroup{}, 84 | } 85 | } 86 | 87 | func (s *socketInput) handleConn(ctx context.Context, conn net.Conn) { 88 | ctx, span := tracing.GetTracer().Start(ctx, "SocketInput.handleConn") 89 | defer span.End() 90 | defer conn.Close() 91 | if err := s.conf.Parser.ParseStream(ctx, conn, s.internalChan); err != nil { 92 | span.RecordError(err) 93 | log.Debug().Err(err).Msg("Failed to parse incoming stream") 94 | } 95 | } 96 | 97 | func (s *socketInput) Init(ctx context.Context) error { 98 | var ty string 99 | switch s.conf.Type { 100 | case UNIX_SOCKET_INPUT: 101 | ty = "unix" 102 | case TCP_SOCKET_INPUT: 103 | ty = "tcp" 104 | } 105 | 106 | listener, err := net.Listen(ty, s.conf.ListenAddr) 107 | if err != nil { 108 | return err 109 | } 110 | 111 | s.listener = s.conf.TLS.WrapListener(listener) 112 | 113 | go func() { 114 | for { 115 | conn, err := listener.Accept() 116 | 117 | if err != nil { 118 | break 119 | } 120 | 121 | s.wg.Add(1) 122 | go func() { 123 | defer s.wg.Done() 124 | s.handleConn(ctx, conn) 125 | }() 126 | } 127 | }() 128 | 129 | return nil 130 | } 131 | 132 | func (s *socketInput) Close(ctx context.Context) error { 133 | s.listener.Close() 134 | s.wg.Wait() 135 | close(s.internalChan) 136 | log.Debug().Msg("Socket Inputter Closing... 
Waiting on child connections") 137 | 138 | return nil 139 | } 140 | 141 | func (s *socketInput) GetBatch(ctx context.Context) (*clogger.MessageBatch, error) { 142 | _, span := tracing.GetTracer().Start(ctx, "SocketInput.GetBatch") 143 | defer span.End() 144 | 145 | span.SetAttributes(attribute.String("socket_path", s.conf.ListenAddr)) 146 | 147 | select { 148 | case <-ctx.Done(): 149 | return nil, nil 150 | case msg := <-s.internalChan: 151 | numMessages := len(s.internalChan) + 1 152 | batch := clogger.GetMessageBatch(numMessages) 153 | batch.Messages = append(batch.Messages, msg) 154 | for i := 0; i < numMessages-1; i++ { 155 | batch.Messages = append(batch.Messages, <-s.internalChan) 156 | } 157 | 158 | return batch, nil 159 | } 160 | } 161 | 162 | func init() { 163 | inputsRegistry.Register("unix", func(rawConf map[string]string) (interface{}, error) { 164 | conf, err := parseSocketConfigFromRaw(rawConf, UNIX_SOCKET_INPUT) 165 | if err != nil { 166 | return nil, err 167 | } 168 | 169 | return conf, nil 170 | }, func(conf interface{}) (Inputter, error) { 171 | if c, ok := conf.(SocketInputConfig); ok { 172 | 173 | if _, err := os.Stat(c.ListenAddr); !errors.Is(err, os.ErrNotExist) { 174 | // Delete the existing socket so we can remake it 175 | log.Info().Str("socket_path", c.ListenAddr).Msg("Cleaning up left behind socket") 176 | if err = os.Remove(c.ListenAddr); err != nil { 177 | return nil, err 178 | } 179 | } 180 | 181 | return NewSocketInput(c), nil 182 | } 183 | 184 | return nil, fmt.Errorf("invalid config passed to socket input") 185 | }) 186 | 187 | inputsRegistry.Register("tcp", func(rawConf map[string]string) (interface{}, error) { 188 | conf, err := parseSocketConfigFromRaw(rawConf, TCP_SOCKET_INPUT) 189 | if err != nil { 190 | return nil, err 191 | } 192 | 193 | return conf, nil 194 | }, func(conf interface{}) (Inputter, error) { 195 | if c, ok := conf.(SocketInputConfig); ok { 196 | return NewSocketInput(c), nil 197 | } 198 | 199 | return nil, fmt.Errorf("invalid config passed to socket input") 200 | }) 201 | } 202 | -------------------------------------------------------------------------------- /internal/outputs/sender.go: -------------------------------------------------------------------------------- 1 | package outputs 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/rs/zerolog/log" 9 | "github.com/sinkingpoint/clogger/internal/clogger" 10 | "github.com/sinkingpoint/clogger/internal/metrics" 11 | "github.com/sinkingpoint/clogger/internal/tracing" 12 | "go.opentelemetry.io/otel/attribute" 13 | ) 14 | 15 | type RetryConfig struct { 16 | MaxBackOffTries int 17 | BufferChannel clogger.MessageChannel 18 | currentState OutputResult 19 | lastRetryTime time.Time 20 | } 21 | 22 | // Sender encapsulates the functionality that all Outputters get for free i.e. Buffering 23 | type Sender struct { 24 | SendConfig 25 | RetryConfig 26 | name string 27 | sender Outputter 28 | buffer *clogger.MessageBatch 29 | lastFlushTime time.Time 30 | } 31 | 32 | func NewSender(name string, logic Outputter) *Sender { 33 | config := logic.GetSendConfig() 34 | return &Sender{ 35 | name: name, 36 | SendConfig: config, 37 | RetryConfig: RetryConfig{ 38 | MaxBackOffTries: 5, // arbitrary, just for testing. 
Must make this configurable 39 | currentState: OUTPUT_SUCCESS, 40 | }, 41 | buffer: clogger.GetMessageBatch(config.BatchSize), 42 | lastFlushTime: time.Now(), 43 | sender: logic, 44 | } 45 | } 46 | 47 | // QueueMessages takes the given messages and appends them to the buffer, 48 | // flushing as necessary 49 | func (s *Sender) QueueMessages(ctx context.Context, batch *clogger.MessageBatch) { 50 | ctx, span := tracing.GetTracer().Start(ctx, "Sender.QueueMessages") 51 | defer span.End() 52 | 53 | span.SetAttributes( 54 | attribute.Int("buffer_size", len(s.buffer.Messages)), 55 | attribute.Int("num_new_messages", len(batch.Messages)), 56 | attribute.Int("remaining_room", cap(s.buffer.Messages)-len(s.buffer.Messages)), 57 | ) 58 | 59 | metrics.MessagesProcessed.WithLabelValues(s.name, "output").Add(float64(len(batch.Messages))) 60 | 61 | chunks := 1 62 | 63 | batchMessages := batch.Messages 64 | 65 | for remainingRoom := cap(s.buffer.Messages) - len(s.buffer.Messages); remainingRoom < len(batch.Messages); remainingRoom = cap(s.buffer.Messages) - len(s.buffer.Messages) { 66 | // Chunk the data into buffer sized pieces 67 | chunks += 1 68 | s.buffer.Messages = append(s.buffer.Messages, batchMessages[:remainingRoom]...) 69 | s.Flush(ctx, false) 70 | batchMessages = batchMessages[remainingRoom:] 71 | } 72 | 73 | span.SetAttributes(attribute.Int("chunks", chunks)) 74 | 75 | clogger.PutMessageBatch(batch) 76 | 77 | s.buffer.Messages = append(s.buffer.Messages, batchMessages...) 78 | } 79 | 80 | func (s *Sender) transitionState(ctx context.Context, state OutputResult) { 81 | s.currentState = state 82 | 83 | for result, str := range allOutputs { 84 | if result == state { 85 | metrics.OutputState.WithLabelValues(s.name, str).Set(1) 86 | } else { 87 | metrics.OutputState.WithLabelValues(s.name, str).Set(0) 88 | } 89 | } 90 | } 91 | 92 | // handleLongFailure handles the buffer in the event that the main sender fails 93 | func (s *Sender) handleLongFailure(ctx context.Context) error { 94 | _, span := tracing.GetTracer().Start(ctx, "Sender.handleLongFailure") 95 | defer span.End() 96 | span.SetAttributes(attribute.Bool("has_bufferchannel", s.BufferChannel != nil), attribute.Int("buffer_size", len(s.buffer.Messages))) 97 | 98 | s.transitionState(ctx, OUTPUT_LONG_FAILURE) 99 | metrics.OutputState.WithLabelValues(s.name, "success").Set(0) 100 | 101 | if s.BufferChannel != nil { 102 | s.BufferChannel <- clogger.CloneBatch(s.buffer) 103 | } 104 | 105 | s.buffer.Messages = s.buffer.Messages[:0] 106 | 107 | return nil 108 | } 109 | 110 | // doExponentialRetry handles the case where we have transient failures that can be retried 111 | // Note: This has the potential to cause double counting of logs (at least once delivery) 112 | func (s *Sender) doExponentialRetry(ctx context.Context) error { 113 | ctx, span := tracing.GetTracer().Start(ctx, "Sender.doExponentialRetry") 114 | defer span.End() 115 | 116 | span.SetAttributes(attribute.Int("buffer_size", len(s.buffer.Messages))) 117 | // start at one because we assume we've already done one attempt at flushing 118 | // to get here 119 | backoffTime := time.Millisecond * 100 120 | 121 | for i := 1; i < s.MaxBackOffTries; i++ { 122 | time.Sleep(backoffTime) 123 | 124 | result, err := s.sender.FlushToOutput(ctx, s.buffer) 125 | if err != nil { 126 | log.Debug().Err(err).Int("output_result", int(result)).Msg("Failed to flush output") 127 | } 128 | 129 | switch result { 130 | case OUTPUT_SUCCESS: 131 | span.SetAttributes(attribute.Int("success_after", i)) 132 | 
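// A retry succeeded: reset the buffer length (the backing array is reused),
// record the flush time, and mark the output healthy again before returning.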
s.buffer.Messages = s.buffer.Messages[:0] 133 | s.lastFlushTime = time.Now() 134 | 135 | s.transitionState(ctx, OUTPUT_SUCCESS) 136 | return nil 137 | case OUTPUT_TRANSIENT_FAILURE: 138 | backoffTime *= 2 139 | continue 140 | case OUTPUT_LONG_FAILURE: 141 | return s.handleLongFailure(ctx) 142 | } 143 | } 144 | 145 | return fmt.Errorf("did a backoff without success") 146 | } 147 | 148 | // Flush flushes the current buffer to the output stream 149 | func (s *Sender) Flush(ctx context.Context, final bool) { 150 | ctx, span := tracing.GetTracer().Start(ctx, "Sender.Flush") 151 | defer span.End() 152 | 153 | span.SetAttributes( 154 | attribute.Bool("final", final), 155 | attribute.String("last_flush_time", s.lastFlushTime.Format(time.RFC3339)), 156 | attribute.Int("buffer_size", len(s.buffer.Messages)), 157 | ) 158 | 159 | enoughTimeSinceLastFlush := time.Since(s.lastFlushTime) >= s.FlushInterval 160 | reachedBufferLimit := len(s.buffer.Messages) >= s.BatchSize 161 | if !enoughTimeSinceLastFlush && !reachedBufferLimit && !final { 162 | span.AddEvent("Skipping Flush - not ready yet") 163 | return 164 | } 165 | 166 | if len(s.buffer.Messages) > 0 { 167 | // Don't do any exponential backoff or anything if we know that we're in a long failure 168 | // just buffer it, but retry every minute or so incase we're back 169 | if s.currentState == OUTPUT_LONG_FAILURE && time.Since(s.lastRetryTime) < 30*time.Second { 170 | s.handleLongFailure(ctx) 171 | return 172 | } 173 | 174 | s.lastRetryTime = time.Now() 175 | 176 | result, err := s.sender.FlushToOutput(ctx, s.buffer) 177 | if err != nil { 178 | // We just log errors - retries etc should be controlled by the OutputResult return 179 | log.Debug().Err(err).Int("output_result", int(result)).Msg("Failed to flush output") 180 | } 181 | 182 | switch result { 183 | case OUTPUT_SUCCESS: 184 | s.buffer.Messages = s.buffer.Messages[:0] 185 | s.lastFlushTime = time.Now() 186 | s.transitionState(ctx, OUTPUT_SUCCESS) 187 | case OUTPUT_TRANSIENT_FAILURE: 188 | err := s.doExponentialRetry(ctx) 189 | if err != nil { 190 | log.Warn().Err(err).Msg("Fell through trying to do exponential backoff") 191 | s.handleLongFailure(ctx) 192 | } 193 | case OUTPUT_LONG_FAILURE: 194 | s.handleLongFailure(ctx) 195 | } 196 | } 197 | 198 | if final { 199 | clogger.PutMessageBatch(s.buffer) 200 | s.buffer = nil 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 
24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 
98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 
160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. -------------------------------------------------------------------------------- /internal/pipeline/pipeline.go: -------------------------------------------------------------------------------- 1 | package pipeline 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "github.com/rs/zerolog/log" 10 | "github.com/sinkingpoint/clogger/internal/clogger" 11 | "github.com/sinkingpoint/clogger/internal/filters" 12 | "github.com/sinkingpoint/clogger/internal/inputs" 13 | "github.com/sinkingpoint/clogger/internal/metrics" 14 | "github.com/sinkingpoint/clogger/internal/outputs" 15 | ) 16 | 17 | type LinkType int 18 | 19 | const ( 20 | LINK_TYPE_NORMAL LinkType = iota 21 | LINK_TYPE_BUFFER 22 | ) 23 | 24 | type Link struct { 25 | To string 26 | Type LinkType 27 | } 28 | 29 | func NewLink(to string) Link { 30 | return Link{ 31 | To: to, 32 | Type: LINK_TYPE_NORMAL, 33 | } 34 | } 35 | 36 | func NewBufferLink(to string) Link { 37 | return Link{ 38 | To: to, 39 | Type: LINK_TYPE_BUFFER, 40 | } 41 | } 42 | 43 | type Pipeline struct { 44 | Inputs map[string]inputs.Inputter 45 | Filters map[string]filters.Filter 46 | Outputs map[string]outputs.Outputter 47 | Pipes map[string][]Link 48 | RevPipes map[string][]Link 49 | killChannel chan bool 50 | debug bool 51 | 52 | channels map[string]clogger.MessageChannel 53 | closedLock sync.Mutex 54 | closed map[string]bool 55 | 56 | wg sync.WaitGroup 57 | } 58 | 59 | func NewPipeline(inputs map[string]inputs.Inputter, outputs map[string]outputs.Outputter, filters map[string]filters.Filter, pipes map[string][]Link) *Pipeline { 60 | revPipes := make(map[string][]Link, len(pipes)) 61 | 62 | for from, tos := range pipes { 63 | for _, to := range tos { 64 | revPipes[to.To] = append(revPipes[to.To], Link{ 65 | To: from, 66 | Type: to.Type, 67 | }) 68 | } 69 | } 70 | 71 | return &Pipeline{ 72 | Inputs: inputs, 73 | Outputs: outputs, 74 | Filters: filters, 75 | Pipes: pipes, 76 | RevPipes: revPipes, 77 | debug: false, 78 | killChannel: make(chan bool, 1), 79 | closed: make(map[string]bool, len(inputs)+len(outputs)+len(filters)), 80 | closedLock: sync.Mutex{}, 81 | channels: make(map[string]clogger.MessageChannel, len(inputs)+len(filters)+len(outputs)), 82 | wg: sync.WaitGroup{}, 83 | } 84 | } 85 | 86 | func (p *Pipeline) handleClose(chanName string) { 87 | toHandle := []string{chanName} 88 | p.closedLock.Lock() 89 | defer p.closedLock.Unlock() 90 | p.closed[chanName] = true 91 | 92 | for len(toHandle) > 0 { 93 | chanName = toHandle[len(toHandle)-1] 94 | toHandle = toHandle[:len(toHandle)-1] 95 | outer: 96 | for _, dest := range p.Pipes[chanName] { 97 | if _, closed := p.closed[dest.To]; closed { 98 | continue 99 | } 100 | 101 | for _, src := range p.RevPipes[dest.To] { 102 | if _, closed := p.closed[src.To]; !closed { 103 | continue outer 104 | } 105 | } 106 | 107 | if c := p.channels[dest.To]; c != nil { 108 | close(p.channels[dest.To]) 109 | } 110 | toHandle = append(toHandle, dest.To) 111 | } 112 | } 113 | } 114 | 115 | func (p *Pipeline) Kill() { 116 | p.killChannel <- true 117 | p.wg.Wait() 118 | } 119 | 120 | func (p *Pipeline) Wait() { 121 | p.wg.Wait() 122 | } 123 | 124 | func (p *Pipeline) Run() { 125 | inputWg := sync.WaitGroup{} 126 | 
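// Run wires the pipeline back to front: one goroutine per output (via
// StartOutputter), then one per filter, then one per input, so every
// downstream channel exists before messages start flowing.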
filterWg := sync.WaitGroup{} 127 | inputCloseChannels := map[string]chan bool{} 128 | 129 | for name, output := range p.Outputs { 130 | if _, ok := p.channels[name]; !ok { 131 | p.channels[name] = make(clogger.MessageChannel, 10) 132 | } 133 | p.wg.Add(1) 134 | 135 | var bufferChannel clogger.MessageChannel 136 | 137 | for _, pipe := range p.Pipes[name] { 138 | if pipe.Type == LINK_TYPE_BUFFER { 139 | if _, ok := p.channels[pipe.To]; !ok { 140 | p.channels[pipe.To] = make(clogger.MessageChannel, 10) 141 | } 142 | 143 | bufferChannel = p.channels[pipe.To] 144 | } else { 145 | log.Panic().Msg("BUG: Found output link that isn't a buffer link") 146 | } 147 | } 148 | 149 | go func(name string, output outputs.Outputter, pipe clogger.MessageChannel) { 150 | defer p.wg.Done() 151 | outputs.StartOutputter(name, pipe, output, bufferChannel) 152 | p.handleClose(name) 153 | }(name, output, p.channels[name]) 154 | } 155 | 156 | for name, filter := range p.Filters { 157 | p.channels[name] = make(clogger.MessageChannel, 10) 158 | filterWg.Add(1) 159 | go func(name string, filter filters.Filter, inputPipe clogger.MessageChannel) { 160 | defer filterWg.Done() 161 | for batch := range inputPipe { 162 | currentIndex := 0 163 | for _, msg := range batch.Messages { 164 | shouldDrop, err := filter.Filter(context.Background(), &msg) 165 | if err != nil { 166 | log.Warn().Err(err).Msg("Filter failed") 167 | } 168 | 169 | if !shouldDrop { 170 | batch.Messages[currentIndex] = msg 171 | currentIndex += 1 172 | } 173 | } 174 | 175 | metrics.FilterDropped.WithLabelValues(name).Add(float64(len(batch.Messages) - currentIndex)) 176 | metrics.MessagesProcessed.WithLabelValues(name, "filter").Add(float64(len(batch.Messages))) 177 | 178 | batch.Messages = batch.Messages[:currentIndex] 179 | 180 | processedLinks := 0 181 | 182 | for _, link := range p.Pipes[name] { 183 | if processedLinks >= 1 { 184 | batch = clogger.CloneBatch(batch) 185 | } 186 | p.channels[link.To] <- clogger.CloneBatch(batch) 187 | processedLinks += 1 188 | } 189 | } 190 | p.handleClose(name) 191 | 192 | log.Debug().Str("filter_name", name).Msg("Filter exited") 193 | }(name, filter, p.channels[name]) 194 | } 195 | 196 | for name, input := range p.Inputs { 197 | inputCloseChannels[name] = make(chan bool) 198 | 199 | err := input.Init(context.Background()) 200 | if err != nil { 201 | log.Error().Str("step_name", name).Err(err).Msg("Failed to start input") 202 | p.handleClose(name) 203 | } 204 | 205 | inputWg.Add(2) 206 | go func(name string, input inputs.Inputter, killChannel chan bool) { 207 | defer inputWg.Done() 208 | 209 | ctx, cancel := context.WithCancel(context.Background()) 210 | cancelled := false 211 | 212 | go func() { 213 | defer inputWg.Done() 214 | <-killChannel 215 | close(killChannel) 216 | cancelled = true 217 | cancel() 218 | }() 219 | 220 | for { 221 | batch, err := input.GetBatch(ctx) 222 | 223 | if err != nil { 224 | log.Warn().Err(err).Str("step_name", name).Msg("Failed to get batch from input") 225 | continue 226 | } 227 | 228 | if batch != nil { 229 | metrics.MessagesProcessed.WithLabelValues(name, "input").Add(float64(len(batch.Messages))) 230 | processedLinks := 0 231 | 232 | for _, link := range p.Pipes[name] { 233 | if processedLinks >= 1 { 234 | batch = clogger.CloneBatch(batch) 235 | } 236 | p.channels[link.To] <- clogger.CloneBatch(batch) 237 | processedLinks += 1 238 | } 239 | } 240 | 241 | if cancelled { 242 | break 243 | } 244 | } 245 | 246 | input.Close(context.Background()) 247 | p.handleClose(name) 248 | }(name, input, 
inputCloseChannels[name]) 249 | } 250 | 251 | if p.debug { 252 | go func() { 253 | for { 254 | outputStr := "" 255 | for name, pipe := range p.channels { 256 | outputStr += fmt.Sprintf("[Step %s %d/%d] ", name, len(pipe), cap(pipe)) 257 | } 258 | 259 | fmt.Println(outputStr) 260 | time.Sleep(1 * time.Second) 261 | } 262 | }() 263 | } 264 | 265 | // A note on ordering here (UPDATE THIS IF YOU CHANGE ANYTHING BELOW THIS LINE): 266 | // 1. We kill the inputs so we stop enqueuing new messages, and then wait for all inputs to exit 267 | // 2. The closing of the input channels kills the aggregator channels that read from the inputs, to flush all messages to the outputs 268 | // 3. Once all the aggregator channels are closed, we close the firehose channel, to flush all the messages to the outputs 269 | // 4. The firehose channel being closed closes the pipeline channel that reads from the firehose 270 | // 5. In closing, the pipeline channel closes all the output channels 271 | // 6. The output channels being closed forces the outputs to flush and exit 272 | go func() { 273 | <-p.killChannel 274 | for name := range p.Inputs { 275 | inputCloseChannels[name] <- true 276 | } 277 | 278 | inputWg.Wait() 279 | filterWg.Wait() 280 | p.wg.Wait() 281 | }() 282 | } 283 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 3 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 4 | github.com/alecthomas/kong v0.2.22 h1:lRcQYT2/yJ+coDNA5ws0mRL0pwSqjbP/6AcRkyKhomk= 5 | github.com/alecthomas/kong v0.2.22/go.mod h1:uzxf/HUh0tj43x1AyJROl3JT7SgsZ5m+icOv1csRhc0= 6 | github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142 h1:8Uy0oSf5co/NZXje7U1z8Mpep++QJOldL2hs/sBQf48= 7 | github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= 8 | github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 9 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 10 | github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 11 | github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 12 | github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= 13 | github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= 14 | github.com/awalterschulze/gographviz v2.0.3+incompatible h1:9sVEXJBJLwGX7EQVhLm2elIKCm7P2YHFC8v6096G09E= 15 | github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= 16 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 17 | github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 18 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 19 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 20 | github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= 21 | 
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= 22 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 23 | github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= 24 | github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 25 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 26 | github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= 27 | github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 28 | github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= 29 | github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 30 | github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 31 | github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 32 | github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= 33 | github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 34 | github.com/d5/tengo/v2 v2.10.0 h1:gR3VwfJDBlffV8WzfSNNJ7WJtWduwbTKlAu14cA2fRs= 35 | github.com/d5/tengo/v2 v2.10.0/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8= 36 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 37 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 38 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 39 | github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 40 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 41 | github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 42 | github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 43 | github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= 44 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 45 | github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 46 | github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 47 | github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 48 | github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= 49 | github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 50 | github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 51 | github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= 52 | github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 53 | github.com/go-logr/logr v1.2.1 h1:DX7uPQ4WgAWfoh+NGGlbJQswnYIVvz0SRlLS3rPZQDA= 54 | github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 55 | github.com/go-logr/stdr v1.2.0 
h1:j4LrlVXgrbIWO83mmQUnK0Hi+YnbD+vzrE1z/EphbFE= 56 | github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= 57 | github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 58 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 59 | github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 60 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 61 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 62 | github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= 63 | github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= 64 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 65 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 66 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 67 | github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 68 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 69 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 70 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 71 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 72 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 73 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 74 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 75 | github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 76 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 77 | github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 78 | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 79 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 80 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 81 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 82 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 83 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 84 | github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 85 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 86 | github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= 87 | github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 88 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 89 | github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 90 | github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= 91 | github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 92 | github.com/jpillora/backoff v1.0.0/go.mod 
h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 93 | github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 94 | github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 95 | github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 96 | github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 97 | github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= 98 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 99 | github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 100 | github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 101 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 102 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 103 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 104 | github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 105 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 106 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 107 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 108 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 109 | github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 110 | github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 111 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 112 | github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 113 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 114 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 115 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 116 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 117 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 118 | github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 119 | github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= 120 | github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= 121 | github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= 122 | github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= 123 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 124 | github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 125 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 126 | github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= 127 | github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 128 | github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 129 | github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= 130 | github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= 131 | github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= 132 | github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 133 | github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 134 | github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 135 | github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= 136 | github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= 137 | github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 138 | github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= 139 | github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE= 140 | github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo= 141 | github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 142 | github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 143 | github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 144 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 145 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 146 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 147 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 148 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 149 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 150 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 151 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 152 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 153 | github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 154 | go.opentelemetry.io/contrib/propagators v0.21.0 h1:Wnio4Ffi9MoLrUkN/J5yqtHf2F9a7wa2VClFkKcQcOk= 155 | go.opentelemetry.io/contrib/propagators v0.21.0/go.mod h1:7QCSkXB+JDNZfohtRS0z3qnY+zPjFbe01o4iyEoPmRk= 156 | go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I= 157 | go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= 158 | go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= 159 | go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk= 160 | go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= 161 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 
h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk= 162 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= 163 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 h1:Ydage/P0fRrSPpZeCVxzjqGcI6iVmG2xb43+IR8cjqM= 164 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= 165 | go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC1 h1:SEfJImgKQ5TP2aTJwN08qhS8oFlYWr/neECGsyuxKWg= 166 | go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC1/go.mod h1:TAM/UYjVd1UdaifWkof3qj9cCW9oINemHfj0K6yodSo= 167 | go.opentelemetry.io/otel/oteltest v1.0.0-RC1 h1:G685iP3XiskCwk/z0eIabL55XUl2gk0cljhGk9sB0Yk= 168 | go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4= 169 | go.opentelemetry.io/otel/sdk v1.0.0-RC1/go.mod h1:kj6yPn7Pgt5ByRuwesbaWcRLA+V7BSDg3Hf8xRvsvf8= 170 | go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI= 171 | go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= 172 | go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg= 173 | go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= 174 | go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= 175 | go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= 176 | go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= 177 | go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= 178 | golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 179 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 180 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 181 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 182 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 183 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 184 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 185 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 186 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 187 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 188 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 189 | golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 190 | golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 191 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 192 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 193 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 194 | 
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 195 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 196 | golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 197 | golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 198 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 199 | golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI= 200 | golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 201 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 202 | golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 203 | golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 204 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 205 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 206 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 207 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 208 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 209 | golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 210 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 211 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 212 | golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 213 | golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 214 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 215 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 216 | golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 217 | golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 218 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 219 | golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 220 | golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 221 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 222 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 223 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 224 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 225 | golang.org/x/sys 
v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 226 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 227 | golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 228 | golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA= 229 | golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 230 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 231 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 232 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 233 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 234 | golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= 235 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 236 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 237 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 238 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 239 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 240 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 241 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 242 | golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 243 | golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= 244 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 245 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 246 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 247 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 248 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 249 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 250 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 251 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 252 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 253 | google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 254 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= 255 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 256 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 257 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 258 | google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= 259 | google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 260 | google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= 261 | google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 262 | google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= 263 | google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= 264 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 265 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 266 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 267 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 268 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 269 | google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 270 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 271 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 272 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 273 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 274 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 275 | google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= 276 | google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 277 | gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 278 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 279 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 280 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 281 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 282 | gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 283 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 284 | gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 285 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 286 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 287 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= 288 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 289 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 290 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 291 | --------------------------------------------------------------------------------
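For reference, below is a minimal sketch of how the pieces defined in internal/pipeline/pipeline.go fit together. It is illustrative only: RunPipeline is a hypothetical helper that is not part of the repo, the node names (Socket, FilterTests, SocketOutput, FileOutput) are just example labels, and it assumes the caller has already constructed concrete Inputter/Outputter/Filter values (for example via the registries in internal/inputs, internal/outputs and internal/filters).

package example

import (
	"github.com/sinkingpoint/clogger/internal/filters"
	"github.com/sinkingpoint/clogger/internal/inputs"
	"github.com/sinkingpoint/clogger/internal/outputs"
	"github.com/sinkingpoint/clogger/internal/pipeline"
)

// RunPipeline is a hypothetical helper (not part of the repo). It wires one
// input through one filter into two outputs and runs the graph until killed.
func RunPipeline(in inputs.Inputter, filter filters.Filter, sock, file outputs.Outputter) {
	p := pipeline.NewPipeline(
		map[string]inputs.Inputter{"Socket": in},
		map[string]outputs.Outputter{"SocketOutput": sock, "FileOutput": file},
		map[string]filters.Filter{"FilterTests": filter},
		map[string][]pipeline.Link{
			"Socket":      {pipeline.NewLink("FilterTests")},
			"FilterTests": {pipeline.NewLink("SocketOutput")},
			// A LINK_TYPE_BUFFER edge: FileOutput's channel is handed to
			// SocketOutput's StartOutputter call as its buffer channel.
			"SocketOutput": {pipeline.NewBufferLink("FileOutput")},
		},
	)

	p.Run() // spawns the input, filter and output goroutines and returns

	// ...later, e.g. on shutdown:
	p.Kill() // signals the inputs to stop and waits for the graph to drain
}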