├── Godeps
├── Godeps.json
└── Readme
├── LICENSE
├── README.md
├── buffer
└── buffer.go
├── examples
├── example.config.yml
├── example.elasticsearch-output.yml
├── example.filebeat-to-redis.yml
└── example.s3-output.yml
├── images
└── logzoom.png
├── input
├── filebeat
│ ├── filebeat.go
│ ├── lumberjack.go
│ └── parser.go
├── input.go
└── redis
│ └── redis.go
├── main.go
├── output
├── elasticsearch
│ ├── elasticsearch.go
│ └── index-template.go
├── output.go
├── redis
│ └── redis.go
├── s3
│ └── s3.go
├── tcp
│ └── tcp.go
└── websocket
│ ├── template.go
│ └── websocket.go
├── route
└── route.go
└── server
├── config.go
├── rand.go
└── server.go
/Godeps/Godeps.json:
--------------------------------------------------------------------------------
1 | {
2 | "ImportPath": "github.com/packetzoom/logzoom",
3 | "GoVersion": "go1.6",
4 | "GodepVersion": "v60",
5 | "Deps": [
6 | {
7 | "ImportPath": "github.com/adjust/redismq",
8 | "Rev": "c82d9b6313449b9fb76e13bf600e1db37dc14c0c"
9 | },
10 | {
11 | "ImportPath": "github.com/aws/aws-sdk-go/aws",
12 | "Comment": "v1.0.10",
13 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
14 | },
15 | {
16 | "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
17 | "Comment": "v1.0.10",
18 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
19 | },
20 | {
21 | "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
22 | "Comment": "v1.0.10",
23 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
24 | },
25 | {
26 | "ImportPath": "github.com/aws/aws-sdk-go/aws/client",
27 | "Comment": "v1.0.10",
28 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
29 | },
30 | {
31 | "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
32 | "Comment": "v1.0.10",
33 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
34 | },
35 | {
36 | "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
37 | "Comment": "v1.0.10",
38 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
39 | },
40 | {
41 | "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
42 | "Comment": "v1.0.10",
43 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
44 | },
45 | {
46 | "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
47 | "Comment": "v1.0.10",
48 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
49 | },
50 | {
51 | "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
52 | "Comment": "v1.0.10",
53 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
54 | },
55 | {
56 | "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
57 | "Comment": "v1.0.10",
58 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
59 | },
60 | {
61 | "ImportPath": "github.com/aws/aws-sdk-go/aws/request",
62 | "Comment": "v1.0.10",
63 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
64 | },
65 | {
66 | "ImportPath": "github.com/aws/aws-sdk-go/aws/session",
67 | "Comment": "v1.0.10",
68 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
69 | },
70 | {
71 | "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
72 | "Comment": "v1.0.10",
73 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
74 | },
75 | {
76 | "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
77 | "Comment": "v1.0.10",
78 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
79 | },
80 | {
81 | "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
82 | "Comment": "v1.0.10",
83 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
84 | },
85 | {
86 | "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
87 | "Comment": "v1.0.10",
88 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
89 | },
90 | {
91 | "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
92 | "Comment": "v1.0.10",
93 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
94 | },
95 | {
96 | "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
97 | "Comment": "v1.0.10",
98 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
99 | },
100 | {
101 | "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
102 | "Comment": "v1.0.10",
103 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
104 | },
105 | {
106 | "ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
107 | "Comment": "v1.0.10",
108 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
109 | },
110 | {
111 | "ImportPath": "github.com/aws/aws-sdk-go/service/s3",
112 | "Comment": "v1.0.10",
113 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
114 | },
115 | {
116 | "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
117 | "Comment": "v1.0.10",
118 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
119 | },
120 | {
121 | "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
122 | "Comment": "v1.0.10",
123 | "Rev": "9ec7da8e4a0ddb21abc7137529e19fdf74f2bd61"
124 | },
125 | {
126 | "ImportPath": "github.com/go-ini/ini",
127 | "Comment": "v1.8.6",
128 | "Rev": "afbd495e5aaea13597b5e14fe514ddeaa4d76fc3"
129 | },
130 | {
131 | "ImportPath": "github.com/jehiah/go-strftime",
132 | "Rev": "2efbe75097a505e2789f7e39cb9da067b5be8e3e"
133 | },
134 | {
135 | "ImportPath": "github.com/jmespath/go-jmespath",
136 | "Comment": "0.2.2-2-gc01cf91",
137 | "Rev": "c01cf91b011868172fdcd9f41838e80c9d716264"
138 | },
139 | {
140 | "ImportPath": "github.com/paulbellamy/ratecounter",
141 | "Rev": "5a11f585a31379765c190c033b6ad39956584447"
142 | },
143 | {
144 | "ImportPath": "golang.org/x/net/websocket",
145 | "Rev": "6c89489cafabcbc76df9dbf84ebf07204673fecf"
146 | },
147 | {
148 | "ImportPath": "gopkg.in/bsm/ratelimit.v1",
149 | "Rev": "f14ad9c78b155f69b480cfa41cb655259baac260"
150 | },
151 | {
152 | "ImportPath": "gopkg.in/olivere/elastic.v2",
153 | "Comment": "v2.0.26",
154 | "Rev": "a094e9ef25446294f7f79c48b39f0d004849f696"
155 | },
156 | {
157 | "ImportPath": "gopkg.in/olivere/elastic.v2/uritemplates",
158 | "Comment": "v2.0.26",
159 | "Rev": "a094e9ef25446294f7f79c48b39f0d004849f696"
160 | },
161 | {
162 | "ImportPath": "gopkg.in/redis.v3",
163 | "Comment": "v3.2.27",
164 | "Rev": "dd1ac33826064181d7abaa20d976eab31ec12b58"
165 | },
166 | {
167 | "ImportPath": "gopkg.in/redis.v3/internal/consistenthash",
168 | "Comment": "v3.2.27",
169 | "Rev": "dd1ac33826064181d7abaa20d976eab31ec12b58"
170 | },
171 | {
172 | "ImportPath": "gopkg.in/redis.v3/internal/hashtag",
173 | "Comment": "v3.2.27",
174 | "Rev": "dd1ac33826064181d7abaa20d976eab31ec12b58"
175 | },
176 | {
177 | "ImportPath": "gopkg.in/yaml.v2",
178 | "Rev": "f7716cbe52baa25d2e9b0d0da546fcf909fc16b4"
179 | }
180 | ]
181 | }
182 |
--------------------------------------------------------------------------------
/Godeps/Readme:
--------------------------------------------------------------------------------
1 | This directory tree is generated automatically by godep.
2 |
3 | Please do not edit.
4 |
5 | See https://github.com/tools/godep for more information.
6 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 PacketZoom, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LogZoom - A Lumberjack => Logstash indexer in Go
2 |
3 | LogZoom is a lightweight, Lumberjack-compliant log indexer based off the fine
4 | work of Hailo's [Logslam](https://github.com/hailocab/logslam). It accepts
the Lumberjack v2 protocol, which is currently supported by
[Elastic's Filebeat](https://github.com/elastic/beats).
7 |
It was written with the intention of being a smaller, more efficient, and more
reliable replacement for Logstash and Fluentd.
10 |
11 | ### What does LogZoom do?
12 |
13 | Like Logstash, LogZoom receives JSON data from Filebeat via the [Lumberjack
14 | v2](https://github.com/elastic/libbeat/issues/279) protocol and inserts the
15 | data into different outputs. For example, let's say your application generated
16 | a JSON line for every event:
17 |
18 | ```json
19 | {"@timestamp":"2016-03-31T22:23:14+0000", "url": "http://www.google.com"}
20 | {"@timestamp":"2016-03-31T22:25:14+0000", "url": "http://www.bing.com"}
21 | {"@timestamp":"2016-03-31T22:26:14+0000", "url": "http://www.yahoo.com"}
22 | ```
23 |
24 | As the diagram shows, you can then run a single process of LogZoom to
25 | receive this data and insert to Elasticsearch, S3, etc:
26 |
27 |

28 |
29 | Unlike Logstash, however, LogZoom does not attempt to manipulate data in
30 | any shape or form. JSON data that arrives from Filebeat is directly sent to
31 | outputs as-is.
32 |
33 | Many users commonly use Logstash by adding a grok filter, ["currently the best
34 | way in logstash to parse crappy unstructured log
35 | data."](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html)
36 | LogZoom currently does NOT support this use case; it is designed for
37 | software applications that generate structured data directly.
38 |
39 | For example, if you are trying to use Kibana, a frontend to Elasticsearch, you
40 | may need the `@timestamp` field, which Logstash typically inserts for
41 | you. With LogZoom, your application must generate this field in each JSON
42 | log line. The advantages of using this approach:
43 |
44 | 1. LogZoom doesn't have to decode the JSON, insert a new field, and encode
45 | the JSON again. Logstash and Fluentd spend a fair amount of CPU
46 | time doing this.
47 |
48 | 2. The application explicitly defines the semantics of `@timestamp`. When we
49 | used Logstash, we were confused that each record was stamped when the entry
50 | was received by the central Logstash process, not when it was generated by
51 | the client. This caused great confusion, as we would often see large gaps
52 | in data when the data was just marked with the wrong timestamp.
53 |
54 | ## Supported IO
55 |
56 | ### Inputs
57 |
58 | - Filebeat (Lumberjack V2 Protocol)
59 | - Redis Message Queue
60 |
61 | ### Outputs
62 |
63 | - Redis Message Queue
64 | - TCP Streaming
65 | - WebSocket Streaming
66 | - Elasticsearch
67 | - S3
68 |
69 | ## Getting Started
70 |
71 | ### 1. Create config
72 |
73 | Create a YAML config file specifying the desired input and outputs. An example
74 | config can be found in examples/example.config.yml:
75 |
76 | ```yaml
77 | inputs:
78 | filebeat:
79 | host: 0.0.0.0:7200
80 | ssl_crt: /etc/filebeat/filebeat.crt
81 | ssl_key: /etc/filebeat/filebeat.key
82 | outputs:
83 | tcp:
84 | host: :7201
85 | websocket:
86 | host: :7202
87 | elasticsearch:
88 | hosts:
89 | - http://localhost:9200
```
91 |
92 | ### 2. Run the server
93 |
94 | ```
95 | $ go build
96 | $ $GOPATH/bin/logzoom -config=examples/example.config.yml
97 | 2016/04/07 20:22:50 Starting server
98 | 2016/04/07 20:22:50 Starting buffer
99 | 2016/04/07 20:22:50 Starting input filebeat
100 | 2016/04/07 20:22:50 Starting output tcp
101 | 2016/04/07 20:22:50 Starting output websocket
102 | 2016/04/07 20:22:50 Starting output elasticsearch
103 | 2016/04/07 20:22:50 Setting HTTP timeout to 1m0s
104 | 2016/04/07 20:22:50 Setting GZIP enabled: false
105 | 2016/04/07 20:22:50 Connected to Elasticsearch
106 | ```
107 |
108 | ### Streaming logs via TCP
109 |
110 | ```
111 | nc localhost 7201
112 | ```
113 |
114 | ### Streaming logs via WebSocket
115 |
116 | ```
117 | Connect to http://localhost:7202 in a browser.
118 | A list of known sources will be displayed.
119 | ```
120 |
121 | ### Elasticsearch support
122 |
123 | Note that currently only Elasticsearch 1.x is supported. If you need 2.x
support, I think it is just a matter of updating LogZoom to use [Oliver
Eilhard's 3.x client](https://github.com/olivere/elastic#releases).
126 |
--------------------------------------------------------------------------------
/buffer/buffer.go:
--------------------------------------------------------------------------------
1 | package buffer
2 |
3 | import (
4 | "log"
5 | "time"
6 | )
7 |
8 | const (
9 | bufSize = 100
10 | )
11 |
12 | type Sender interface {
13 | AddSubscriber(string, chan *Event) error
14 | DelSubscriber(string) error
15 | }
16 |
17 | // Taken from https://github.com/elasticsearch/logstash-forwarder/blob/master/event.go
18 | type Event struct {
19 | Source string `json:"source,omitempty"`
20 | Offset int64 `json:"offset,omitempty"`
21 | Line uint64 `json:"line,omitempty"`
22 | Text *string `json:"text,omitempty"`
23 | Fields *map[string]interface{}
24 | }
25 |
26 | // subscriber is some host that wants to receive events
27 | type subscriber struct {
28 | Name string
29 | Send chan *Event
30 | }
31 |
32 | type Buffer struct {
33 | send chan *Event
34 | subscribers map[string]*subscriber
35 | add chan *subscriber
36 | del chan string
37 | term chan bool
38 | ticker *time.Ticker
39 | }
40 |
41 | func New() *Buffer {
42 | return &Buffer{
43 | ticker: time.NewTicker(time.Duration(10) * time.Millisecond),
44 | send: make(chan *Event, bufSize),
45 | subscribers: make(map[string]*subscriber),
46 | add: make(chan *subscriber, 1),
47 | del: make(chan string, 1),
48 | term: make(chan bool, 1),
49 | }
50 | }
51 |
52 | func (b *Buffer) AddSubscriber(name string, ch chan *Event) error {
53 | b.add <- &subscriber{name, ch}
54 | return nil
55 | }
56 |
57 | func (b *Buffer) DelSubscriber(name string) error {
58 | b.del <- name
59 | return nil
60 | }
61 |
62 | func (b *Buffer) Publish(event *Event) {
63 | for _, sub := range b.subscribers {
64 | select {
65 | case sub.Send <- event:
66 | }
67 | }
68 | }
69 |
70 | func (b *Buffer) Send(event *Event) {
71 | b.send <- event
72 | }
73 |
74 | func (b *Buffer) Start() {
75 | for {
76 | select {
77 | case e := <-b.send:
78 | b.Publish(e)
79 | case s := <-b.add:
80 | if _, ok := b.subscribers[s.Name]; ok {
81 | log.Printf("A subscriber is already registered for %s\n", s.Name)
82 | continue
83 | }
84 | b.subscribers[s.Name] = s
85 | case h := <-b.del:
86 | delete(b.subscribers, h)
87 | case <-b.term:
88 | log.Println("Received on term chan")
89 | break
90 | case <-b.ticker.C:
91 | }
92 | }
93 | }
94 | func (b *Buffer) Stop() error {
95 | b.term <- true
96 | return nil
97 | }
98 |
--------------------------------------------------------------------------------
/examples/example.config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | inputs:
3 | - all_filebeat:
4 | filebeat:
5 | host: 0.0.0.0:5000
6 | ssl_key: "/etc/filebeat/filebeat.key"
7 | ssl_crt: "/etc/filebeat/filebeat.crt"
8 |
9 | outputs:
10 | - tcp1:
11 | tcp:
12 | host: "127.0.0.1:6000"
13 | - ws1:
14 | websocket:
15 | host: 0.0.0.0:7200
16 | - es:
17 | elasticsearch:
18 | hosts:
19 | - http://localhost:9200
20 | index: "logstash"
21 | index_type: "type1"
22 | gzip_enabled: false
23 | info_log_enabled: false
24 | error_log_enabled: false
25 |
--------------------------------------------------------------------------------
/examples/example.elasticsearch-output.yml:
--------------------------------------------------------------------------------
1 | ---
2 | inputs:
3 | - redis_type1:
4 | redis:
5 | db: 10
6 | host: localhost
7 | input_queue: type1_elasticsearch
8 | port: 6379
9 | - redis_type2:
10 | redis:
11 | db: 10
12 | host: localhost
13 | input_queue: type2_elasticsearch
14 | port: 6379
15 |
16 | outputs:
17 | - type1_es:
18 | elasticsearch:
19 | hosts:
20 | - http://localhost:9200
21 | index: "logstash"
22 | index_type: "type1"
23 | gzip_enabled: false
24 | info_log_enabled: true
25 | error_log_enabled: true
26 | - type2_es:
27 | elasticsearch:
28 | hosts:
29 | - http://localhost:9200
30 | index: "logstash"
31 | index_type: "type2"
32 | gzip_enabled: false
33 | info_log_enabled: true
34 | error_log_enabled: true
35 |
36 | routes:
37 | - route1:
38 | input: redis_type1
39 | output: type1_es
40 | - route2:
41 | input: redis_type2
42 | output: type2_es
43 |
--------------------------------------------------------------------------------
/examples/example.filebeat-to-redis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | inputs:
3 | - all_filebeat:
4 | filebeat:
5 | host: 0.0.0.0:5000
6 | ssl_crt: /etc/filebeat/filebeat.crt
7 | ssl_key: /etc/filebeat/filebeat.key
8 | outputs:
9 | - type1_redis:
10 | redis:
11 | db: 10
12 | host: localhost
13 | copy_queues: ["log_type1_elasticsearch", "log_type1_s3"]
14 | port: 6379
15 | - type2_redis:
16 | redis:
17 | db: 10
18 | host: localhost
19 | copy_queues: ["log_type2_elasticsearch", "log_type2_s3"]
20 | port: 6379
21 | routes:
22 | - route1:
23 | input: all_filebeat
24 | rules:
25 | log_type: "log_type1"
26 | output: log_type1_redis
27 | - route2:
28 | input: all_filebeat
29 | rules:
30 | log_type: "log_type2"
31 | output: log_type2_redis
32 |
--------------------------------------------------------------------------------
/examples/example.s3-output.yml:
--------------------------------------------------------------------------------
1 | ---
2 | inputs:
3 | - redis_type1:
4 | redis:
5 | db: 10
6 | host: localhost
7 | input_queue: type1_s3
8 | port: 6379
9 | - redis_type2:
10 | redis:
11 | db: 10
12 | host: localhost
13 | input_queue: type2_s3
14 | port: 6379
15 |
16 | outputs:
17 | - s3_type1:
18 | s3:
19 | aws_key_id_loc: < Your AWS Key ID File Loc >
20 | aws_sec_key_loc: < Your AWS Secret Access Key Loc >
21 | aws_s3_bucket: < Your AWS Bucket >
22 | aws_s3_region: < Your AWS Bucket Region >
23 | local_path: "local-path-to-dump-temp-file"
24 | s3_path: "path-in-bucket-to-put-the-file"
25 | time_slice_format: "%Y-%m-%d/%H%M"
26 | aws_s3_output_key: "%{path}/%{timeSlice}/%{hostname}_%{uuid}.gz"
27 | - s3_type2:
28 | s3:
29 | aws_key_id_loc: < Your AWS Key ID File Loc >
30 | aws_sec_key_loc: < Your AWS Secret Access Key Loc >
31 | aws_s3_bucket: < Your AWS Bucket >
32 | aws_s3_region: < Your AWS Bucket Region >
33 | local_path: "local-path-to-dump-temp-file"
34 | s3_path: "path-in-bucket-to-put-the-file"
35 | time_slice_format: "%Y-%m-%d/%H%M"
36 | aws_s3_output_key: "%{path}/%{timeSlice}/%{hostname}_%{uuid}.gz"
37 |
38 | routes:
39 | - route1:
40 | input: redis_type1
41 | output: s3_type1
42 | - route2:
43 | input: redis_type2
44 | output: s3_type2
45 |
--------------------------------------------------------------------------------
/images/logzoom.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/packetzoom/logzoom/f627f96f0989c3645c8f72257071d15fcdd76d18/images/logzoom.png
--------------------------------------------------------------------------------
/input/filebeat/filebeat.go:
--------------------------------------------------------------------------------
1 | package filebeat
2 |
3 | import (
4 | "github.com/packetzoom/logzoom/input"
5 | )
6 |
7 | func init() {
8 | input.Register("filebeat", New)
9 | }
10 |
--------------------------------------------------------------------------------
/input/filebeat/lumberjack.go:
--------------------------------------------------------------------------------
1 | package filebeat
2 |
3 | import (
4 | "crypto/tls"
5 | "fmt"
6 | "gopkg.in/yaml.v2"
7 | "log"
8 | "net"
9 |
10 | "github.com/packetzoom/logzoom/input"
11 | )
12 |
13 | type Config struct {
14 | Host string `yaml:"host"`
15 | SSLCrt string `yaml:"ssl_crt"`
16 | SSLKey string `yaml:"ssl_key"`
17 | SampleSize *int `yaml:"sample_size,omitempty"`
18 | }
19 |
20 | type LJServer struct {
21 | name string
22 | Config *Config
23 | r input.Receiver
24 | term chan bool
25 | }
26 |
27 | func New() input.Input {
28 | return &LJServer{term: make(chan bool, 1)}
29 | }
30 |
31 | // lumberConn handles an incoming connection from a lumberjack client
32 | func lumberConn(c net.Conn, r input.Receiver, sampleSize int) {
33 | defer c.Close()
34 | log.Printf("[%s] accepting lumberjack connection", c.RemoteAddr().String())
35 | NewParser(c, r, sampleSize).Parse()
36 | log.Printf("[%s] closing lumberjack connection", c.RemoteAddr().String())
37 | }
38 |
39 | func (lj *LJServer) Init(name string, config yaml.MapSlice, r input.Receiver) error {
40 | var ljConfig *Config
41 |
42 | // go-yaml doesn't have a great way to partially unmarshal YAML data
43 | // See https://github.com/go-yaml/yaml/issues/13
44 | yamlConfig, _ := yaml.Marshal(config)
45 |
46 | if err := yaml.Unmarshal(yamlConfig, &ljConfig); err != nil {
47 | return fmt.Errorf("Error parsing lumberjack config: %v", err)
48 | }
49 |
50 | lj.name = name
51 | lj.Config = ljConfig
52 | lj.r = r
53 |
54 | return nil
55 | }
56 |
57 | func (lj *LJServer) Start() error {
58 | cert, err := tls.LoadX509KeyPair(lj.Config.SSLCrt, lj.Config.SSLKey)
59 | if err != nil {
60 | return fmt.Errorf("Error loading keys: %v", err)
61 | }
62 |
63 | if lj.Config.SampleSize == nil {
64 | i := 100
65 | lj.Config.SampleSize = &i
66 | }
67 | log.Printf("[%s] Setting Sample Size to %d%%", lj.name, *lj.Config.SampleSize)
68 |
69 | conn, err := net.Listen("tcp", lj.Config.Host)
70 | if err != nil {
71 | return fmt.Errorf("Listener failed: %v", err)
72 | }
73 |
74 | config := tls.Config{Certificates: []tls.Certificate{cert}}
75 |
76 | ln := tls.NewListener(conn, &config)
77 |
78 | log.Printf("[%s] Started Lumberjack Instance", lj.name)
79 | for {
80 | select {
81 | case <-lj.term:
82 | log.Println("Lumberjack server received term signal")
83 | return nil
84 | default:
85 | conn, err := ln.Accept()
86 | if err != nil {
87 | log.Printf("Error accepting connection: %v", err)
88 | continue
89 | }
90 | go lumberConn(conn, lj.r, *lj.Config.SampleSize)
91 | }
92 | }
93 |
94 | return nil
95 | }
96 |
97 | func (lj *LJServer) Stop() error {
98 | lj.term <- true
99 | return nil
100 | }
101 |
--------------------------------------------------------------------------------
/input/filebeat/parser.go:
--------------------------------------------------------------------------------
1 | package filebeat
2 |
3 | import (
4 | "bytes"
5 | "compress/zlib"
6 | "encoding/binary"
7 | "encoding/json"
8 | "fmt"
9 | "io"
10 | "log"
11 | "net"
12 | "strconv"
13 | "strings"
14 | "time"
15 |
16 | "github.com/packetzoom/logzoom/buffer"
17 | "github.com/packetzoom/logzoom/input"
18 | "github.com/packetzoom/logzoom/server"
19 | )
20 |
21 | const (
22 | ack = "2A"
23 | maxKeyLen = 100 * 1024 * 1024 // 100 mb
24 | maxValueLen = 250 * 1024 * 1024 // 250 mb
25 | )
26 |
27 | type Parser struct {
28 | Conn net.Conn
29 | Recv input.Receiver
30 | wlen, plen uint32
31 | buffer io.Reader
32 | SampleSize int
33 | }
34 |
35 | func NewParser(c net.Conn, r input.Receiver, sampleSize int) *Parser {
36 | return &Parser{
37 | Conn: c,
38 | Recv: r,
39 | SampleSize: sampleSize,
40 | }
41 | }
42 |
43 | // ack acknowledges that the payload was received successfully
44 | func (p *Parser) ack(seq uint32) error {
45 | buffer := bytes.NewBuffer([]byte(ack))
46 | binary.Write(buffer, binary.BigEndian, seq)
47 | //log.Printf("Sending ACK with seq %d", seq)
48 |
49 | if _, err := p.Conn.Write(buffer.Bytes()); err != nil {
50 | return err
51 | }
52 |
53 | return nil
54 | }
55 |
56 | // readKV parses key value pairs from within the payload
57 | func (p *Parser) readKV() ([]byte, []byte, error) {
58 | var klen, vlen uint32
59 |
60 | // Read key len
61 | binary.Read(p.buffer, binary.BigEndian, &klen)
62 |
63 | if klen > maxKeyLen {
64 | return nil, nil, fmt.Errorf("key exceeds max len %d, got %d bytes", maxKeyLen, klen)
65 | }
66 |
67 | // Read key
68 | key := make([]byte, klen)
69 | _, err := p.buffer.Read(key)
70 | if err != nil {
71 | return nil, nil, err
72 | }
73 |
74 | // Read value len
75 | binary.Read(p.buffer, binary.BigEndian, &vlen)
76 | if vlen > maxValueLen {
77 | return nil, nil, fmt.Errorf("value exceeds max len %d, got %d bytes", maxValueLen, vlen)
78 | }
79 |
80 | // Read value
81 | value := make([]byte, vlen)
82 | _, err = p.buffer.Read(value)
83 | if err != nil {
84 | return nil, nil, err
85 | }
86 |
87 | return key, value, nil
88 | }
89 |
90 | // read parses the compressed data frame
91 | func (p *Parser) read() (uint32, error) {
92 | var seq, count uint32
93 | var k, v []byte
94 | var err error
95 |
96 | r, err := zlib.NewReader(p.Conn)
97 | if err != nil {
98 | return seq, err
99 | }
100 | defer r.Close()
101 |
102 | // Decompress
103 | buff := new(bytes.Buffer)
104 | io.Copy(buff, r)
105 | p.buffer = buff
106 |
107 | b := make([]byte, 2)
108 | for i := uint32(0); i < p.wlen; i++ {
109 | n, err := buff.Read(b)
110 | if err == io.EOF {
111 | return seq, err
112 | }
113 |
114 | if n == 0 {
115 | continue
116 | }
117 |
118 | switch string(b) {
119 | case "2D": // window size
120 | binary.Read(buff, binary.BigEndian, &seq)
121 | binary.Read(buff, binary.BigEndian, &count)
122 |
123 | var ev buffer.Event
124 | fields := make(map[string]interface{})
125 | fields["timestamp"] = time.Now().Format(time.RFC3339Nano)
126 |
127 | for j := uint32(0); j < count; j++ {
128 | if k, v, err = p.readKV(); err != nil {
129 | return seq, err
130 | }
131 | fields[string(k)] = string(v)
132 | }
133 |
134 | ev.Source = fmt.Sprintf("lumberjack://%s%s", fields["host"], fields["file"])
135 | ev.Offset, _ = strconv.ParseInt(fields["offset"].(string), 10, 64)
136 | ev.Line = uint64(seq)
137 | t := fields["line"].(string)
138 | ev.Text = &t
139 | ev.Fields = &fields
140 |
141 | // Send to the receiver which is a buffer. We block because...
142 | if server.RandInt(0, 100) < p.SampleSize {
143 | p.Recv.Send(&ev)
144 | }
145 |
146 | case "2J": // JSON
147 | //log.Printf("Got JSON data")
148 | binary.Read(buff, binary.BigEndian, &seq)
149 | binary.Read(buff, binary.BigEndian, &count)
150 | jsonData := make([]byte, count)
151 | _, err := p.buffer.Read(jsonData)
152 | //log.Printf("Got message: %s", jsonData)
153 |
154 | if err != nil {
155 | return seq, err
156 | }
157 |
158 | var ev buffer.Event
159 | var fields map[string]interface{}
160 | decoder := json.NewDecoder(strings.NewReader(string(jsonData)))
161 | decoder.UseNumber()
162 | err = decoder.Decode(&fields)
163 |
164 | if err != nil {
165 | return seq, err
166 | }
167 | ev.Source = fmt.Sprintf("lumberjack://%s%s", fields["host"], fields["file"])
168 | jsonNumber := fields["offset"].(json.Number)
169 | ev.Offset, _ = jsonNumber.Int64()
170 | ev.Line = uint64(seq)
171 | t := fields["message"].(string)
172 | ev.Text = &t
173 | ev.Fields = &fields
174 |
175 | // Send to the receiver which is a buffer. We block because...
176 | if server.RandInt(0, 100) < p.SampleSize {
177 | p.Recv.Send(&ev)
178 | }
179 |
180 | default:
181 | return seq, fmt.Errorf("unknown type: %s", b)
182 | }
183 | }
184 |
185 | return seq, nil
186 | }
187 |
188 | // Parse initialises the read loop and begins parsing the incoming request
189 | func (p *Parser) Parse() {
190 | b := make([]byte, 2)
191 |
192 | Read:
193 | for {
194 | n, err := p.Conn.Read(b)
195 |
196 | if err != nil || n == 0 {
197 | log.Printf("[%s] error reading %v", p.Conn.RemoteAddr().String(), err)
198 | break Read
199 | }
200 |
201 | switch string(b) {
202 | case "2W": // window length
203 | binary.Read(p.Conn, binary.BigEndian, &p.wlen)
204 | case "2C": // frame length
205 | binary.Read(p.Conn, binary.BigEndian, &p.plen)
206 | var seq uint32
207 | seq, err := p.read()
208 |
209 | if err != nil {
210 | log.Printf("[%s] error parsing %v", p.Conn.RemoteAddr().String(), err)
211 | break Read
212 | }
213 |
214 | if err := p.ack(seq); err != nil {
215 | log.Printf("[%s] error acking %v", p.Conn.RemoteAddr().String(), err)
216 | break Read
217 | }
218 | default:
219 | // This really shouldn't happen
220 | log.Printf("[%s] Received unknown type (%s): %s", p.Conn.RemoteAddr().String(), b, err)
221 | break Read
222 | }
223 | }
224 | }
225 |
--------------------------------------------------------------------------------
/input/input.go:
--------------------------------------------------------------------------------
1 | package input
2 |
3 | import (
4 | "fmt"
5 | "gopkg.in/yaml.v2"
6 |
7 | "github.com/packetzoom/logzoom/buffer"
8 | )
9 |
// Receiver accepts events produced by an input and forwards them
// onward (in practice this is the server's central buffer).
type Receiver interface {
	Send(*buffer.Event)
}

// Input is a pluggable event source. Init wires up its instance name,
// raw YAML config section and the Receiver to deliver events to;
// Start runs the input (blocking), and Stop requests shutdown.
type Input interface {
	Init(string, yaml.MapSlice, Receiver) error
	Start() error
	Stop() error
}

var (
	// inputs maps a registered input name to its constructor.
	inputs = make(map[string]func()Input)
)
23 |
24 | func Register(name string, constructor func()Input) error {
25 | if _, ok := inputs[name]; ok {
26 | return fmt.Errorf("Input %s already exists", name)
27 | }
28 | inputs[name] = constructor
29 | return nil
30 | }
31 |
32 | func Load(name string) (Input, error) {
33 | constructor, ok := inputs[name]
34 | if !ok {
35 | return nil, fmt.Errorf("Constructor %s not found", name)
36 | }
37 | return constructor(), nil
38 | }
39 |
--------------------------------------------------------------------------------
/input/redis/redis.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "fmt"
7 | "log"
8 | "strconv"
9 | "strings"
10 | "time"
11 |
12 | "github.com/adjust/redismq"
13 | "github.com/packetzoom/logzoom/buffer"
14 | "github.com/packetzoom/logzoom/input"
15 | "github.com/packetzoom/logzoom/server"
16 | "github.com/paulbellamy/ratecounter"
17 | "gopkg.in/yaml.v2"
18 | )
19 |
const (
	// recvBuffer is the maximum number of messages fetched from Redis
	// per MultiGet batch.
	recvBuffer = 10000
)

// Config holds the YAML settings for the Redis input: connection
// details, the queue to consume, whether to JSON-decode payloads, and
// an optional sampling percentage (defaulted to 100 by ValidateConfig).
type Config struct {
	Host string `yaml:"host"`
	Port int `yaml:"port"`
	Db int64 `yaml:"db"`
	Password string `yaml:"password"`
	InputQueue string `yaml:"input_queue"`
	JsonDecode bool `yaml:"json_decode"`
	SampleSize *int `yaml:"sample_size,omitempty"`
}

// RedisInputServer consumes messages from a redismq queue and forwards
// them to an input.Receiver.
type RedisInputServer struct {
	name string
	config Config
	receiver input.Receiver
	term chan bool
}
40 |
// init registers this input under the name "redis" so it can be
// selected from configuration.
func init() {
	input.Register("redis", New)
}
44 |
45 | func New() input.Input {
46 | return &RedisInputServer{term: make(chan bool, 1)}
47 | }
48 |
// redisGet is the consume loop: it drains the consumer's queue in
// batches of up to recvBuffer messages, forwarding each payload to the
// receiver after optional JSON decoding and percentage sampling. It
// never returns under normal operation (only the unreachable trailing
// return exists), sleeping and retrying on Redis errors.
func redisGet(redisServer *RedisInputServer, consumer *redismq.Consumer) error {
	consumer.ResetWorking()
	rateCounter := ratecounter.NewRateCounter(1 * time.Second)

	for {
		unacked := consumer.GetUnackedLength()

		// Requeue anything left unacked by a previous run before
		// fetching new work.
		if unacked > 0 {
			log.Printf("Requeued %d messages\n", unacked)
			consumer.RequeueWorking()
		}

		packages, err := consumer.MultiGet(recvBuffer)

		if err == nil {
			numPackages := len(packages)

			if numPackages > 0 {
				rateCounter.Incr(int64(numPackages))
				// NOTE(review): the whole batch is acked before the
				// events are delivered below, so messages can be lost
				// if the process dies mid-batch — confirm this
				// at-most-once behavior is intended.
				err = packages[numPackages-1].MultiAck()

				if err != nil {
					log.Println("Failed to ack", err)
				}
			}

			for i := range packages {
				var ev buffer.Event
				payload := string(packages[i].Payload)
				ev.Text = &payload

				if redisServer.config.JsonDecode {
					decoder := json.NewDecoder(strings.NewReader(payload))
					// Keep numeric fields as json.Number, not float64.
					decoder.UseNumber()

					err = decoder.Decode(&ev.Fields)

					// Silently skip payloads that are not valid JSON.
					if err != nil {
						continue
					}
				}

				// Forward only SampleSize% of messages.
				if server.RandInt(0, 100) < *redisServer.config.SampleSize {
					redisServer.receiver.Send(&ev)
				}
			}
		} else {
			log.Printf("Error reading from Redis: %s, sleeping", err)
			time.Sleep(2 * time.Second)
		}
	}

	return nil
}
103 |
// ValidateConfig checks the required connection settings and applies
// the default sample size (100%) when none was supplied.
// Note: the default is written to redisServer.config (the copy stored
// by Init), not to the config argument being validated.
func (redisServer *RedisInputServer) ValidateConfig(config *Config) error {
	if len(config.Host) == 0 {
		return errors.New("Missing Redis host")
	}

	if config.Port <= 0 {
		return errors.New("Missing Redis port")
	}

	if len(config.InputQueue) == 0 {
		return errors.New("Missing Redis input queue name")
	}

	if redisServer.config.SampleSize == nil {
		i := 100
		redisServer.config.SampleSize = &i
	}
	log.Printf("[%s] Setting Sample Size to %d", redisServer.name, *redisServer.config.SampleSize)

	return nil
}
125 |
126 | func (redisServer *RedisInputServer) Init(name string, config yaml.MapSlice, receiver input.Receiver) error {
127 | var redisConfig *Config
128 |
129 | // go-yaml doesn't have a great way to partially unmarshal YAML data
130 | // See https://github.com/go-yaml/yaml/issues/13
131 | yamlConfig, _ := yaml.Marshal(config)
132 |
133 | if err := yaml.Unmarshal(yamlConfig, &redisConfig); err != nil {
134 | return fmt.Errorf("Error parsing Redis config: %v", err)
135 | }
136 |
137 | redisServer.name = name
138 | redisServer.config = *redisConfig
139 | redisServer.receiver = receiver
140 |
141 | if err := redisServer.ValidateConfig(redisConfig); err != nil {
142 | return fmt.Errorf("Error in config: %v", err)
143 | }
144 |
145 | return nil
146 | }
147 |
148 | func (redisServer *RedisInputServer) Start() error {
149 | log.Printf("Starting Redis input on input queue: %s, working queue: %s",
150 | redisServer.config.InputQueue,
151 | redisServer.config.InputQueue + "_working")
152 |
153 | port := strconv.Itoa(redisServer.config.Port)
154 |
155 | // Create Redis queue
156 | queue := redismq.CreateQueue(redisServer.config.Host,
157 | port,
158 | redisServer.config.Password,
159 | redisServer.config.Db,
160 | redisServer.config.InputQueue)
161 |
162 | consumer, err := queue.AddConsumer(redisServer.config.InputQueue + "_working")
163 |
164 | if err != nil {
165 | log.Println("Error opening Redis input")
166 | return err
167 | }
168 |
169 | go redisGet(redisServer, consumer)
170 |
171 | for {
172 | select {
173 | case <-redisServer.term:
174 | log.Println("Redis input server received term signal")
175 | return nil
176 | }
177 | }
178 |
179 | return nil
180 | }
181 |
// Stop asks Start to exit; the buffered term channel makes the first
// call non-blocking.
func (redisServer *RedisInputServer) Stop() error {
	redisServer.term <- true
	return nil
}
186 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "log"
7 | "os"
8 |
9 | _ "github.com/packetzoom/logzoom/input/filebeat"
10 | _ "github.com/packetzoom/logzoom/input/redis"
11 | _ "github.com/packetzoom/logzoom/output/elasticsearch"
12 | _ "github.com/packetzoom/logzoom/output/redis"
13 | _ "github.com/packetzoom/logzoom/output/s3"
14 | _ "github.com/packetzoom/logzoom/output/tcp"
15 | _ "github.com/packetzoom/logzoom/output/websocket"
16 | "github.com/packetzoom/logzoom/server"
17 | "net/http"
18 | _ "net/http/pprof"
19 | "runtime"
20 | "runtime/pprof"
21 | )
22 |
var (
	// config is the path to the YAML config file (required).
	config string
	// Optional profiling destinations, populated from command-line flags in init.
	memprofile, cpuprofile, httpprof *string
)
27 |
// Flags are defined and parsed in init so the required -config flag is
// validated before main runs; the process exits immediately when it is
// missing.
func init() {
	memprofile = flag.String("memprofile", "", "Write memory profile to this file")
	cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file")
	httpprof = flag.String("httpprof", "", "Start pprof http server")
	flag.StringVar(&config, "config", "", "Path to the config file")
	flag.Parse()

	if len(config) == 0 {
		fmt.Fprintln(os.Stderr, "Require a config file")
		flag.PrintDefaults()
		os.Exit(1)
	}
}
41 |
// writeHeapProfile dumps the current heap profile to filename, logging
// (rather than aborting) on any failure.
func writeHeapProfile(filename string) {
	f, err := os.Create(filename)
	if err != nil {
		log.Printf("Failed creating file %s: %s\n", filename, err)
		return
	}
	defer f.Close()

	// BUG FIX: the profile-write error used to be silently ignored, so a
	// truncated/empty profile was still reported as created.
	if err := pprof.WriteHeapProfile(f); err != nil {
		log.Printf("Failed writing heap profile %s: %s\n", filename, err)
		return
	}

	log.Printf("Created memory profile file %s.\n", filename)
}
53 |
// Cleanup finalizes profiling at shutdown: it stops any running CPU
// profile and, when -memprofile was given, writes the heap profile.
func Cleanup() {
	if *cpuprofile != "" {
		pprof.StopCPUProfile()
	}

	if *memprofile != "" {
		// Force a GC first so the heap profile reflects live objects only.
		runtime.GC()

		writeHeapProfile(*memprofile)

		debugMemStats()
	}
}
67 |
// debugMemStats logs a one-line summary of the Go runtime's current
// memory statistics.
func debugMemStats() {
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	log.Printf("Memory stats: In use: %d Total (even if freed): %d System: %d\n",
		stats.Alloc, stats.TotalAlloc, stats.Sys)
}
74 |
// BeforeRun starts the optional profilers selected by flags: a CPU
// profile written to a file and/or an HTTP pprof endpoint served in a
// background goroutine.
func BeforeRun() {
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
	}

	if *httpprof != "" {
		go func() {
			log.Println("start pprof endpoint")
			// ListenAndServe blocks; its return (always an error) is logged.
			log.Printf("finished pprof endpoint: %v\n", http.ListenAndServe(*httpprof, nil))
		}()
	}
}
91 |
// main wires profiling around the server lifecycle: start profilers,
// build the server from the config file, and run it.
// NOTE(review): os.Exit below bypasses the deferred Cleanup, so
// profiles are only written when srv.Start returns normally.
func main() {
	defer Cleanup()

	BeforeRun()

	srv, err := server.New(config)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	srv.Start()
}
105 |
--------------------------------------------------------------------------------
/output/elasticsearch/elasticsearch.go:
--------------------------------------------------------------------------------
1 | package elasticsearch
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "fmt"
7 | "log"
8 | "net/http"
9 | "os"
10 | "time"
11 | "golang.org/x/net/context"
12 |
13 | "github.com/packetzoom/logzoom/buffer"
14 | "github.com/packetzoom/logzoom/output"
15 | "github.com/packetzoom/logzoom/route"
16 | "github.com/packetzoom/logzoom/server"
17 | "github.com/paulbellamy/ratecounter"
18 | "gopkg.in/olivere/elastic.v5"
19 | "gopkg.in/yaml.v2"
20 | )
21 |
const (
	defaultHost        = "127.0.0.1"
	defaultIndexPrefix = "logstash"
	// Bulk-processor tuning: flush every 10 seconds, 20 workers, commit
	// after 10k queued requests; the subscriber channel also holds 10k events.
	esFlushInterval = 10
	esRecvBuffer    = 10000
	esSendBuffer    = 10000
	esWorker        = 20
	esBulkLimit     = 10000
)
31 |
// Indexer couples the elastic bulk processor with the index-naming
// settings and an ingest-rate counter.
type Indexer struct {
	bulkProcessor *elastic.BulkProcessor
	indexPrefix   string
	indexType     string
	RateCounter   *ratecounter.RateCounter
	// lastDisplayUpdate is set at construction; not read in this file.
	lastDisplayUpdate time.Time
}
39 |
// Config holds the YAML-supplied settings for the Elasticsearch output.
type Config struct {
	Hosts       []string `yaml:"hosts"`
	IndexPrefix string   `yaml:"index"`
	IndexType   string   `yaml:"index_type"`
	// Timeout is the HTTP client timeout in seconds (0 falls back to 60s).
	Timeout         int  `yaml:"timeout"`
	GzipEnabled     bool `yaml:"gzip_enabled"`
	InfoLogEnabled  bool `yaml:"info_log_enabled"`
	ErrorLogEnabled bool `yaml:"error_log_enabled"`
	// SampleSize is the percentage (0-100) of events indexed; nil means 100.
	SampleSize *int `yaml:"sample_size,omitempty"`
}
50 |
// ESServer is the Elasticsearch output: it subscribes to the buffer
// and bulk-indexes a sampled subset of the received events.
type ESServer struct {
	name string
	// fields comes from the route config; unlike the redis/s3 outputs,
	// this output's event loop does not filter on it.
	fields map[string]string
	config Config
	// host doubles as the buffer-subscriber ID (see New).
	host  string
	hosts []string
	b     buffer.Sender
	term  chan bool
	idx   *Indexer
}
61 |
// Register this output plugin under the name "elasticsearch" at start-up.
func init() {
	output.Register("elasticsearch", New)
}
65 |
// New creates an unconfigured Elasticsearch output. host mixes the
// default host with the current Unix time — presumably to make the
// buffer-subscriber ID unique per instance; confirm against
// buffer.Sender.AddSubscriber semantics.
func New() (output.Output) {
	return &ESServer{
		host: fmt.Sprintf("%s:%d", defaultHost, time.Now().Unix()),
		term: make(chan bool, 1),
	}
}
72 |
// DevNull is a no-op sink: it reports every write as fully successful
// while discarding the bytes (satisfies io.Writer without importing io).
type DevNull struct{}

// Write discards p and reports its full length as written.
func (DevNull) Write(p []byte) (int, error) {
	return len(p), nil
}
79 |
80 | func indexName(idx string) string {
81 | if len(idx) == 0 {
82 | idx = defaultIndexPrefix
83 | }
84 |
85 | return fmt.Sprintf("%s-%s", idx, time.Now().Format("2006.01.02"))
86 | }
87 |
// index queues one event on the bulk processor under today's index
// name and bumps the ingest-rate counter. It always returns nil.
func (i *Indexer) index(ev *buffer.Event) error {
	doc := *ev.Text
	idx := indexName(i.indexPrefix)
	typ := i.indexType

	request := elastic.NewBulkIndexRequest().Index(idx).Type(typ).Doc(doc)
	i.bulkProcessor.Add(request)
	i.RateCounter.Incr(1)

	return nil
}
99 |
// ValidateConfig checks the required host/index settings and applies
// the default sample size (100%) when none was supplied.
// Note: the default is written to e.config (the copy stored by Init),
// not to the config argument being validated.
func (e *ESServer) ValidateConfig(config *Config) error {
	if len(config.Hosts) == 0 {
		return errors.New("Missing hosts")
	}

	if len(config.IndexPrefix) == 0 {
		return errors.New("Missing index prefix (e.g. logstash)")
	}

	if len(config.IndexType) == 0 {
		return errors.New("Missing index type (e.g. logstash)")
	}

	if e.config.SampleSize == nil {
		i := 100
		e.config.SampleSize = &i
	}
	log.Printf("[%s] Setting Sample Size to %d", e.name, *e.config.SampleSize)

	return nil
}
121 |
122 | func (e *ESServer) Init(name string, config yaml.MapSlice, b buffer.Sender, route route.Route) error {
123 | var esConfig *Config
124 |
125 | // go-yaml doesn't have a great way to partially unmarshal YAML data
126 | // See https://github.com/go-yaml/yaml/issues/13
127 | yamlConfig, _ := yaml.Marshal(config)
128 |
129 | if err := yaml.Unmarshal(yamlConfig, &esConfig); err != nil {
130 | return fmt.Errorf("Error parsing elasticsearch config: %v", err)
131 | }
132 |
133 | e.name = name
134 | e.fields = route.Fields
135 | e.config = *esConfig
136 | e.hosts = esConfig.Hosts
137 | e.b = b
138 |
139 | if err := e.ValidateConfig(esConfig); err != nil {
140 | return fmt.Errorf("Error in config: %v", err)
141 | }
142 |
143 | return nil
144 | }
145 |
146 | func readInputChannel(sampleSize int, idx *Indexer, receiveChan chan *buffer.Event) {
147 | select {
148 | case ev := <-receiveChan:
149 | if (server.RandInt(0, 100) < sampleSize) {
150 | idx.index(ev)
151 | }
152 | }
153 | }
154 |
// insertIndexTemplate installs the package-level IndexTemplate into
// the cluster, with its "template" pattern pointed at this output's
// index prefix. Create(true) requests create-only behavior (see the
// elastic PutTemplate API).
func (es *ESServer) insertIndexTemplate(client *elastic.Client) error {
	var template map[string]interface{}
	err := json.Unmarshal([]byte(IndexTemplate), &template)

	if err != nil {
		return err
	}

	// Match every daily index created with this prefix.
	template["template"] = es.config.IndexPrefix + "-*"

	inserter := elastic.NewIndicesPutTemplateService(client)
	inserter.Name(es.config.IndexPrefix)
	inserter.Create(true)
	inserter.BodyJson(template)

	response, err := inserter.Do(context.Background())

	if response != nil {
		log.Println("Inserted template response:", response.Acknowledged)
	}

	return err
}
178 |
// afterCommit is the bulk processor's post-flush hook; it logs the
// current ingest rate when non-zero.
func (es *ESServer) afterCommit(id int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
	if (es.idx.RateCounter.Rate() > 0) {
		log.Printf("Flushed events to Elasticsearch, current rate: %d/s", es.idx.RateCounter.Rate())
	}
}
184 |
185 | func (es *ESServer) Start() error {
186 | if (es.b == nil) {
187 | log.Printf("[%s] No Route is specified for this output", es.name)
188 | return nil
189 | }
190 | var client *elastic.Client
191 | var err error
192 |
193 | for {
194 | httpClient := http.DefaultClient
195 | timeout := 60 * time.Second
196 |
197 | if es.config.Timeout > 0 {
198 | timeout = time.Duration(es.config.Timeout) * time.Second
199 | }
200 |
201 | log.Printf("[%s] Setting HTTP timeout to %v", es.name, timeout)
202 | log.Printf("[%s] Setting GZIP enabled: %v", es.name, es.config.GzipEnabled)
203 |
204 | httpClient.Timeout = timeout
205 |
206 | var infoLogger, errorLogger *log.Logger
207 |
208 | if es.config.InfoLogEnabled {
209 | infoLogger = log.New(os.Stdout, "", log.LstdFlags)
210 | } else {
211 | infoLogger = log.New(new(DevNull), "", log.LstdFlags)
212 | }
213 |
214 | if es.config.ErrorLogEnabled {
215 | errorLogger = log.New(os.Stderr, "", log.LstdFlags)
216 | } else {
217 | errorLogger = log.New(new(DevNull), "", log.LstdFlags)
218 | }
219 |
220 | client, err = elastic.NewClient(elastic.SetURL(es.hosts...),
221 | elastic.SetHttpClient(httpClient),
222 | elastic.SetGzip(es.config.GzipEnabled),
223 | elastic.SetInfoLog(infoLogger),
224 | elastic.SetErrorLog(errorLogger))
225 |
226 | if err != nil {
227 | log.Printf("Error starting Elasticsearch: %s, will retry", err)
228 | time.Sleep(2 * time.Second)
229 | continue
230 | }
231 |
232 | es.insertIndexTemplate(client)
233 |
234 | break
235 | }
236 |
237 | log.Printf("Connected to Elasticsearch")
238 |
239 | // Add the client as a subscriber
240 | receiveChan := make(chan *buffer.Event, esRecvBuffer)
241 | es.b.AddSubscriber(es.host, receiveChan)
242 | defer es.b.DelSubscriber(es.host)
243 |
244 | rateCounter := ratecounter.NewRateCounter(1 * time.Second)
245 |
246 | // Create bulk processor
247 | bulkProcessor, err := client.BulkProcessor().
248 | After(es.afterCommit). // Function to call after commit
249 | Workers(esWorker). // # of workers
250 | BulkActions(esBulkLimit). // # of queued requests before committed
251 | BulkSize(-1). // No limit
252 | FlushInterval(esFlushInterval * time.Second). // autocommit every # seconds
253 | Stats(true). // gather statistics
254 | Do(context.Background())
255 |
256 | if err != nil {
257 | log.Println(err)
258 | }
259 |
260 | idx := &Indexer{bulkProcessor, es.config.IndexPrefix, es.config.IndexType, rateCounter, time.Now()}
261 | es.idx = idx
262 |
263 | for {
264 |
265 | readInputChannel(*es.config.SampleSize, idx, receiveChan)
266 |
267 | if len(es.term) > 0 {
268 | select {
269 | case <-es.term:
270 | log.Println("Elasticsearch received term signal")
271 | break
272 | }
273 | }
274 | }
275 |
276 | log.Println("Shutting down. Flushing existing events.")
277 | defer bulkProcessor.Close()
278 | return nil
279 | }
280 |
// Stop signals the Start loop to flush outstanding events and exit.
func (es *ESServer) Stop() error {
	es.term <- true
	return nil
}
285 |
--------------------------------------------------------------------------------
/output/elasticsearch/index-template.go:
--------------------------------------------------------------------------------
1 | package elasticsearch
2 |
// IndexTemplate is the Elasticsearch index template installed at
// start-up; its "template" pattern is replaced at runtime with the
// configured index prefix (see insertIndexTemplate).
const IndexTemplate string = `
{
"template" : "logstash-*",
"settings" : {
"index.refresh_interval" : "5s"
},
"mappings" : {
"_default_" : {
"_all" : {"enabled" : true, "omit_norms" : true},
"dynamic_templates" : [ {
"message_field" : {
"match" : "message",
"match_mapping_type" : "text",
"mapping" : {
"type" : "text", "index" : "analyzed", "omit_norms" : true,
"fielddata" : { "format" : "disabled" }
}
}
}, {
"string_fields" : {
"match" : "*",
"match_mapping_type" : "text",
"mapping" : {
"type" : "text", "index" : "analyzed", "omit_norms" : true,
"fielddata" : { "format" : "disabled" },
"fields" : {
"raw" : {"type": "text", "index" : "not_analyzed", "doc_values" : true, "ignore_above" : 256}
}
}
}
}, {
"float_fields" : {
"match" : "*",
"match_mapping_type" : "float",
"mapping" : { "type" : "float", "doc_values" : true }
}
}, {
"double_fields" : {
"match" : "*",
"match_mapping_type" : "double",
"mapping" : { "type" : "double", "doc_values" : true }
}
}, {
"byte_fields" : {
"match" : "*",
"match_mapping_type" : "byte",
"mapping" : { "type" : "byte", "doc_values" : true }
}
}, {
"short_fields" : {
"match" : "*",
"match_mapping_type" : "short",
"mapping" : { "type" : "short", "doc_values" : true }
}
}, {
"integer_fields" : {
"match" : "*",
"match_mapping_type" : "integer",
"mapping" : { "type" : "integer", "doc_values" : true }
}
}, {
"long_fields" : {
"match" : "*",
"match_mapping_type" : "long",
"mapping" : { "type" : "long", "doc_values" : true }
}
}, {
"date_fields" : {
"match" : "*",
"match_mapping_type" : "date",
"mapping" : { "type" : "date", "doc_values" : true }
}
}, {
"geo_point_fields" : {
"match" : "*",
"match_mapping_type" : "geo_point",
"mapping" : { "type" : "geo_point", "doc_values" : true }
}
} ],
"properties" : {
"@timestamp": { "type": "date", "doc_values" : true },
"@version": { "type": "text", "index": "not_analyzed", "doc_values" : true },
"geoip" : {
"type" : "object",
"dynamic": true,
"properties" : {
"ip": { "type": "ip", "doc_values" : true },
"location" : { "type" : "geo_point", "doc_values" : true },
"latitude" : { "type" : "float", "doc_values" : true },
"longitude" : { "type" : "float", "doc_values" : true }
}
}
}
}
}
}
`
100 |
--------------------------------------------------------------------------------
/output/output.go:
--------------------------------------------------------------------------------
1 | package output
2 |
3 | import (
4 | "fmt"
5 | "gopkg.in/yaml.v2"
6 |
7 | "github.com/packetzoom/logzoom/buffer"
8 | "github.com/packetzoom/logzoom/route"
9 | )
10 |
// Output is the contract every output plugin implements: Init parses
// the plugin's YAML stanza and wires its buffer/route, Start runs the
// (blocking) event loop, and Stop signals shutdown.
type Output interface {
	Init(string, yaml.MapSlice, buffer.Sender, route.Route) error
	Start() error
	Stop() error
}
16 |
var (
	// outputs maps a registered plugin name to its constructor.
	outputs = make(map[string]func() Output)
)
20 |
21 | func Register(name string, constructor func()Output) error {
22 | if _, ok := outputs[name]; ok {
23 | return fmt.Errorf("Output %s already exists", name)
24 | }
25 | outputs[name] = constructor
26 | return nil
27 | }
28 |
29 | func Load(name string) (Output, error) {
30 | constructor, ok := outputs[name]
31 | if !ok {
32 | return nil, fmt.Errorf("Output %s not found", name)
33 | }
34 | return constructor(), nil
35 | }
36 |
--------------------------------------------------------------------------------
/output/redis/redis.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "log"
7 | "strconv"
8 | "time"
9 |
10 | "github.com/adjust/redismq"
11 | "github.com/packetzoom/logzoom/buffer"
12 | "github.com/packetzoom/logzoom/output"
13 | "github.com/packetzoom/logzoom/server"
14 | "github.com/packetzoom/logzoom/route"
15 | "github.com/paulbellamy/ratecounter"
16 |
17 | "gopkg.in/yaml.v2"
18 | )
19 |
const (
	// redisFlushInterval is how often (seconds) buffered events are flushed.
	redisFlushInterval = 5
	rateDisplayInterval = 10
	// recvBuffer bounds both the subscriber channel and the buffered queue.
	recvBuffer = 10000
)
25 |
// Config holds the YAML-supplied settings for the Redis output.
type Config struct {
	Host     string `yaml:"host"`
	Port     int    `yaml:"port"`
	Db       int64  `yaml:"db"`
	Password string `yaml:"password"`
	// CopyQueues lists the redismq queues each event is duplicated onto.
	CopyQueues []string `yaml:"copy_queues"`
	// SampleSize is the percentage (0-100) of events forwarded; nil means 100.
	SampleSize *int `yaml:"sample_size,omitempty"`
}
34 |
// RedisServer is the Redis output: it subscribes to the buffer and
// fans matching events out to one or more copy queues.
type RedisServer struct {
	name string
	// fields holds the route's filter fields; events must match them all.
	fields map[string]string
	config Config
	sender buffer.Sender
	// term signals Start to shut down (buffered so Stop never blocks).
	term chan bool
}
42 |
// RedisQueue couples a redismq buffered queue with its feeder channel,
// termination signal, and periodic flush ticker.
type RedisQueue struct {
	queue *redismq.BufferedQueue
	data  chan string
	term  chan bool
	// ticker is stored by value, dereferenced from time.NewTicker once
	// in NewRedisQueue.
	ticker time.Ticker
}
49 |
// NewRedisQueue creates and starts a buffered redismq queue named key,
// ready to receive event lines on its data channel.
func NewRedisQueue(config Config, key string) *RedisQueue {
	port := strconv.Itoa(config.Port)

	queue := redismq.CreateBufferedQueue(config.Host,
		port,
		config.Password,
		config.Db,
		key,
		recvBuffer)
	// NOTE(review): any result of Start is discarded here — confirm
	// redismq surfaces connection errors some other way.
	queue.Start()

	return &RedisQueue{queue: queue,
		data: make(chan string),
		term: make(chan bool),
		ticker: *time.NewTicker(time.Duration(redisFlushInterval) * time.Second)}
}
66 |
67 | func (redisQueue *RedisQueue) insertToRedis(text string) error {
68 | err := redisQueue.queue.Put(text)
69 |
70 | if err != nil {
71 | fmt.Println("Error inserting data: ", err)
72 | return err
73 | }
74 |
75 | if len(redisQueue.queue.Buffer) > recvBuffer {
76 | return redisQueue.flushQueue()
77 | }
78 |
79 | return nil
80 | }
81 |
82 | func (redisQueue *RedisQueue) flushQueue() error {
83 | if len(redisQueue.queue.Buffer) > 0 {
84 | // log.Printf("Flushing %d events to Redis", len(redisQueue.queue.Buffer))
85 | }
86 |
87 | redisQueue.queue.FlushBuffer()
88 | return nil
89 | }
90 |
91 | func (redisQueue *RedisQueue) Start() {
92 | for {
93 | select {
94 | case text := <-redisQueue.data:
95 | redisQueue.insertToRedis(text)
96 | case <-redisQueue.ticker.C:
97 | redisQueue.flushQueue()
98 | case <-redisQueue.term:
99 | redisQueue.flushQueue()
100 | }
101 | }
102 |
103 | }
104 |
// Register this output plugin under the name "redis" at start-up.
func init() {
	output.Register("redis", New)
}
108 |
// New creates an unconfigured Redis output; Init must be called before Start.
func New() output.Output {
	return &RedisServer{term: make(chan bool, 1)}
}
112 |
// ValidateConfig checks the required connection/queue settings and
// applies the default sample size (100%) when none was supplied.
// Note: the default is written to redisServer.config (the copy stored
// by Init), not to the config argument being validated.
func (redisServer *RedisServer) ValidateConfig(config *Config) error {
	if len(config.Host) == 0 {
		return errors.New("Missing Redis host")
	}

	if config.Port <= 0 {
		return errors.New("Missing Redis port")
	}

	if len(config.CopyQueues) == 0 {
		return errors.New("Missing Redis output queues")
	}

	if redisServer.config.SampleSize == nil {
		i := 100
		redisServer.config.SampleSize = &i
	}
	log.Printf("[%s] Setting Sample Size to %d", redisServer.name, *redisServer.config.SampleSize)

	return nil
}
134 |
135 | func (redisServer *RedisServer) Init(name string, config yaml.MapSlice, sender buffer.Sender, route route.Route) error {
136 | var redisConfig *Config
137 |
138 | // go-yaml doesn't have a great way to partially unmarshal YAML data
139 | // See https://github.com/go-yaml/yaml/issues/13
140 | yamlConfig, _ := yaml.Marshal(config)
141 |
142 | if err := yaml.Unmarshal(yamlConfig, &redisConfig); err != nil {
143 | return fmt.Errorf("Error parsing Redis config: %v", err)
144 | }
145 |
146 | redisServer.name = name
147 | redisServer.fields = route.Fields
148 | redisServer.config = *redisConfig
149 | redisServer.sender = sender
150 |
151 | if err := redisServer.ValidateConfig(redisConfig); err != nil {
152 | return fmt.Errorf("Error in config: %v", err)
153 | }
154 |
155 | return nil
156 | }
157 |
158 | func (redisServer *RedisServer) Start() error {
159 | if (redisServer.sender == nil) {
160 | log.Printf("[%s] No Route is specified for this output", redisServer.name)
161 | return nil
162 | }
163 | // Add the client as a subscriber
164 | receiveChan := make(chan *buffer.Event, recvBuffer)
165 | redisServer.sender.AddSubscriber(redisServer.name, receiveChan)
166 | defer redisServer.sender.DelSubscriber(redisServer.name)
167 |
168 | allQueues := make([]*RedisQueue, len(redisServer.config.CopyQueues))
169 |
170 | // Create Redis queue
171 | for index, key := range redisServer.config.CopyQueues {
172 | redisQueue := NewRedisQueue(redisServer.config, key)
173 | allQueues[index] = redisQueue
174 | go redisQueue.Start()
175 | }
176 |
177 | log.Printf("[%s] Started Redis Output Instance", redisServer.name)
178 | // Loop events and publish to Redis
179 | tick := time.NewTicker(time.Duration(redisFlushInterval) * time.Second)
180 | rateCounter := ratecounter.NewRateCounter(1 * time.Second)
181 |
182 | for {
183 | select {
184 | case ev := <-receiveChan:
185 | rateCounter.Incr(1)
186 | var allowed bool
187 | allowed = true
188 | for key, value := range redisServer.fields {
189 | if ((*ev.Fields)[key] == nil || ((*ev.Fields)[key] != nil && value != (*ev.Fields)[key].(string))) {
190 | allowed = false
191 | break
192 | }
193 | }
194 | if allowed && server.RandInt(0, 100) < *redisServer.config.SampleSize {
195 | text := *ev.Text
196 | for _, queue := range allQueues {
197 | queue.data <- text
198 | }
199 | }
200 | case <-tick.C:
201 | if rateCounter.Rate() > 0 {
202 | log.Printf("[%s] Current Redis input rate: %d/s\n", redisServer.name, rateCounter.Rate())
203 | }
204 | case <-redisServer.term:
205 | log.Println("RedisServer received term signal")
206 | for _, queue := range allQueues {
207 | queue.term <- true
208 | }
209 |
210 | return nil
211 | }
212 | }
213 |
214 | return nil
215 | }
216 |
// Stop signals the Start loop to terminate its copy queues and return.
func (s *RedisServer) Stop() error {
	s.term <- true
	return nil
}
221 |
--------------------------------------------------------------------------------
/output/s3/s3.go:
--------------------------------------------------------------------------------
1 | package s3
2 |
3 | import (
4 | "compress/gzip"
5 | "crypto/rand"
6 | "errors"
7 | "fmt"
8 | "io/ioutil"
9 | "log"
10 | "os"
11 | "strings"
12 | "time"
13 |
14 | "github.com/aws/aws-sdk-go/aws"
15 | "github.com/aws/aws-sdk-go/aws/credentials"
16 | "github.com/aws/aws-sdk-go/aws/session"
17 | "github.com/aws/aws-sdk-go/service/s3/s3manager"
18 |
19 | "github.com/packetzoom/logzoom/buffer"
20 | "github.com/packetzoom/logzoom/output"
21 | "github.com/packetzoom/logzoom/route"
22 | "github.com/packetzoom/logzoom/server"
23 |
24 | "github.com/jehiah/go-strftime"
25 | "github.com/paulbellamy/ratecounter"
26 |
27 | "gopkg.in/yaml.v2"
28 | )
29 |
const (
	// s3FlushInterval is how often (seconds) the current file is rolled and uploaded.
	s3FlushInterval = 10
	recvBuffer      = 10000
	// maxSimultaneousUploads bounds the pending-upload channel.
	maxSimultaneousUploads = 8
)
35 |
// uuid returns a random RFC 4122 version-4 UUID string, used to make
// S3 object keys unique.
func uuid() string {
	b := make([]byte, 16)
	// BUG FIX: the error from crypto/rand was silently ignored. A failure
	// is effectively unrecoverable here, so log it and continue with the
	// (zeroed) bytes, matching the prior behavior on failure.
	if _, err := rand.Read(b); err != nil {
		log.Printf("uuid: crypto/rand read failed: %v", err)
	}
	// Set the version (4) and variant (RFC 4122) bits.
	b[6] = (b[6] & 0x0f) | 0x40
	b[8] = (b[8] & 0x3f) | 0x80
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
43 |
// Config holds the YAML-supplied settings for the S3 output.
type Config struct {
	// AwsKeyIdLoc / AwsSecKeyLoc are file paths the credentials are read from.
	AwsKeyIdLoc  string `yaml:"aws_key_id_loc"`
	AwsSecKeyLoc string `yaml:"aws_sec_key_loc"`
	AwsS3Bucket  string `yaml:"aws_s3_bucket"`
	AwsS3Region  string `yaml:"aws_s3_region"`

	// LocalPath is the directory where gzip files are staged before upload.
	LocalPath       string `yaml:"local_path"`
	Path            string `yaml:"s3_path"`
	TimeSliceFormat string `yaml:"time_slice_format"`
	// AwsS3OutputKey is the object-key template; %{path}, %{timeSlice},
	// %{hostname} and %{uuid} placeholders are expanded in doUpload.
	AwsS3OutputKey string `yaml:"aws_s3_output_key"`
	// SampleSize is the percentage (0-100) of events written; nil means 100.
	SampleSize *int `yaml:"sample_size,omitempty"`
}
56 |
// OutputFileInfo identifies one finished staging file and how many
// events it contains.
type OutputFileInfo struct {
	Filename string
	Count    int
}
61 |
// FileSaver accumulates events into the current gzip staging file.
type FileSaver struct {
	Config Config
	// Writer is nil when no file is currently open; WriteToFile creates
	// one lazily and InitiateUploadToS3 detaches it.
	Writer      *gzip.Writer
	FileInfo    OutputFileInfo
	RateCounter *ratecounter.RateCounter
}
68 |
69 | func (fileSaver *FileSaver) WriteToFile(name string, event *buffer.Event) error {
70 | if fileSaver.Writer == nil {
71 | log.Println("Creating new S3 gzip writer")
72 | file, err := ioutil.TempFile(fileSaver.Config.LocalPath, name)
73 |
74 | if err != nil {
75 | log.Printf("Error creating temporary file:", err)
76 | }
77 |
78 | fileSaver.Writer = gzip.NewWriter(file)
79 | fileSaver.FileInfo.Filename = file.Name()
80 | fileSaver.FileInfo.Count = 0
81 | }
82 |
83 | text := *event.Text
84 | _, err := fileSaver.Writer.Write([]byte(text))
85 |
86 | if err != nil {
87 | log.Println("Error writing:", err)
88 | return err
89 | }
90 |
91 | _, err = fileSaver.Writer.Write([]byte("\n"))
92 |
93 | if err != nil {
94 | log.Println("Error writing:", err)
95 | return err
96 | }
97 |
98 | fileSaver.FileInfo.Count += 1
99 | fileSaver.RateCounter.Incr(1)
100 |
101 | return nil
102 | }
103 |
104 | func (s3Writer *S3Writer) doUpload(fileInfo OutputFileInfo) error {
105 | log.Printf("Opening file %s\n", fileInfo.Filename)
106 | reader, err := os.Open(fileInfo.Filename)
107 |
108 | if err != nil {
109 | log.Printf("Failed to open file:", err)
110 | return err
111 | }
112 |
113 | curTime := time.Now()
114 | hostname, _ := os.Hostname()
115 | timeKey := strftime.Format(s3Writer.Config.TimeSliceFormat, curTime)
116 |
117 | valuesForKey := map[string]string{
118 | "path": s3Writer.Config.Path,
119 | "timeSlice": timeKey,
120 | "hostname": hostname,
121 | "uuid": uuid(),
122 | }
123 |
124 | destFile := s3Writer.Config.AwsS3OutputKey
125 |
126 | for key, value := range valuesForKey {
127 | expr := "%{" + key + "}"
128 | destFile = strings.Replace(destFile, expr, value, -1)
129 | }
130 |
131 | result, s3Error := s3Writer.S3Uploader.Upload(&s3manager.UploadInput{
132 | Body: reader,
133 | Bucket: aws.String(s3Writer.Config.AwsS3Bucket),
134 | Key: aws.String(destFile),
135 | ContentEncoding: aws.String("gzip"),
136 | })
137 |
138 | if s3Error == nil {
139 | log.Printf("%d events written to S3 %s", fileInfo.Count, result.Location)
140 | os.Remove(fileInfo.Filename)
141 | } else {
142 | log.Printf("Error uploading to S3", s3Error)
143 | }
144 |
145 | return s3Error
146 |
147 | }
148 |
149 | func (s3Writer *S3Writer) WaitForUpload() {
150 | for {
151 | select {
152 | case fileInfo := <-s3Writer.uploadChannel:
153 | s3Writer.doUpload(fileInfo)
154 | }
155 | }
156 | }
157 |
// InitiateUploadToS3 rolls the current staging file: it detaches the
// writer (so later WriteToFile calls lazily start a fresh file),
// closes it to finalize the gzip stream, and queues the finished file
// for the upload goroutine. No-op when nothing has been written yet.
func (s3Writer *S3Writer) InitiateUploadToS3(fileSaver *FileSaver) {
	if fileSaver.Writer == nil {
		return
	}

	log.Printf("Upload to S3, current event rate: %d/s\n", fileSaver.RateCounter.Rate())
	writer := fileSaver.Writer
	fileInfo := fileSaver.FileInfo
	// Writer is cleared before closing, so the saver is immediately reusable.
	fileSaver.Writer = nil
	writer.Close()

	// May block when maxSimultaneousUploads files are already pending.
	s3Writer.uploadChannel <- fileInfo
}
171 |
// S3Writer is the S3 output: it stages gzip files locally and uploads
// them periodically.
type S3Writer struct {
	name string
	// fields holds the route's filter fields; events must match them all.
	fields     map[string]string
	Config     Config
	Sender     buffer.Sender
	S3Uploader *s3manager.Uploader
	// uploadChannel hands finished files to the WaitForUpload goroutine.
	uploadChannel chan OutputFileInfo
	// term signals Start to shut down (buffered so Stop never blocks).
	term chan bool
}
181 |
// Register this output plugin under the name "s3" at start-up.
func init() {
	output.Register("s3", New)
}
185 |
// New creates an unconfigured S3 output; Init must be called before Start.
func New() (output.Output) {
	return &S3Writer{term: make(chan bool, 1)}
}
189 |
190 | func (s3Writer *S3Writer) ValidateConfig(config *Config) error {
191 | if len(config.LocalPath) == 0 {
192 | return errors.New("missing local path")
193 | }
194 |
195 | // Create the local path if necessary
196 | if err := os.MkdirAll(config.LocalPath, 0700); err != nil {
197 | return errors.New("could not mkdir " + config.LocalPath)
198 | }
199 |
200 | // Try writing to local path
201 | if _, err := ioutil.TempFile(config.LocalPath, "logzoom"); err != nil {
202 | return errors.New("unable to write to " + config.LocalPath)
203 | }
204 |
205 | if len(config.AwsS3Bucket) == 0 {
206 | return errors.New("missing AWS S3 bucket")
207 | }
208 |
209 | if len(config.AwsS3Region) == 0 {
210 | return errors.New("missing AWS S3 region")
211 | }
212 |
213 | if len(config.AwsS3OutputKey) == 0 {
214 | return errors.New("missing AWS S3 output key")
215 | }
216 |
217 | if s3Writer.Config.SampleSize == nil {
218 | i := 100
219 | s3Writer.Config.SampleSize = &i
220 | }
221 | log.Printf("[%s] Setting Sample Size to %d", s3Writer.name, *s3Writer.Config.SampleSize)
222 |
223 | return nil
224 | }
225 |
226 | func (s3Writer *S3Writer) Init(name string, config yaml.MapSlice, sender buffer.Sender, route route.Route) error {
227 | var s3Config *Config
228 |
229 | // go-yaml doesn't have a great way to partially unmarshal YAML data
230 | // See https://github.com/go-yaml/yaml/issues/13
231 | yamlConfig, _ := yaml.Marshal(config)
232 |
233 | if err := yaml.Unmarshal(yamlConfig, &s3Config); err != nil {
234 | return fmt.Errorf("Error parsing S3 config: %v", err)
235 | }
236 |
237 | s3Writer.name = name
238 | s3Writer.fields = route.Fields
239 | s3Writer.uploadChannel = make(chan OutputFileInfo, maxSimultaneousUploads)
240 | s3Writer.Config = *s3Config
241 | s3Writer.Sender = sender
242 |
243 | if err := s3Writer.ValidateConfig(s3Config); err != nil {
244 | return fmt.Errorf("Error in config: %v", err)
245 | }
246 |
247 | aws_access_key_id_data, error := ioutil.ReadFile(s3Writer.Config.AwsKeyIdLoc)
248 | aws_access_key_id := strings.TrimSpace(string(aws_access_key_id_data))
249 | if error != nil {
250 | return fmt.Errorf("AWS Access Key ID not found: %v", error)
251 | }
252 | aws_secret_access_key_data, error := ioutil.ReadFile(s3Writer.Config.AwsSecKeyLoc)
253 | aws_secret_access_key := strings.TrimSpace(string(aws_secret_access_key_data))
254 | if error != nil {
255 | return fmt.Errorf("AWS Secret Key not found: %v", error)
256 | }
257 | token := ""
258 | creds := credentials.NewStaticCredentials(aws_access_key_id, aws_secret_access_key, token)
259 | _, err := creds.Get()
260 |
261 | if err != nil {
262 | return err
263 | }
264 |
265 | session := session.New(&aws.Config{
266 | Region: &s3Writer.Config.AwsS3Region,
267 | Credentials: creds,
268 | })
269 |
270 | s3Writer.S3Uploader = s3manager.NewUploader(session)
271 | log.Println("Done instantiating S3 uploader")
272 |
273 | return nil
274 | }
275 |
276 | func (s3Writer *S3Writer) Start() error {
277 | if (s3Writer.Sender == nil) {
278 | log.Printf("[%s] No route is specified for this output", s3Writer.name)
279 | return nil
280 | }
281 | // Create file saver
282 | fileSaver := new(FileSaver)
283 | fileSaver.Config = s3Writer.Config
284 | fileSaver.RateCounter = ratecounter.NewRateCounter(1 * time.Second)
285 |
286 | id := "s3_output"
287 | // Add the client as a subscriber
288 | receiveChan := make(chan *buffer.Event, recvBuffer)
289 | s3Writer.Sender.AddSubscriber(id, receiveChan)
290 | defer s3Writer.Sender.DelSubscriber(id)
291 |
292 | // Loop events and publish to S3
293 | tick := time.NewTicker(time.Duration(s3FlushInterval) * time.Second)
294 |
295 | go s3Writer.WaitForUpload()
296 |
297 | for {
298 | select {
299 | case ev := <-receiveChan:
300 | var allowed bool
301 | allowed = true
302 | for key, value := range s3Writer.fields {
303 | if ((*ev.Fields)[key] == nil || ((*ev.Fields)[key] != nil && value != (*ev.Fields)[key].(string))) {
304 | allowed = false
305 | break
306 | }
307 | }
308 | if allowed && server.RandInt(0, 100) <= *s3Writer.Config.SampleSize {
309 | fileSaver.WriteToFile(s3Writer.name, ev)
310 | }
311 | case <-tick.C:
312 | s3Writer.InitiateUploadToS3(fileSaver)
313 | case <-s3Writer.term:
314 | log.Println("S3Writer received term signal")
315 | return nil
316 | }
317 | }
318 |
319 | return nil
320 | }
321 |
// Stop signals the Start loop to terminate.
// NOTE(review): assumes s.term is buffered so this send does not block
// when Start is not running — confirm against the S3Writer constructor.
func (s *S3Writer) Stop() error {
	s.term <- true
	return nil
}
326 |
--------------------------------------------------------------------------------
/output/tcp/tcp.go:
--------------------------------------------------------------------------------
1 | package tcp
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "net"
7 |
8 | "github.com/packetzoom/logzoom/buffer"
9 | "github.com/packetzoom/logzoom/output"
10 | "github.com/packetzoom/logzoom/route"
11 | "github.com/packetzoom/logzoom/server"
12 | "gopkg.in/yaml.v2"
13 | )
14 |
const (
	// recvBuffer is the capacity of each subscriber's event channel.
	recvBuffer = 100
)
18 |
// Config holds the YAML-configurable settings for the tcp output.
type Config struct {
	// Host is the address to listen on, e.g. ":6000".
	Host string `yaml:"host"`
	// SampleSize is the percentage of events to forward; nil defaults
	// to 100 in Start.
	SampleSize *int `yaml:"sample_size,omitempty"`
}
23 |
// TCPServer is an output that forwards buffered events to connected
// tcp clients.
type TCPServer struct {
	// name is the configured output name, used in log lines and as the
	// buffer subscriber id.
	name string
	// fields are the route rules an event must match to be forwarded.
	fields map[string]string
	// b is the buffer this output subscribes to; nil when no route
	// targets this output.
	b buffer.Sender
	// term signals Start to shut down (buffered, capacity 1).
	term chan bool
	// config is the parsed YAML configuration for this output.
	config *Config
}
31 |
// init registers this output plugin under the name "tcp".
func init() {
	output.Register("tcp", New)
}
35 |
36 | func New() (output.Output) {
37 | return &TCPServer{term: make(chan bool, 1)}
38 | }
39 |
40 | // lumberConn handles an incoming connection from a lumberjack client
41 | func (s *TCPServer) accept(c net.Conn) {
42 | defer func() {
43 | s.b.DelSubscriber(s.name)
44 | log.Printf("[%s - %s] closing tcp connection", s.name, c.RemoteAddr().String())
45 | c.Close()
46 | }()
47 |
48 | log.Printf("[%s - %s] accepting tcp connection", s.name, c.RemoteAddr().String())
49 |
50 | // Add the client as a subscriber
51 | r := make(chan *buffer.Event, recvBuffer)
52 | s.b.AddSubscriber(s.name, r)
53 |
54 | for {
55 | select {
56 | case ev := <-r:
57 | var allowed bool
58 | allowed = true
59 | for key, value := range s.fields {
60 | if ((*ev.Fields)[key] == nil || ((*ev.Fields)[key] != nil && value != (*ev.Fields)[key].(string))) {
61 | allowed = false
62 | break
63 | }
64 | }
65 | if allowed && server.RandInt(0, 100) < *s.config.SampleSize {
66 | _, err := c.Write([]byte(fmt.Sprintf("%s %s\n", ev.Source, *ev.Text)))
67 | if err != nil {
68 | log.Printf("[%s - %s] error sending event to tcp connection: %v", s.name, c.RemoteAddr().String(), err)
69 | return
70 | }
71 | }
72 | }
73 | }
74 |
75 | }
76 |
77 | func (s *TCPServer) Init(name string, config yaml.MapSlice, b buffer.Sender, route route.Route) error {
78 | var tcpConfig *Config
79 |
80 | // go-yaml doesn't have a great way to partially unmarshal YAML data
81 | // See https://github.com/go-yaml/yaml/issues/13
82 | yamlConfig, _ := yaml.Marshal(config)
83 |
84 | if err := yaml.Unmarshal(yamlConfig, &tcpConfig); err != nil {
85 | return fmt.Errorf("Error parsing tcp config: %v", err)
86 | }
87 |
88 | s.name = name
89 | s.fields = route.Fields
90 | s.config = tcpConfig
91 | s.b = b
92 | return nil
93 | }
94 |
95 | func (s *TCPServer) Start() error {
96 | if (s.b == nil) {
97 | log.Printf("[%s] No Route is specified for this output", s.name)
98 | return nil
99 | }
100 | ln, err := net.Listen("tcp", s.config.Host)
101 | if err != nil {
102 | return fmt.Errorf("TCPServer: listener failed: %v", err)
103 | }
104 |
105 | if s.config.SampleSize == nil {
106 | i := 100
107 | s.config.SampleSize = &i
108 | }
109 | log.Printf("[%s] Setting Sample Size to %d", s.name, *s.config.SampleSize)
110 |
111 | for {
112 | select {
113 | case <-s.term:
114 | log.Println("TCPServer received term signal")
115 | return nil
116 | default:
117 | conn, err := ln.Accept()
118 | if err != nil {
119 | log.Println("Error accepting tcp connection: %v", err)
120 | continue
121 | }
122 | go s.accept(conn)
123 | }
124 | }
125 |
126 | return nil
127 | }
128 |
// Stop signals the Start loop to terminate. term is buffered with
// capacity 1, so this send does not block.
func (s *TCPServer) Stop() error {
	s.term <- true
	return nil
}
133 |
--------------------------------------------------------------------------------
/output/websocket/template.go:
--------------------------------------------------------------------------------
1 | package websocket
2 |
3 | var index = `
4 |
5 |
6 |
7 |
8 | Logs
9 |
19 |
20 |
21 | Log Files
22 |
23 | {{range $source, $exists := .}}
24 | - {{$source}}
25 | {{end}}
26 |
27 |
28 |
29 | `
30 | var logs = `
31 |
32 |
33 |
34 |
35 | {{.Source}}
36 |
47 |
48 |
58 |
59 |
60 | {{.Source}}
61 |
62 |
63 |
64 | `
65 |
--------------------------------------------------------------------------------
/output/websocket/websocket.go:
--------------------------------------------------------------------------------
1 | package websocket
2 |
import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"sync"
	"text/template"
	"time"

	"github.com/packetzoom/logzoom/buffer"
	"github.com/packetzoom/logzoom/output"
	"github.com/packetzoom/logzoom/route"
	"github.com/packetzoom/logzoom/server"
	"golang.org/x/net/websocket"

	"gopkg.in/yaml.v2"
)
19 |
const (
	// recvBuffer is the capacity of each subscriber's event channel.
	recvBuffer = 100
)
23 |
// Config holds the YAML-configurable settings for the websocket output.
type Config struct {
	// Host is the address the HTTP/websocket server listens on.
	Host string `yaml:"host"`
	// SampleSize is the percentage of events to forward; nil defaults
	// to 100 in Start.
	SampleSize *int `yaml:"sample_size,omitempty"`
}
28 |
// WebSocketServer serves buffered events to websocket clients and a
// small HTML UI listing recently-seen log sources.
type WebSocketServer struct {
	// name is the configured output name, used in log lines and
	// subscriber ids.
	name string
	// fields are the route rules for this output.
	fields map[string]string
	// b is the buffer this output subscribes to; nil when no route
	// targets this output.
	b buffer.Sender
	// term is sent on by Stop (buffered, capacity 1).
	term chan bool
	// config is the parsed YAML configuration.
	config *Config

	// mtx guards logs, which maps a source name to the time an event
	// from it was last seen (maintained by logListMaintainer).
	mtx sync.RWMutex
	logs map[string]time.Time
}
39 |
40 | var (
41 | indexTemplate, _ = template.New("index").Parse(index)
42 | logsTemplate, _ = template.New("logs").Parse(logs)
43 | )
44 |
// init registers this output plugin under the name "websocket".
func init() {
	output.Register("websocket", New)
}
48 |
49 | func New() (output.Output) {
50 | return &WebSocketServer{
51 | logs: make(map[string]time.Time),
52 | term: make(chan bool, 1),
53 | }
54 | }
55 |
56 | func (ws *WebSocketServer) wslogsHandler(w *websocket.Conn) {
57 | source := w.Request().FormValue("source")
58 | host := fmt.Sprintf("%s/%d", w.RemoteAddr().String(), time.Now().UnixNano())
59 |
60 | defer func() {
61 | log.Printf("[%s - %s] closing websocket conn", ws.name, w.RemoteAddr().String())
62 | ws.b.DelSubscriber(host)
63 | w.Close()
64 | }()
65 |
66 | log.Printf("[%s - %s] accepting websocket conn", ws.name, w.RemoteAddr().String())
67 |
68 | r := make(chan *buffer.Event, recvBuffer)
69 | ws.b.AddSubscriber(host, r)
70 |
71 | for {
72 | select {
73 | case ev := <-r:
74 | if len(source) > 0 {
75 | if ev.Source != source {
76 | continue
77 | }
78 | }
79 |
80 | if server.RandInt(0, 100) >= *ws.config.SampleSize {
81 | continue
82 | }
83 |
84 | err := websocket.Message.Send(w, *ev.Text)
85 | if err != nil {
86 | log.Printf("[%s] error sending ws message: %v", w.RemoteAddr().String(), err.Error())
87 | return
88 | }
89 | }
90 | }
91 | }
92 |
93 | func (ws *WebSocketServer) logsHandler(w http.ResponseWriter, r *http.Request) {
94 | source := "*"
95 | host := fmt.Sprintf("ws://%s/wslogs", r.Host)
96 |
97 | if src := r.FormValue("source"); len(src) > 0 {
98 | source = src
99 | host = fmt.Sprintf("%s?source=%s", host, src)
100 | }
101 |
102 | logsTemplate.Execute(w, struct{ Source, Server string }{source, host})
103 | }
104 |
105 | func (ws *WebSocketServer) indexHandler(w http.ResponseWriter, r *http.Request) {
106 | ws.mtx.RLock()
107 | defer ws.mtx.RUnlock()
108 | indexTemplate.Execute(w, ws.logs)
109 | }
110 |
111 | func (ws *WebSocketServer) logListMaintainer() {
112 | defer func() {
113 | ws.b.DelSubscriber(ws.name + "_logList")
114 | }()
115 |
116 | r := make(chan *buffer.Event, recvBuffer)
117 | ws.b.AddSubscriber(ws.name + "_logList", r)
118 |
119 | ticker := time.NewTicker(time.Duration(600) * time.Second)
120 |
121 | for {
122 | select {
123 | case ev := <-r:
124 | ws.mtx.Lock()
125 | ws.logs[ev.Source] = time.Now()
126 | ws.mtx.Unlock()
127 | case <-ticker.C:
128 | t := time.Now()
129 | ws.mtx.Lock()
130 | for log, ttl := range ws.logs {
131 | if t.Sub(ttl).Seconds() > 600 {
132 | delete(ws.logs, log)
133 | }
134 | }
135 | ws.mtx.Unlock()
136 | }
137 | }
138 | }
139 |
140 | func (ws *WebSocketServer) Init(name string, config yaml.MapSlice, b buffer.Sender, route route.Route) error {
141 | var wsConfig *Config
142 |
143 | // go-yaml doesn't have a great way to partially unmarshal YAML data
144 | // See https://github.com/go-yaml/yaml/issues/13
145 | yamlConfig, _ := yaml.Marshal(config)
146 |
147 | if err := yaml.Unmarshal(yamlConfig, &wsConfig); err != nil {
148 | return fmt.Errorf("Error parsing websocket config: %v", err)
149 | }
150 |
151 | ws.name = name
152 | ws.fields = route.Fields
153 | ws.config = wsConfig
154 | ws.b = b
155 | return nil
156 | }
157 |
// Start registers the HTTP handlers on the default mux and serves the
// websocket UI on the configured host, blocking in ListenAndServe for
// the lifetime of the process.
//
// NOTE(review): ws.term is never read here, so Stop cannot actually
// shut this server down (graceful shutdown needs http.Server.Close,
// Go 1.8+, while this repo pins go1.6) — confirm whether that matters.
func (ws *WebSocketServer) Start() error {
	if (ws.b == nil) {
		log.Printf("[%s] No route is specified for this output", ws.name)
		return nil
	}

	// Default to forwarding 100% of events when no sample size is set.
	if ws.config.SampleSize == nil {
		i := 100
		ws.config.SampleSize = &i
	}
	log.Printf("[%s] Setting Sample Size to %d", ws.name, *ws.config.SampleSize)

	http.Handle("/wslogs", websocket.Handler(ws.wslogsHandler))
	http.HandleFunc("/logs", ws.logsHandler)
	http.HandleFunc("/", ws.indexHandler)

	// Maintain the list of recently-seen sources for the index page.
	go ws.logListMaintainer()

	err := http.ListenAndServe(ws.config.Host, nil)
	if err != nil {
		return fmt.Errorf("Error starting websocket server: %v", err)
	}

	return nil
}
183 |
// Stop sends a term signal (non-blocking: term has capacity 1).
// NOTE(review): Start never reads ws.term, so this does not actually
// stop the HTTP server — confirm intended behavior.
func (ws *WebSocketServer) Stop() error {
	ws.term <- true
	return nil
}
188 |
--------------------------------------------------------------------------------
/route/route.go:
--------------------------------------------------------------------------------
1 | package route
2 |
// Route connects a named input to a named output. Fields holds the
// rule key/value pairs an event must carry to be forwarded.
type Route struct {
	// Input is the name of the input whose buffer feeds this route.
	Input string
	// Output is the name of the output consuming the buffer.
	Output string
	// Fields maps field names to the exact string values required.
	Fields map[string]string
}
8 |
--------------------------------------------------------------------------------
/server/config.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "fmt"
5 | "gopkg.in/yaml.v2"
6 | "io/ioutil"
7 | )
8 |
// Config is the top-level YAML configuration: lists of named input,
// output and route sections. Each section body is kept as a raw
// yaml.MapSlice so the individual plugins can unmarshal their own
// settings later.
type Config struct {
	Inputs []map[string]yaml.MapSlice `yaml:"inputs"`
	Outputs []map[string]yaml.MapSlice `yaml:"outputs"`
	Routes []map[string]yaml.MapSlice `yaml:"routes"`
}
14 |
15 | func LoadConfig(file string) (*Config, error) {
16 | b, err := ioutil.ReadFile(file)
17 | if err != nil {
18 | return nil, fmt.Errorf("Could not read config file %s: %v", file, err)
19 | }
20 |
21 | var conf *Config
22 | err = yaml.Unmarshal(b, &conf)
23 | if err != nil {
24 | return nil, fmt.Errorf("Failed to parse config %s: %v", file, err)
25 | }
26 |
27 | return conf, nil
28 | }
29 |
--------------------------------------------------------------------------------
/server/rand.go:
--------------------------------------------------------------------------------
1 | package server
2 |
import (
	"math/rand"
	"sync"
	"time"
)
7 |
// seedOnce ensures the global PRNG is seeded exactly once. Re-seeding
// on every call (the previous behavior) is slow, races with concurrent
// callers on the shared seed, and returns identical values for calls
// landing on the same clock tick.
var seedOnce sync.Once

// RandInt returns a pseudo-random integer in the half-open interval
// [min, max). It panics if max <= min (rand.Intn requires a positive
// argument).
func RandInt(min int, max int) int {
	seedOnce.Do(func() {
		rand.Seed(time.Now().UTC().UnixNano())
	})
	return min + rand.Intn(max-min)
}
12 |
--------------------------------------------------------------------------------
/server/server.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "log"
5 | "os"
6 | "os/signal"
7 | "sync"
8 | "syscall"
9 | "gopkg.in/yaml.v2"
10 | "github.com/packetzoom/logzoom/buffer"
11 | "github.com/packetzoom/logzoom/input"
12 | "github.com/packetzoom/logzoom/output"
13 | "github.com/packetzoom/logzoom/route"
14 | )
15 |
// Server owns the configured inputs, outputs, routes and the buffers
// connecting them.
type Server struct {
	Config *Config
	// buffers maps a route's input name to the event buffer feeding it.
	buffers map[string]*buffer.Buffer

	// mtx guards the maps below during Start/Stop.
	mtx sync.Mutex
	inputs map[string]input.Input
	outputs map[string]output.Output
	routes map[string]route.Route
}
25 |
// signalCatcher returns a channel (capacity 1) that receives SIGINT,
// SIGTERM and SIGQUIT notifications from the OS.
func signalCatcher() chan os.Signal {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	return sigs
}
31 |
32 | func New(configFile string) (*Server, error) {
33 | config, err := LoadConfig(configFile)
34 | if err != nil {
35 | return nil, err
36 | }
37 |
38 | return &Server{
39 | Config: config,
40 | buffers: make(map[string]*buffer.Buffer),
41 | inputs: make(map[string]input.Input),
42 | outputs: make(map[string]output.Output),
43 | routes: make(map[string]route.Route),
44 | }, nil
45 | }
46 |
47 | func (s *Server) Start() {
48 | log.Println("Starting server")
49 |
50 |
51 | s.mtx.Lock()
52 |
53 | // Start buffer
54 | log.Println("Starting buffer")
55 | // Init routes
56 | for _, routeEntry := range s.Config.Routes {
57 | for name, routeDetails := range routeEntry {
58 | var input string
59 | var output string
60 | rules := make(map[string]string)
61 | for _, item := range routeDetails {
62 | if item.Key.(string) == "input" {
63 | input = item.Value.(string)
64 | }
65 | if item.Key.(string) == "output" {
66 | output = item.Value.(string)
67 | }
68 | if item.Key.(string) == "rules" {
69 | for _, rule := range item.Value.(yaml.MapSlice) {
70 | rules[rule.Key.(string)] = rule.Value.(string)
71 | }
72 | }
73 | }
74 | if (&input != nil && &output != nil) {
75 | s.buffers[input] = buffer.New()
76 | go s.buffers[input].Start()
77 | route := route.Route{Input: input, Output: output, Fields: rules}
78 | s.routes[name] = route
79 | }
80 | }
81 | }
82 |
83 | // Start inputs
84 | for _, inputEntry := range s.Config.Inputs {
85 | for name, inputConfig := range inputEntry {
86 | for i, item := range inputConfig {
87 | if i > 0 {
88 | panic("There are more than one configuration specified for an input entry.")
89 | }
90 | if i == 0 { //There should be only 1 input per entry
91 | in, err := input.Load(item.Key.(string))
92 | if err != nil {
93 | log.Println(err.Error)
94 | continue
95 | }
96 | err = in.Init(name, item.Value.(yaml.MapSlice), s.buffers[name]);
97 | if err != nil {
98 | log.Fatalf("Failed to init %s input: %v", item.Key, err)
99 | }
100 | go func(name string, in input.Input) {
101 | if err := in.Start(); err != nil {
102 | log.Fatalf("Error starting input %s: %v", item.Key, err)
103 | }
104 | } (name, in)
105 | s.inputs[name] = in
106 | }
107 | }
108 | }
109 | }
110 | // Start outputs
111 | for _, outputEntry := range s.Config.Outputs {
112 | for name, outputConfig := range outputEntry {
113 | for i, item := range outputConfig {
114 | if i > 0 {
115 | panic("There are more than one configuration specified for an output entry.")
116 | }
117 | if i == 0 { //There should be only 1 output per entry
118 | out, err := output.Load(item.Key.(string))
119 | if err != nil {
120 | log.Println(err.Error)
121 | continue
122 | }
123 | init := false
124 | for route_name, value := range s.routes {
125 | if value.Output == name {
126 | err = out.Init(name, item.Value.(yaml.MapSlice), s.buffers[value.Input], s.routes[route_name]);
127 | if err != nil {
128 | log.Fatalf("Failed to init %s input: %v", item.Key, err)
129 | }
130 | init = true
131 | break
132 | }
133 | }
134 | if init == false {
135 | err = out.Init(name, item.Value.(yaml.MapSlice), nil, route.Route{Input: "", Output: "", Fields: make(map[string]string)});
136 | if err != nil {
137 | log.Fatalf("Failed to init %s output: %v", item.Key, err)
138 | }
139 | }
140 | go func(name string, instance output.Output) {
141 | if err := out.Start(); err != nil {
142 | log.Fatalf("Error starting output %s: %v", item.Key, err)
143 | }
144 | } (name, out)
145 | s.outputs[name] = out
146 | }
147 | }
148 | }
149 | }
150 | s.mtx.Unlock()
151 |
152 | // Wait for kill signal
153 | <-signalCatcher()
154 | log.Printf("Received quit signal")
155 |
156 | // Stop Server
157 | s.Stop()
158 | }
159 |
160 | func (s *Server) Stop() {
161 | log.Println("Stopping server")
162 |
163 | s.mtx.Lock()
164 |
165 | // stop inputs
166 | for name, in := range s.inputs {
167 | log.Printf("Stopping input %s", name)
168 | if err := in.Stop(); err != nil {
169 | log.Printf("Error stopping %s input: %v", name, err)
170 | }
171 | }
172 |
173 | // stop ouputs
174 | for name, out := range s.outputs {
175 | log.Printf("Stopping output %s", name)
176 | if err := out.Stop(); err != nil {
177 | log.Printf("Error stopping %s output: %v", name, err)
178 | }
179 | }
180 |
181 | s.mtx.Unlock()
182 |
183 | for name, buffer := range s.buffers {
184 | log.Printf("Stopping buffer for input: %s", name)
185 | if err := buffer.Stop(); err != nil {
186 | log.Printf("Error stopping %s buffer: %v", name, err)
187 | }
188 | }
189 | }
190 |
--------------------------------------------------------------------------------