├── elasticsearch
└── Dockerfile
├── graylog
├── Dockerfile
└── graylog.conf
├── fluentd
├── fluent.conf
└── Dockerfile
├── client
├── Dockerfile
└── td-agent-bit.conf
├── README.md
└── docker-compose.yml
/elasticsearch/Dockerfile:
--------------------------------------------------------------------------------
1 | # Elasticsearch 2.x backend node for the Graylog stack.
2 | FROM elasticsearch:2.4-alpine
3 |
4 | # Cluster name must match elasticsearch_cluster_name in graylog/graylog.conf.
5 | CMD ["elasticsearch","-Des.cluster.name=graylog"]
6 |
--------------------------------------------------------------------------------
/graylog/Dockerfile:
--------------------------------------------------------------------------------
1 | # Graylog server with this stack's configuration baked in.
2 | FROM graylog2/server:2.2.0-1
3 |
4 | # Overwrite the image's default config; graylog.conf points the server at
5 | # the "elasticsearch" and "mongo" services defined in docker-compose.yml.
6 | COPY graylog.conf /usr/share/graylog/data/config/graylog.conf
7 |
--------------------------------------------------------------------------------
/fluentd/fluent.conf:
--------------------------------------------------------------------------------
1 | <source>
2 |   @type forward
3 | </source>
4 |
5 | <match **>
6 |   @type gelf
7 |   host graylog
8 |   port 12201
9 |   flush_interval 2s
10 | </match>
11 |
--------------------------------------------------------------------------------
/fluentd/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fluent/fluentd:v0.12
2 |
3 | # Root is required to install packages and to write into /fluentd/plugins.
4 | # NOTE(review): USER is never switched back to a non-root user afterwards,
5 | # so the container runs fluentd as root — confirm whether that is intended.
6 | USER root
7 |
8 | # ca-certificates/openssl are needed for the HTTPS download below; the gelf
9 | # gem backs the out_gelf plugin. ("sudo" was dropped: nothing here uses it.)
10 | RUN apk --no-cache add ca-certificates openssl && \
11 |     gem install gelf --no-document
12 |
13 | # Fetch the gelf output plugin straight into the plugin dir (no "cd" needed).
14 | # NOTE(review): "master" is a moving target — pin a commit SHA for
15 | # reproducible builds.
16 | RUN wget -O /fluentd/plugins/out_gelf.rb \
17 |       https://raw.githubusercontent.com/emsearcy/fluent-plugin-gelf/master/lib/fluent/plugin/out_gelf.rb
18 |
19 | COPY fluent.conf /fluentd/etc/fluent.conf
20 |
--------------------------------------------------------------------------------
/client/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:16.04
2 |
3 | # Use bash with pipefail so a failed download in the "wget | apt-key" pipe
4 | # below fails the build instead of being masked by the pipe's exit status.
5 | SHELL ["/bin/bash", "-o", "pipefail", "-c"]
6 |
7 | # wget is only needed to fetch the fluentbit repository signing key.
8 | RUN apt-get update && \
9 |     apt-get install -y --no-install-recommends wget && \
10 |     rm -rf /var/lib/apt/lists/*
11 |
12 | # Fetch the signing key over HTTPS (the trust anchor itself should not
13 | # travel over an unauthenticated channel), then register the repository.
14 | RUN wget -qO - https://packages.fluentbit.io/fluentbit.key | apt-key add -
15 | RUN echo "deb http://packages.fluentbit.io/ubuntu xenial main" >> /etc/apt/sources.list
16 |
17 | # td-agent-bit tails /var/log/syslog (see td-agent-bit.conf); touch the file
18 | # so the tail input has a target from the start. Clean apt lists in the same
19 | # layer that created them to keep the image small.
20 | RUN apt-get update && \
21 |     apt-get install -y --no-install-recommends td-agent-bit && \
22 |     rm -rf /var/lib/apt/lists/* && \
23 |     touch /var/log/syslog
24 |
25 | COPY td-agent-bit.conf /etc/td-agent-bit/td-agent-bit.conf
26 |
27 | CMD ["/opt/td-agent-bit/bin/td-agent-bit","-c","/etc/td-agent-bit/td-agent-bit.conf"]
28 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Fluentd + fluent-bit + Graylog2 + Elasticsearch
2 |
3 | I couldn't find an all-in-one solution for this stack that worked out of the box, so I made this.
4 |
5 | Keep in mind that this is merely a proof of concept, and should _not_ be used as-is in production.
6 |
7 | ## Setup
8 |
9 | 1. `docker-compose build`
10 | 2. `docker-compose up -d`
11 | 3. Go to http://127.0.0.1:9000 and log in using `admin` as both the username and password
12 | 4. Go to System > Inputs
13 | 5. Add GELF UDP with the default values, but a Receive Buffer Size of 212992
14 |
15 | From there, you should be good to go.
16 |
17 | To check if it's working, I exec bash into the client container and run `echo "test" >> /var/log/syslog`, and it should show up in Graylog.
18 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | fluentd:
4 | build: ./fluentd
5 | ports:
6 | - "24224:24224"
7 | - "5140:5140"
8 |
9 | elasticsearch:
10 | build: ./elasticsearch
11 | ports:
12 | - "9200:9200"
13 | - "9300:9300"
14 | - "9350:9350"
15 |
16 | mongo:
17 | image: "mongo:3"
18 |
19 | graylog:
20 | build: ./graylog
21 | ports:
22 | - "9000:9000"
23 | - "1514:1514/udp"
24 | - "12201:12201/udp"
25 | - "12900:12900"
26 | links:
27 | - elasticsearch
28 | - mongo
29 | environment:
30 | GRAYLOG_PASSWORD_SECRET: somepasswordpepper
31 | GRAYLOG_ROOT_PASSWORD_SHA2: 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
32 | GRAYLOG_WEB_ENDPOINT_URI: http://127.0.0.1:9000/api
33 |
34 | client:
35 | build: ./client
36 | links:
37 | - fluentd
38 |
--------------------------------------------------------------------------------
/client/td-agent-bit.conf:
--------------------------------------------------------------------------------
1 | [SERVICE]
2 | # Flush
3 | # =====
4 | # Set an interval of seconds before to flush records to a destination
5 | Flush 5
6 |
7 | # Daemon
8 | # ======
9 | # Instruct Fluent Bit to run in foreground or background mode.
10 | Daemon Off
11 |
12 | # Log_Level
13 | # =========
14 | # Set the verbosity level of the service, values can be:
15 | #
16 | # - error
17 | # - warning
18 | # - info
19 | # - debug
20 | # - trace
21 | #
22 | # By default 'info' is set, that means it includes 'error' and 'warning'.
23 | Log_Level info
24 |
25 | # HTTP Monitoring Server
26 | # ======================
27 | #
28 | # HTTP_Monitor: enable/disable the HTTP Server to monitor
29 | # Fluent Bit internals.
30 | # HTTP_Port : specify the TCP port of the HTTP Server
31 | HTTP_Monitor Off
32 | HTTP_Port 2020
33 |
34 | [INPUT]
35 | Name cpu
36 | Tag cpu.local
37 |
38 | [INPUT]
39 | Name tail
40 | Path /var/log/syslog
41 |
42 | [OUTPUT]
43 | Name forward
44 | Host fluentd
45 | Port 24224
46 | Match *
47 |
48 | [OUTPUT]
49 | Name stdout
50 | Match **
51 |
--------------------------------------------------------------------------------
/graylog/graylog.conf:
--------------------------------------------------------------------------------
1 | # If you are running more than one instances of Graylog server you have to select one of these
2 | # instances as master. The master will perform some periodical tasks that non-masters won't perform.
3 | is_master = true
4 |
5 | # The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
6 | # to use an absolute file path here if you are starting Graylog server from init scripts or similar.
7 | node_id_file = /usr/share/graylog/data/config/node-id
8 |
9 | # You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
10 | # Generate one by using for example: pwgen -N 1 -s 96
11 | password_secret = replacethiswithyourownsecret!
12 |
13 | # The default root user is named 'admin'
14 | #root_username = admin
15 |
16 | # You MUST specify a hash password for the root user (which you only need to initially set up the
17 | # system and in case you lose connectivity to your authentication backend)
18 | # This password cannot be changed using the API or via the web interface. If you need to change it,
19 | # modify it in this file.
20 | # Create one by using for example: echo -n yourpassword | shasum -a 256
21 | # and put the resulting hash value into the following line
22 | root_password_sha2 = 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
23 |
24 | # The email address of the root user.
25 | # Default is empty
26 | #root_email = ""
27 |
28 | # The time zone setting of the root user.
29 | # The configured time zone must be parseable by http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html#forID-java.lang.String-
30 | # Default is UTC
31 | #root_timezone = UTC
32 |
33 | # Set plugin directory here (relative or absolute)
34 | plugin_dir = /usr/share/graylog/plugin
35 |
36 | # REST API listen URI. Must be reachable by other Graylog server nodes if you run a cluster.
37 | # When using Graylog Collectors, this URI will be used to receive heartbeat messages and must be accessible for all collectors.
38 | rest_listen_uri = http://0.0.0.0:9000/api/
39 |
40 | # REST API transport address. Defaults to the value of rest_listen_uri. Exception: If rest_listen_uri
41 | # is set to a wildcard IP address (0.0.0.0) the first non-loopback IPv4 system address is used.
42 | # If set, this will be promoted in the cluster discovery APIs, so other nodes may try to connect on
43 | # this address and it is used to generate URLs addressing entities in the REST API. (see rest_listen_uri)
44 | # You will need to define this, if your Graylog server is running behind a HTTP proxy that is rewriting
45 | # the scheme, host name or URI.
46 | #rest_transport_uri = http://192.168.1.1:9000/api/
47 |
48 | # Enable CORS headers for REST API. This is necessary for JS-clients accessing the server directly.
49 | # If these are disabled, modern browsers will not be able to retrieve resources from the server.
50 | # This is enabled by default. Uncomment the next line to disable it.
51 | #rest_enable_cors = false
52 |
53 | # Enable GZIP support for REST API. This compresses API responses and therefore helps to reduce
54 | # overall round trip times. This is disabled by default. Uncomment the next line to enable it.
55 | #rest_enable_gzip = true
56 |
57 | # Enable HTTPS support for the REST API. This secures the communication with the REST API with
58 | # TLS to prevent request forgery and eavesdropping. This is disabled by default. Uncomment the
59 | # next line to enable it.
60 | #rest_enable_tls = true
61 |
62 | # The X.509 certificate file to use for securing the REST API.
63 | #rest_tls_cert_file = /path/to/graylog.crt
64 |
65 | # The private key to use for securing the REST API.
66 | #rest_tls_key_file = /path/to/graylog.key
67 |
68 | # The password to unlock the private key used for securing the REST API.
69 | #rest_tls_key_password = secret
70 |
71 | # The maximum size of the HTTP request headers in bytes.
72 | #rest_max_header_size = 8192
73 |
74 | # The maximal length of the initial HTTP/1.1 line in bytes.
75 | #rest_max_initial_line_length = 4096
76 |
77 | # The size of the thread pool used exclusively for serving the REST API.
78 | #rest_thread_pool_size = 16
79 |
80 | # Enable the embedded Graylog web interface.
81 | # Default: true
82 | #web_enable = false
83 |
84 | # Web interface listen URI
85 | web_listen_uri = http://0.0.0.0:9000/
86 |
87 | # Enable CORS headers for the web interface. This is necessary for JS-clients accessing the server directly.
88 | # If these are disabled, modern browsers will not be able to retrieve resources from the server.
89 | web_enable_cors = true
90 |
91 | # Enable/disable GZIP support for the web interface. This compresses HTTP responses and therefore helps to reduce
92 | # overall round trip times. This is enabled by default. Uncomment the next line to disable it.
93 | #web_enable_gzip = false
94 |
95 | # Enable HTTPS support for the web interface. This secures the communication of the web browser with the web interface
96 | # using TLS to prevent request forgery and eavesdropping.
97 | # This is disabled by default. Uncomment the next line to enable it and see the other related configuration settings.
98 | #web_enable_tls = true
99 |
100 | # The X.509 certificate file to use for securing the web interface.
101 | #web_tls_cert_file = /path/to/graylog-web.crt
102 |
103 | # The private key to use for securing the web interface.
104 | #web_tls_key_file = /path/to/graylog-web.key
105 |
106 | # The password to unlock the private key used for securing the web interface.
107 | #web_tls_key_password = secret
108 |
109 | # The maximum size of the HTTP request headers in bytes.
110 | #web_max_header_size = 8192
111 |
112 | # The maximal length of the initial HTTP/1.1 line in bytes.
113 | #web_max_initial_line_length = 4096
114 |
115 | # The size of the thread pool used exclusively for serving the web interface.
116 | #web_thread_pool_size = 16
117 |
118 | # Embedded Elasticsearch configuration file
119 | # pay attention to the working directory of the server, maybe use an absolute path here
120 | # elasticsearch_config_file = /usr/share/graylog/data/config/elasticsearch.yml
121 |
122 | # Graylog will use multiple indices to store documents in. You can configure the strategy it uses to determine
123 | # when to rotate the currently active write index.
124 | # It supports multiple rotation strategies:
125 | # - "count" of messages per index, use elasticsearch_max_docs_per_index below to configure
126 | # - "size" per index, use elasticsearch_max_size_per_index below to configure
127 | # valid values are "count", "size" and "time", default is "count"
128 | rotation_strategy = count
129 |
130 | # (Approximate) maximum number of documents in an Elasticsearch index before a new index
131 | # is being created, also see no_retention and elasticsearch_max_number_of_indices.
132 | # Configure this if you used 'rotation_strategy = count' above.
133 | elasticsearch_max_docs_per_index = 20000000
134 |
135 | # (Approximate) maximum size in bytes per Elasticsearch index on disk before a new index is being created, also see
136 | # no_retention and elasticsearch_max_number_of_indices. Default is 1GB.
137 | # Configure this if you used 'rotation_strategy = size' above.
138 | #elasticsearch_max_size_per_index = 1073741824
139 |
140 | # (Approximate) maximum time before a new Elasticsearch index is being created, also see
141 | # no_retention and elasticsearch_max_number_of_indices. Default is 1 day.
142 | # Configure this if you used 'rotation_strategy = time' above.
143 | # Please note that this rotation period does not look at the time specified in the received messages, but is
144 | # using the real clock value to decide when to rotate the index!
145 | # Specify the time using a duration and a suffix indicating which unit you want:
146 | # 1w = 1 week
147 | # 1d = 1 day
148 | # 12h = 12 hours
149 | # Permitted suffixes are: d for day, h for hour, m for minute, s for second.
150 | #elasticsearch_max_time_per_index = 1d
151 |
152 | # Disable checking the version of Elasticsearch for being compatible with this Graylog release.
153 | # WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss!
154 | #elasticsearch_disable_version_check = true
155 |
156 | # Disable message retention on this node, i. e. disable Elasticsearch index rotation.
157 | #no_retention = false
158 |
159 | # How many indices do you want to keep?
160 | elasticsearch_max_number_of_indices = 20
161 |
162 | # Decide what happens with the oldest indices when the maximum number of indices is reached.
163 | # The following strategies are available:
164 | # - delete # Deletes the index completely (Default)
165 | # - close # Closes the index and hides it from the system. Can be re-opened later.
166 | retention_strategy = delete
167 |
168 | # How many Elasticsearch shards and replicas should be used per index? Note that this only applies to newly created indices.
169 | elasticsearch_shards = 4
170 | elasticsearch_replicas = 0
171 |
172 | # Prefix for all Elasticsearch indices and index aliases managed by Graylog.
173 | elasticsearch_index_prefix = graylog
174 |
175 | # Name of the Elasticsearch index template used by Graylog to apply the mandatory index mapping.
176 | # # Default: graylog-internal
177 | #elasticsearch_template_name = graylog-internal
178 |
179 | # Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
180 | # be enabled with care. See also: https://www.graylog.org/documentation/general/queries/
181 | allow_leading_wildcard_searches = true
182 |
183 | # Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
184 | # should only be enabled after making sure your Elasticsearch cluster has enough memory.
185 | allow_highlighting = true
186 |
187 | # settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file)
188 | # all these
189 | # this must be the same as for your Elasticsearch cluster
190 | elasticsearch_cluster_name = graylog
191 |
192 | # The prefix being used to generate the Elasticsearch node name which makes it easier to identify the specific Graylog
193 | # server running the embedded Elasticsearch instance. The node name will be constructed by concatenating this prefix
194 | # and the Graylog node ID (see node_id_file), for example "graylog-17052010-1234-5678-abcd-1337cafebabe".
195 | # Default: graylog-
196 | #elasticsearch_node_name_prefix = graylog-
197 |
198 | # we don't want the Graylog server to store any data, or be master node
199 | #elasticsearch_node_master = false
200 | #elasticsearch_node_data = false
201 |
202 | # use a different port if you run multiple Elasticsearch nodes on one machine
203 | elasticsearch_transport_tcp_port = 9350
204 |
205 | # we don't need to run the embedded HTTP server here
206 | elasticsearch_http_enabled = false
207 |
208 | elasticsearch_discovery_zen_ping_multicast_enabled = false
209 | elasticsearch_discovery_zen_ping_unicast_hosts = elasticsearch:9300
210 |
211 | # Change the following setting if you are running into problems with timeouts during Elasticsearch cluster discovery.
212 | # The setting is specified in milliseconds, the default is 5000ms (5 seconds).
213 | #elasticsearch_cluster_discovery_timeout = 5000
214 |
215 | # the following settings allow to change the bind addresses for the Elasticsearch client in Graylog
216 | # these settings are empty by default, letting Elasticsearch choose automatically,
217 | # override them here or in the 'elasticsearch_config_file' if you need to bind to a special address
218 | # refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html
219 | # for special values here
220 | elasticsearch_network_host = 0.0.0.0
221 | #elasticsearch_network_bind_host =
222 | #elasticsearch_network_publish_host =
223 |
224 | # The total amount of time discovery will look for other Elasticsearch nodes in the cluster
225 | # before giving up and declaring the current node master.
226 | #elasticsearch_discovery_initial_state_timeout = 3s
227 |
228 | # Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
229 | # All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
230 | # Elasticsearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
231 | # Note that this setting only takes effect on newly created indices.
232 | elasticsearch_analyzer = standard
233 |
234 | # Global request timeout for Elasticsearch requests (e. g. during search, index creation, or index time-range
235 | # calculations) based on a best-effort to restrict the runtime of Elasticsearch operations.
236 | # Default: 1m
237 | #elasticsearch_request_timeout = 1m
238 |
239 | # Time interval for index range information cleanups. This setting defines how often stale index range information
240 | # is being purged from the database.
241 | # Default: 1h
242 | #index_ranges_cleanup_interval = 1h
243 |
244 | # Batch size for the Elasticsearch output. This is the maximum (!) number of messages the Elasticsearch output
245 | # module will get at once and write to Elasticsearch in a batch call. If the configured batch size has not been
246 | # reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember
247 | # that every outputbuffer processor manages its own batch and performs its own batch write calls.
248 | # ("outputbuffer_processors" variable)
249 | output_batch_size = 500
250 |
251 | # Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two
252 | # batches of messages written to Elasticsearch. It is only effective at all if your minimum number of messages
253 | # for this time period is less than output_batch_size * outputbuffer_processors.
254 | output_flush_interval = 1
255 |
256 | # As stream outputs are loaded only on demand, an output which is failing to initialize will be tried over and
257 | # over again. To prevent this, the following configuration options define after how many faults an output will
258 | # not be tried again for an also configurable amount of seconds.
259 | output_fault_count_threshold = 5
260 | output_fault_penalty_seconds = 30
261 |
262 | # The number of parallel running processors.
263 | # Raise this number if your buffers are filling up.
264 | processbuffer_processors = 5
265 | outputbuffer_processors = 3
266 |
267 | #outputbuffer_processor_keep_alive_time = 5000
268 | #outputbuffer_processor_threads_core_pool_size = 3
269 | #outputbuffer_processor_threads_max_pool_size = 30
270 |
271 | # UDP receive buffer size for all message inputs (e. g. SyslogUDPInput).
272 | #udp_recvbuffer_sizes = 1048576
273 |
274 | # Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
275 | # Possible types:
276 | # - yielding
277 | # Compromise between performance and CPU usage.
278 | # - sleeping
279 | # Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
280 | # - blocking
281 | # High throughput, low latency, higher CPU usage.
282 | # - busy_spinning
283 | # Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
284 | processor_wait_strategy = blocking
285 |
286 | # Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
287 | # For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
288 | # Start server with --statistics flag to see buffer utilization.
289 | # Must be a power of 2. (512, 1024, 2048, ...)
290 | ring_size = 65536
291 |
292 | inputbuffer_ring_size = 65536
293 | inputbuffer_processors = 2
294 | inputbuffer_wait_strategy = blocking
295 |
296 | # Enable the disk based message journal.
297 | message_journal_enabled = true
298 |
299 | # The directory which will be used to store the message journal. The directory must me exclusively used by Graylog and
300 | # must not contain any other files than the ones created by Graylog itself.
301 | message_journal_dir = /usr/share/graylog/data/journal
302 |
303 | # Journal hold messages before they could be written to Elasticsearch.
304 | # For a maximum of 12 hours or 5 GB whichever happens first.
305 | # During normal operation the journal will be smaller.
306 | #message_journal_max_age = 12h
307 | #message_journal_max_size = 5gb
308 |
309 | #message_journal_flush_age = 1m
310 | #message_journal_flush_interval = 1000000
311 | #message_journal_segment_age = 1h
312 | #message_journal_segment_size = 100mb
313 |
314 | # Number of threads used exclusively for dispatching internal events. Default is 2.
315 | #async_eventbus_processors = 2
316 |
317 | # How many seconds to wait between marking node as DEAD for possible load balancers and starting the actual
318 | # shutdown process. Set to 0 if you have no status checking load balancers in front.
319 | lb_recognition_period_seconds = 3
320 |
321 | # Every message is matched against the configured streams and it can happen that a stream contains rules which
322 | # take an unusual amount of time to run, for example if its using regular expressions that perform excessive backtracking.
323 | # This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other
324 | # streams, Graylog limits the execution time for each stream.
325 | # The default values are noted below, the timeout is in milliseconds.
326 | # If the stream matching for one stream took longer than the timeout value, and this happened more than "max_faults" times
327 | # that stream is disabled and a notification is shown in the web interface.
328 | #stream_processing_timeout = 2000
329 | #stream_processing_max_faults = 3
330 |
331 | # Length of the interval in seconds in which the alert conditions for all streams should be checked
332 | # and alarms are being sent.
333 | #alert_check_interval = 60
334 |
335 | # Since 0.21 the Graylog server supports pluggable output modules. This means a single message can be written to multiple
336 | # outputs. The next setting defines the timeout for a single output module, including the default output module where all
337 | # messages end up.
338 | #
339 | # Time in milliseconds to wait for all message outputs to finish writing a single message.
340 | #output_module_timeout = 10000
341 |
342 | # Time in milliseconds after which a detected stale master node is being rechecked on startup.
343 | #stale_master_timeout = 2000
344 |
345 | # Time in milliseconds which Graylog is waiting for all threads to stop on shutdown.
346 | #shutdown_timeout = 30000
347 |
348 | # MongoDB connection string
349 | # See http://docs.mongodb.org/manual/reference/connection-string/ for details
350 | mongodb_uri = mongodb://mongo/graylog
351 |
352 | # Authenticate against the MongoDB server
353 | #mongodb_uri = mongodb://grayloguser:secret@localhost:27017/graylog
354 |
355 | # Use a replica set instead of a single host
356 | #mongodb_uri = mongodb://grayloguser:secret@localhost:27017,localhost:27018,localhost:27019/graylog
357 |
358 | # Increase this value according to the maximum connections your MongoDB server can handle from a single client
359 | # if you encounter MongoDB connection problems.
360 | mongodb_max_connections = 100
361 |
362 | # Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
363 | # If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5,
364 | # then 500 threads can block. More than that and an exception will be thrown.
365 | # http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
366 | mongodb_threads_allowed_to_block_multiplier = 5
367 |
368 | # Drools Rule File (Use to rewrite incoming log messages)
369 | # See: https://www.graylog.org/documentation/general/rewriting/
370 | #rules_file = /etc/graylog/server/rules.drl
371 |
372 | # Email transport
373 | #transport_email_enabled = false
374 | #transport_email_hostname = mail.example.com
375 | #transport_email_port = 587
376 | #transport_email_use_auth = true
377 | #transport_email_use_tls = true
378 | #transport_email_use_ssl = true
379 | #transport_email_auth_username = you@example.com
380 | #transport_email_auth_password = secret
381 | #transport_email_subject_prefix = [graylog]
382 | #transport_email_from_email = graylog@example.com
383 |
384 | # Specify and uncomment this if you want to include links to the stream in your stream alert mails.
385 | # This should define the fully qualified base url to your web interface exactly the same way as it is accessed by your users.
386 | #transport_email_web_interface_url = https://graylog.example.com
387 |
388 | # The default connect timeout for outgoing HTTP connections.
389 | # Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
390 | # Default: 5s
391 | #http_connect_timeout = 5s
392 |
393 | # The default read timeout for outgoing HTTP connections.
394 | # Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
395 | # Default: 10s
396 | #http_read_timeout = 10s
397 |
398 | # The default write timeout for outgoing HTTP connections.
399 | # Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
400 | # Default: 10s
401 | #http_write_timeout = 10s
402 |
403 | # HTTP proxy for outgoing HTTP connections
404 | #http_proxy_uri =
405 |
406 | # Disable the optimization of Elasticsearch indices after index cycling. This may take some load from Elasticsearch
407 | # on heavily used systems with large indices, but it will decrease search performance. The default is to optimize
408 | # cycled indices.
409 | #disable_index_optimization = true
410 |
411 | # Optimize the index down to <= index_optimization_max_num_segments. A higher number may take some load from Elasticsearch
412 | # on heavily used systems with large indices, but it will decrease search performance. The default is 1.
413 | #index_optimization_max_num_segments = 1
414 |
415 | # The threshold of the garbage collection runs. If GC runs take longer than this threshold, a system notification
416 | # will be generated to warn the administrator about possible problems with the system. Default is 1 second.
417 | #gc_warning_threshold = 1s
418 |
419 | # Connection timeout for a configured LDAP server (e. g. ActiveDirectory) in milliseconds.
420 | #ldap_connection_timeout = 2000
421 |
422 | # Enable collection of Graylog-related metrics into MongoDB
423 | # WARNING: This will add *a lot* of data into your MongoDB database on a regular interval (1 second)!
424 | # DEPRECATED: This setting and the respective feature will be removed in a future version of Graylog.
425 | #enable_metrics_collection = false
426 |
427 | # Disable the use of SIGAR for collecting system stats
428 | #disable_sigar = false
429 |
430 | # The default cache time for dashboard widgets. (Default: 10 seconds, minimum: 1 second)
431 | #dashboard_widget_default_cache_time = 10s
432 |
433 | # Automatically load content packs in "content_packs_dir" on the first start of Graylog.
434 | content_packs_loader_enabled = true
435 |
436 | # The directory which contains content packs which should be loaded on the first start of Graylog.
437 | content_packs_dir = /usr/share/graylog/data/contentpacks
438 |
439 | # A comma-separated list of content packs (files in "content_packs_dir") which should be applied on
440 | # the first start of Graylog.
441 | content_packs_auto_load = grok-patterns.json
442 |
--------------------------------------------------------------------------------