├── doc
│   └── schema.png
├── telegraf
│   ├── Dockerfile
│   └── root
│       └── etc
│           └── telegraf
│               └── telegraf.conf
├── bioyino
│   ├── Dockerfile
│   └── root
│       └── etc
│           └── bioyino
│               └── bioyino.toml
├── client.sh
├── carbon-clickhouse
│   ├── Dockerfile
│   └── root
│       └── etc
│           └── carbon-clickhouse
│               └── carbon-clickhouse.conf
├── carbonapi
│   ├── Dockerfile
│   └── root
│       └── etc
│           └── carbonapi.yaml
├── clickhouse
│   ├── Dockerfile
│   ├── root
│   │   └── etc
│   │       └── clickhouse-server
│   │           └── config.d
│   │               ├── metrics.xml
│   │               └── rollup.xml
│   └── init.sql
├── graphite-clickhouse
│   ├── Dockerfile
│   └── root
│       └── etc
│           └── graphite-clickhouse
│               ├── rollup.xml
│               └── graphite-clickhouse.conf
├── grafana
│   ├── root
│   │   ├── etc
│   │   │   └── grafana
│   │   │       ├── provisioning
│   │   │       │   ├── dashboards
│   │   │       │   │   └── graphite-clickhouse.yaml
│   │   │       │   └── datasources
│   │   │       │       └── graphite.yaml
│   │   │       └── grafana.ini
│   │   └── var
│   │       └── lib
│   │           └── grafana
│   │               └── dashboards
│   │                   └── graphite-clickhouse.json
│   └── Dockerfile
├── README.md
└── docker-compose.yaml
/doc/schema.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kolobaev/graphite-clickhouse-tldr/HEAD/doc/schema.png
--------------------------------------------------------------------------------
/telegraf/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM telegraf:1.12
2 |
3 | COPY ./root/etc/telegraf/telegraf.conf /etc/telegraf/telegraf.conf
4 |
--------------------------------------------------------------------------------
/bioyino/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM pik4ez/bioyino:0.6.0
2 |
3 | COPY ./root/etc/bioyino/bioyino.toml /etc/bioyino/bioyino.toml
4 |
--------------------------------------------------------------------------------
/client.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | docker-compose exec clickhouse bash -c "
4 | export HOME=/var/lib/clickhouse/
5 | exec clickhouse client
6 | "
--------------------------------------------------------------------------------
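Once the stack is up, the same exec pattern can run ad-hoc queries non-interactively. A minimal sketch (the `-T` flag disables TTY allocation; the table names come from clickhouse/init.sql):

```sh
# Count stored points without opening an interactive session
echo 'SELECT count() FROM graphite.data' \
  | docker-compose exec -T clickhouse clickhouse client

# List a few metric names from the tree table
echo 'SELECT Path FROM graphite.metrics LIMIT 10' \
  | docker-compose exec -T clickhouse clickhouse client
```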
/carbon-clickhouse/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM lomik/carbon-clickhouse:v0.11.0
2 |
3 | COPY ./root/etc/carbon-clickhouse/carbon-clickhouse.conf /etc/carbon-clickhouse/carbon-clickhouse.conf
4 |
--------------------------------------------------------------------------------
/carbonapi/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openmetric/carbonapi:0.11.0
2 |
3 | COPY ./root/etc/carbonapi.yaml /etc/carbonapi.yaml
4 |
5 | ENTRYPOINT ["carbonapi", "-config", "/etc/carbonapi.yaml"]
6 |
--------------------------------------------------------------------------------
/clickhouse/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM yandex/clickhouse-server:20.3.5.21
2 |
3 | COPY ./root/etc/clickhouse-server/config.d/rollup.xml /etc/clickhouse-server/config.d/rollup.xml
4 | COPY ./root/etc/clickhouse-server/config.d/metrics.xml /etc/clickhouse-server/config.d/metrics.xml
5 |
--------------------------------------------------------------------------------
/graphite-clickhouse/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM lomik/graphite-clickhouse:v0.11.1
2 |
3 | COPY ./root/etc/graphite-clickhouse/rollup.xml /etc/graphite-clickhouse/rollup.xml
4 | COPY ./root/etc/graphite-clickhouse/graphite-clickhouse.conf /etc/graphite-clickhouse/graphite-clickhouse.conf
5 |
--------------------------------------------------------------------------------
/grafana/root/etc/grafana/provisioning/dashboards/graphite-clickhouse.yaml:
--------------------------------------------------------------------------------
 1 | # config file version
2 | apiVersion: 1
3 |
4 | providers:
5 | - name: 'graphite-clickhouse'
6 | orgId: 1
7 | folder: ''
8 | type: file
9 | options:
10 | path: /var/lib/grafana/dashboards/graphite-clickhouse.json
11 |
--------------------------------------------------------------------------------
/grafana/root/etc/grafana/provisioning/datasources/graphite.yaml:
--------------------------------------------------------------------------------
 1 | # config file version
2 | apiVersion: 1
3 |
4 | datasources:
5 | - name: graphite1
6 | type: graphite
7 | access: proxy
8 | orgId: 1
9 | url: http://carbonapi:80
10 | isDefault: True
11 | jsonData:
12 | graphiteVersion: "1.1"
13 |
--------------------------------------------------------------------------------
/grafana/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM grafana/grafana:6.7.2
2 |
3 | COPY ./root/etc/grafana/grafana.ini /etc/grafana/grafana.ini
4 | COPY ./root/etc/grafana/provisioning/dashboards/graphite-clickhouse.yaml /etc/grafana/provisioning/dashboards/graphite-clickhouse.yaml
5 | COPY ./root/etc/grafana/provisioning/datasources/graphite.yaml /etc/grafana/provisioning/datasources/graphite.yaml
6 | COPY ./root/var/lib/grafana/dashboards/graphite-clickhouse.json /var/lib/grafana/dashboards/graphite-clickhouse.json
7 |
--------------------------------------------------------------------------------
/clickhouse/root/etc/clickhouse-server/config.d/metrics.xml:
--------------------------------------------------------------------------------
 1 | <yandex>
 2 |     <use_graphite>true</use_graphite>
 3 |     <graphite>
 4 |         <host>carbon-clickhouse</host>
 5 |         <port>2003</port>
 6 |         <timeout>1</timeout>
 7 |         <interval>10</interval>
 8 |         <root_path>resources.monitoring.clickhouse</root_path>
 9 |
10 |         <metrics>true</metrics>
11 |         <events>true</events>
12 |         <asynchronous_metrics>true</asynchronous_metrics>
13 |     </graphite>
14 | </yandex>
15 |
--------------------------------------------------------------------------------
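The config above makes ClickHouse push its own counters to carbon-clickhouse every 10 seconds under the resources.monitoring.clickhouse prefix. A sketch to confirm they arrive (assumes the stack has been up for a minute or so):

```sh
# Self-monitoring check: ClickHouse's own metrics should land in graphite.data
echo "SELECT count() FROM graphite.data WHERE Path LIKE 'resources.monitoring.clickhouse%'" \
  | docker-compose exec -T clickhouse clickhouse client
```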
/clickhouse/root/etc/clickhouse-server/config.d/rollup.xml:
--------------------------------------------------------------------------------
 1 | <yandex>
 2 |     <graphite_rollup>
 3 |         <default>
 4 |             <function>max</function>
 5 |             <retention>
 6 |                 <age>0</age>
 7 |                 <precision>10</precision>
 8 |             </retention>
 9 |             <retention>
10 |                 <age>172800</age>
11 |                 <precision>60</precision>
12 |             </retention>
13 |             <retention>
14 |                 <age>3024000</age>
15 |                 <precision>600</precision>
16 |             </retention>
17 |         </default>
18 |     </graphite_rollup>
19 | </yandex>
20 |
--------------------------------------------------------------------------------
/graphite-clickhouse/root/etc/graphite-clickhouse/rollup.xml:
--------------------------------------------------------------------------------
 1 | <yandex>
 2 |     <graphite_rollup>
 3 |         <default>
 4 |             <function>max</function>
 5 |             <retention>
 6 |                 <age>0</age>
 7 |                 <precision>10</precision>
 8 |             </retention>
 9 |             <retention>
10 |                 <age>172800</age>
11 |                 <precision>60</precision>
12 |             </retention>
13 |             <retention>
14 |                 <age>3024000</age>
15 |                 <precision>600</precision>
16 |             </retention>
17 |         </default>
18 |     </graphite_rollup>
19 | </yandex>
20 |
--------------------------------------------------------------------------------
/graphite-clickhouse/root/etc/graphite-clickhouse/graphite-clickhouse.conf:
--------------------------------------------------------------------------------
1 | [common]
2 | listen = ":9090"
3 | max-cpu = 8
4 |
5 | [clickhouse]
6 | url = "http://clickhouse:8123/?max_query_size=268435456&max_ast_elements=1000000&max_execution_time=60&log_queries=1"
7 | data-table = "graphite.data"
8 | date-tree-table = "graphite.series"
9 | date-tree-table-version = 2
10 | tree-table = "graphite.metrics"
11 | tagged-table = "graphite.tagged"
12 | tag-table = "graphite.tag_w_prefix"
13 | rollup-conf = "/etc/graphite-clickhouse/rollup.xml"
14 | extra-prefix = ""
15 | data-timeout = "60s"
16 | tree-timeout = "60s"
17 |
18 | [[data-table]]
19 | table = "graphite.data_reverse"
20 | max-age = "48h"
21 | reverse = true
22 |
23 | [tags]
24 | rules = "/etc/graphite-clickhouse/tag.d/*.conf"
25 | date = "2016-11-01"
26 | input-file = ""
27 | output-file = ""
28 |
29 | [logging]
30 | file = "/var/log/graphite-clickhouse.log"
31 | level = "debug"
32 |
--------------------------------------------------------------------------------
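Port 9090 is carbonapi's backend and is not published to the host, so it can only be probed from inside the compose network. A sketch, assuming the clickhouse image ships wget (swap in curl if it does not):

```sh
# /metrics/find/ is the same endpoint carbonapi calls under the hood
docker-compose exec clickhouse \
  wget -qO- 'http://graphite-clickhouse:9090/metrics/find/?query=resources.*'
```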
/clickhouse/init.sql:
--------------------------------------------------------------------------------
1 | CREATE DATABASE IF NOT EXISTS graphite;
2 |
3 | CREATE TABLE IF NOT EXISTS graphite.data (
4 | Path String,
5 | Value Float64,
6 | Time UInt32,
7 | Date Date,
8 | Timestamp UInt32
9 | ) ENGINE = GraphiteMergeTree(Date, (Path, Time), 8192, 'graphite_rollup');
10 |
11 | CREATE TABLE IF NOT EXISTS graphite.data_reverse (
12 | Path String,
13 | Value Float64,
14 | Time UInt32,
15 | Date Date,
16 | Timestamp UInt32
17 | ) ENGINE = GraphiteMergeTree('graphite_rollup') PARTITION BY Date ORDER BY (Path, Time) SETTINGS index_granularity = 8192;
18 |
19 | CREATE TABLE IF NOT EXISTS graphite.metrics (
20 | Date Date,
21 | Level UInt32,
22 | Path String,
23 | Deleted UInt8,
24 | Version UInt32
25 | ) ENGINE = ReplacingMergeTree(Date, (Level, Path), 8192, Version);
26 |
27 | CREATE TABLE IF NOT EXISTS graphite.series (
28 | Date Date,
29 | Level UInt32,
30 | Path String,
31 | Deleted UInt8,
32 | Version UInt32
33 | ) ENGINE = ReplacingMergeTree(Date, (Level, Path, Date), 8192);
34 |
35 | CREATE TABLE IF NOT EXISTS graphite.tagged (
36 | Date Date,
37 | Tag1 String,
38 | Path String,
39 | Tags Array(String),
40 | Version UInt32,
41 | Deleted UInt8
42 | ) ENGINE = ReplacingMergeTree(Date, (Tag1, Path, Date), 8192, Version);
43 |
--------------------------------------------------------------------------------
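For testing the read path without going through carbon-clickhouse, a hypothetical manual insert matching the column layout above:

```sh
echo "INSERT INTO graphite.data (Path, Value, Time, Date, Timestamp)
      VALUES ('test.manual', 1, toUInt32(now()), today(), toUInt32(now()))" \
  | docker-compose exec -T clickhouse clickhouse client
```

Note that carbonapi can only *find* a metric once its name also lands in graphite.metrics / graphite.series, which carbon-clickhouse maintains automatically.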
/README.md:
--------------------------------------------------------------------------------
1 | # graphite-clickhouse-tldr
2 | Graphite-ClickHouse + Grafana + StatsD with [docker-compose](https://docs.docker.com/compose/install/)
3 |
4 | # Quick Start
5 | ```sh
6 | git clone https://github.com/kolobaev/graphite-clickhouse-tldr.git
7 | cd graphite-clickhouse-tldr
8 | docker-compose up
9 | ```
10 | Open http://127.0.0.1:3000/ in a browser
11 |
12 | ## How it works
13 | 
14 |
15 | ### Docker-compose install:
16 | https://docs.docker.com/compose/install/
17 |
18 | ### Mapped Ports
19 |
20 | Host | Container | Service
21 | ---- | --------- | -------------------------------------------------------------------------------------------------------------------
22 | 80 | 80 | [carbonapi](https://github.com/go-graphite/carbonapi)
23 | 3000 | 3000 | [grafana (admin:admin)](https://github.com/grafana/grafana)
24 | 8126 | 8126 | [bioyino (StatsD)](https://github.com/avito-tech/bioyino)
25 | 2003 | 2003 | [carbon receiver - plaintext](http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol)
26 | 2004 | 2004 | [carbon receiver - pickle](http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-pickle-protocol)
27 | 2006 | 2006 | [carbon receiver - prometheus remote write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Cremote_write%3E)
28 |
--------------------------------------------------------------------------------
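A quick smoke test of the pipeline described in the README (a sketch; assumes a netcat that supports -q and that the stack is running):

```sh
# Send one point over the plaintext protocol (host port 2003)
echo "test.smoke 42 $(date +%s)" | nc -q1 127.0.0.1 2003

# carbon-clickhouse uploads in ~1s chunks; give it a moment, then render
sleep 5
curl 'http://127.0.0.1/render?target=test.smoke&from=-10min&format=json'
```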
/carbonapi/root/etc/carbonapi.yaml:
--------------------------------------------------------------------------------
1 | # Listen address, should always include hostname or ip address and a port.
2 | listen: ":80"
3 | # Max concurrent requests to CarbonZipper
4 | concurency: 2000000
5 | cache:
6 | # Type of caching. Valid: "mem", "memcache", "null"
7 | type: "mem"
8 | # Cache limit in megabytes
9 | size_mb: 100000
10 | # Default cache timeout value. Identical to DEFAULT_CACHE_DURATION in graphite-web.
11 | defaultTimeoutSec: 15
12 | # Only used by memcache type of cache. List of memcache servers.
13 | memcachedServers:
14 | - "127.0.0.1:1234"
15 | - "127.0.0.2:1235"
16 | # Amount of CPUs to use. 0 - unlimited
17 | cpus: 0
18 | tz: ""
19 | sendGlobsAsIs: true
20 | alwaysSendGlobsAsIs: true
21 | maxBatchSize: 1000000
22 | graphite:
23 | # Host:port where to send internal metrics
24 | # Empty = disabled
25 | host: "carbon-clickhouse:2003"
26 | interval: "10s"
27 | prefix: "resources.monitoring.carbonapi"
28 | # Maximum idle connections to carbonzipper
29 | idleConnections: 100000
30 | pidFile: ""
31 | logger:
32 | - logger: ""
33 | file: "stderr"
34 | level: "debug"
35 | encoding: "console"
36 | encodingTime: "iso8601"
37 | encodingDuration: "seconds"
38 | - logger: ""
39 | file: "/var/log/carbonapi.log"
40 | level: "error"
41 | encoding: "json"
42 | - logger: "access"
43 | file: "/var/log/carbonapi_access.log"
44 | level: "info"
45 | encoding: "json"
46 | upstreams:
47 | timeouts:
48 | global: "60s"
49 | afterStarted: "60s"
50 | backends:
51 | - "http://graphite-clickhouse:9090"
52 | tagdb:
53 | url: "http://graphite-clickhouse:9090"
54 |
--------------------------------------------------------------------------------
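carbonapi serves the graphite-web-compatible HTTP API on the listen address above (mapped to host port 80). Example probes (the exact leaf names under the internal prefix may vary):

```sh
# Browse the metric tree (the same endpoint Grafana uses via the datasource)
curl 'http://127.0.0.1/metrics/find/?query=resources.monitoring.*'

# Render carbonapi's own metrics as JSON
curl 'http://127.0.0.1/render?target=resources.monitoring.carbonapi.*.*&from=-1h&format=json'
```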
/carbon-clickhouse/root/etc/carbon-clickhouse/carbon-clickhouse.conf:
--------------------------------------------------------------------------------
1 | [common]
 2 | # Prefix for storing all internal carbon-clickhouse graphs. Supported macros: {host}
 3 | metric-prefix = "resources.monitoring.carbon-clickhouse.{host}"
 4 | # Endpoint for storing internal carbon metrics. Valid values: "" or "local", "tcp://host:port", "udp://host:port"
 5 | metric-endpoint = "local"
6 | # Interval of storing internal metrics. Like CARBON_METRIC_INTERVAL
7 | metric-interval = "10s"
8 | # GOMAXPROCS
9 | max-cpu = 2
10 |
11 | [logging]
12 | # "stderr", "stdout" can be used as file name
13 | file = "/var/log/carbon-clickhouse.log"
14 | # Logging level. Valid values: "debug", "info", "warn", "error"
15 | level = "warn"
16 |
17 | [data]
18 | # Folder for buffering received data
19 | path = "/data/carbon-clickhouse/data/"
20 | # Rotate (and upload) file interval.
21 | # Minimize chunk-interval to minimize the lag between receiving a point and storing it
22 | chunk-interval = "1s"
23 |
24 | [upload.graphite]
25 | type = "points"
26 | table = "graphite.data"
27 | threads = 2
28 | url = "http://clickhouse:8123/"
29 | timeout = "30s"
30 |
31 | [upload.graphite_tree]
32 | type = "tree"
33 | table = "graphite.metrics"
34 | date = "2016-11-01"
35 | threads = 2
36 | url = "http://clickhouse:8123/"
37 | timeout = "30s"
38 | cache-ttl = "12h0m0s"
39 |
40 | [upload.graphite_series]
41 | type = "series"
42 | table = "graphite.series"
43 | threads = 2
44 | url = "http://clickhouse:8123/"
45 | timeout = "30s"
46 | cache-ttl = "12h0m0s"
47 |
48 | [upload.graphite_tagged]
49 | type = "tagged"
50 | table = "graphite.tagged"
51 | threads = 1
52 | url = "http://clickhouse:8123/"
53 | timeout = "30s"
54 | cache-ttl = "12h0m0s"
55 |
56 | [upload.graphite_reverse]
57 | type = "points-reverse"
58 | table = "graphite.data_reverse"
59 | threads = 2
60 | url = "http://clickhouse:8123/"
61 | timeout = "30s"
62 | cache-ttl = "12h0m0s"
63 |
64 | [udp]
65 | listen = ":2003"
66 | enabled = true
67 |
68 | [tcp]
69 | listen = ":2003"
70 | enabled = true
71 |
72 | [pickle]
73 | listen = ":2004"
74 | enabled = true
75 |
76 | [grpc]
77 | listen = ":2005"
78 | enabled = false
79 |
80 | [pprof]
81 | listen = "localhost:7007"
82 | enabled = false
83 |
84 | [prometheus]
85 | listen = ":2006"
86 | enabled = true
87 |
--------------------------------------------------------------------------------
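The receiver sections above accept the same plaintext line protocol over TCP and UDP on :2003; a sketch (assumes a netcat with -u/-w):

```sh
# One point over UDP; the TCP variant is the same without -u
echo "test.udp 1 $(date +%s)" | nc -u -w1 127.0.0.1 2003
```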
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3.5'
2 | services:
3 | clickhouse:
4 | build: clickhouse/.
5 | image: clickhouse-preconfigured
6 | volumes:
7 | - "clickhouse-data:/var/lib/clickhouse/data"
8 | - "clickhouse-metadata:/var/lib/clickhouse/metadata"
9 | networks:
10 | - default
11 | restart: always
12 | clickhouse-init:
13 | build: clickhouse/.
14 | image: clickhouse-preconfigured
15 | volumes:
16 | - "./clickhouse/init.sql:/init.sql"
17 | entrypoint: |
18 | bash -c '
19 | sleep 5
20 | export HOME=/var/lib/clickhouse/
21 | cat /init.sql | clickhouse client --host clickhouse --multiquery'
22 | networks:
23 | - default
24 | carbon-clickhouse:
25 | build: carbon-clickhouse/.
26 | image: carbon-clickhouse-preconfigured
27 | volumes:
28 | - "carbon-clickhouse-data:/data/carbon-clickhouse/data"
29 | ports:
30 | - "2003:2003" # plain tcp
31 | - "2003:2003/udp" # plain udp
32 | - "2004:2004" # pickle
33 | - "2006:2006" # prometheus remote write
34 | networks:
35 | - default
36 | restart: always
37 | graphite-clickhouse:
38 | build: graphite-clickhouse/.
39 | image: graphite-clickhouse-preconfigured
40 | networks:
41 | - default
42 | restart: always
43 | carbonapi:
44 | build: carbonapi/.
45 | image: carbonapi-preconfigured
46 | ports:
47 | - "80:80"
48 | networks:
49 | - default
50 | restart: always
51 | grafana:
52 | build: grafana/.
53 | image: grafana-preconfigured
54 | volumes:
55 | - "grafana-data:/var/lib/grafana"
56 | ports:
57 | - "3000:3000"
58 | networks:
59 | - default
60 | restart: always
61 | telegraf:
62 | hostname: apphost
63 | build: telegraf/.
64 | image: telegraf-preconfigured
65 | environment:
66 | - HOST_PROC=/rootfs/proc
67 | - HOST_SYS=/rootfs/sys
68 | - HOST_ETC=/rootfs/etc
69 | volumes:
70 | - "/var/run/docker.sock:/var/run/docker.sock:ro"
71 | - "/sys:/rootfs/sys:ro"
72 | - "/proc:/rootfs/proc:ro"
73 | - "/etc:/rootfs/etc:ro"
74 | networks:
75 | - default
76 | restart: always
77 | bioyino:
78 | build: bioyino/.
79 | image: bioyino-preconfigured
80 | ports:
81 | - "8126:8126/udp"
82 | - "8136:8136"
83 | networks:
84 | - default
85 | restart: always
86 |
87 | networks:
88 | default:
89 |
90 | volumes:
91 | grafana-data:
92 | clickhouse-data:
93 | clickhouse-metadata:
94 | carbon-clickhouse-data:
95 |
--------------------------------------------------------------------------------
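For reference, the usual compose lifecycle for this file; only the service and volume names are project-specific:

```sh
docker-compose up -d --build               # build the *-preconfigured images and start
docker-compose logs -f carbon-clickhouse   # follow one service's logs
docker-compose down -v                     # stop and remove the named volumes
```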
/bioyino/root/etc/bioyino/bioyino.toml:
--------------------------------------------------------------------------------
1 | # This is an example config showing all the possible options
2 | # Required options are filled with default values
 3 | # Non-required options are commented out, with default values shown in the comments
4 |
5 | verbosity = "warn"
6 |
 7 | # Number of network worker threads in any mode, use 0 (not recommended) to use all CPU cores
 8 | n-threads = 1
 9 |
10 | # Number of aggregating and counting threads, use 0 (not recommended) to use all CPU cores
11 | w-threads = 1
12 |
13 | # Queue size for a single counting thread before a task is dropped
14 | task-queue-size = 1
15 |
16 | # If the server should become leader from its very start
17 | start-as-leader = true
18 |
19 | # How often to gather own stats, in ms. Use 0 to disable (stats are still gathered and printed to the log,
20 | # but not included in the metric dump)
21 | stats-interval = 10000
22 |
23 | # Prefix for sending own stats
24 | stats-prefix = "resources.monitoring.bioyino"
25 |
26 | # What consensus to use: "consul", "internal" or "none"
27 | consensus = "none"
28 |
29 | [carbon]
30 |
31 | # IP and port of the carbon-protocol backend to send aggregated data to
32 | address = "carbon-clickhouse:2003"
33 |
34 | # Address to bind carbon client to when connecting
35 | # default: not specified, so no bind happens
36 | # bind-address = "127.0.0.1:2003"
37 |
38 | # How often to send metrics to carbon backend, ms
39 | interval = 3000
40 |
41 | # How long to sleep when the connection to the backend fails, ms
42 | connect-delay = 250
43 |
44 | # Multiply the delay by this value for each consecutive connection failure, float
45 | connect-delay-multiplier = 2
46 |
47 | # Maximum retry delay, ms
48 | connect-delay-max = 10000
49 |
50 | # How many times to retry sending data to the backend before giving up and dropping all metrics
51 | # note that 0 means 1 try
52 | send-retries = 30
53 |
54 | # Network settings
55 | [network]
56 | # Address:port to listen for metrics at
57 | listen = "0.0.0.0:8126"
58 |
59 | # Address and port for replication server to listen on
60 | peer-listen = "0.0.0.0:8136"
61 |
62 | # Address for peer client to bind to
63 | # format is string with ip:port as with other addresses
64 | # Only binds if specified, doesn't do binding if not
65 | # peer-client-bind =
66 |
67 | # Address and port for management server to listen on
68 | mgmt-listen = "0.0.0.0:8137"
69 |
70 | # UDP buffer size for a single packet. Should be around the MTU; bytes of a packet beyond this value
71 | # may be lost
72 | bufsize = 1500
73 |
74 | # Enable multimessage(recvmmsg) mode
75 | multimessage = false
76 |
77 | # Number of multimessage packets to receive at once if in multimessage mode
78 | # Note that this setting is per thread, so in reality one can only see metrics
79 | # after receiving at least mm-packets*n_threads datagrams
80 | mm-packets = 1
81 |
82 | # Do multimessage operations in async mode.
83 | # This means recvmmsg will receive 0..mm-packets datagrams instead of waiting for mm-packets
84 | mm-async = false
85 |
86 | # Multimessage mode assumes early return by timeout, but ONLY when a packet
87 | # is received after the timeout expires.
88 | # Basically this should be changed in very rare and specific cases.
89 | # 0 means this value will be equal to buffer-flush-time
90 | # mm-timeout = 0
91 |
92 | # To avoid packets staying in the queue forever, this option can be used to flush
93 | # the incoming data buffer, forcing it to be sent even if it's not full, ms
94 | buffer-flush-time = 5000
95 |
 96 | # Same as buffer-flush-time, but triggers on buffer length. Please note that multimessage
 97 | # mode can only consider the timer, so this check is only possible every mm-packets packets.
 98 | # A zero value means automatic management depending on the memory allocator's internal logic,
 99 | # which in tests was found to reach 30Mb.
100 | # If in multimessage mode this value is lower than mm-packets*bufsize, it will be set to that value
101 | buffer-flush-length = 65536
102 |
103 | # Number of green threads for single-message mode
104 | greens = 1
105 |
106 | # Socket pool size for single-message mode
107 | async-sockets = 1
108 |
109 | # List of nodes to replicate metrics to
110 | nodes = []
111 |
112 | # Interval to send snapshots to nodes, ms
113 | snapshot-interval = 1000
114 |
115 | [metrics]
116 | # Process buffers from different hosts separately; this gives a better guarantee of parsing
117 | # metrics from different hosts correctly. Can perform badly if lots of metrics are received from a single host; set
118 | # it to false if you have such a use case
119 | # consistent-parsing = true
120 |
121 | # Log all buffers being dropped due to parsing errors. Can be very spammy.
122 | # log-parse-errors = false
123 |
124 | # Size of buffer that parser considers invalid. Used to avoid DoS attacks on parser.
125 | # Increase this if you have metrics taking more than 1000 bytes
126 | # max-unparsed-buffer = 10000
127 |
128 | # Size of tags part of a metric (after semicolon character, not including the leading semicolon itself)
129 | # max-tags-len = 9000
130 |
131 | # Since a tagged metric becomes a totally different metric in many systems, the timeseries for such metrics
132 | # can be broken. To avoid this while the metric is being adopted, this option allows creating a copy of
133 | # such a tagged metric, but without tags
134 | # Please note that this effectively doubles memory usage and processing time
135 | # create-untagged-copy = false
136 |
137 | [aggregation]
138 | # round the timestamp to the interval of aggregation
139 | # possible values:
140 | # "up" - round to the upper bound of the interval
141 | # "down" - round to the lower bound
142 | # "no" - use the exact timestamp, i.e. no rounding
143 | # round-timestamp = "no"
144 |
145 | # the threading mode of aggregation:
146 | # single - in a single thread
147 | # common - in the general thread pool along with parsing and aggregating metrics
148 | # separate - in a separate thread pool
149 | mode = "single"
150 |
151 | # available values: smart (explained below), name, tag, both
152 | # smart means auto detection based on the existence of tags in the metric,
153 | # so, for example sum for `some.metric` will be aggregated as `some.metric.sum`
154 | # but for `some.metric;tag=value` it will automatically become `some.metric;tag=value;aggregate=sum`
155 | destination = "smart"
156 |
157 | # the updates aggregate is usually more of a debug value than a real one
158 | # also, this is the only aggregate used for every type of metric, not only ms
159 | # so it's reasonable to avoid receiving a doubled amount of all metrics
160 | # This option allows receiving it only for metrics updated more often than this threshold
161 | update-count-threshold = 200
162 |
163 | # a list of aggregates gathered for each ms-typed metric
164 | # please note that setting this value defines the exact set, so, i.e. ["mean"] will gather ONLY ONE aggregate
165 | #
166 | # To add new percentiles, an integer value with the "percentile-" prefix should be used.
167 | # It will then be "appended" (in reality this is not string based calculation) with "0."
168 | # For example, "percentile-9999" will become 0.9999th percentile
169 | #
170 | # this is the full list of default values
171 | #ms-aggregates = [ "count", "last", "min", "max", "sum", "median", "mean", "updates", "percentile-75", "percentile-95", "percentile-98", "percentile-99", "percentile-999" ]
172 |
173 | # these attributes allow changing the naming of aggregates in the name postfix, prefix or in tags
174 | # by default names are the same as aggregate names, except for percentiles
175 | # percentile postfixes by default look like "percentile.99"
176 |
177 | # NOT defaults, just example
178 | #postfix-replacements = { "min" = "lower", "max" = "upper", "percentile-50" = "percentile.50" }
179 | postfix-replacements = { "updates" = "" }
180 |
181 | # prefix specific aggregates with specified string
182 | # all prefixes are empty by default
183 | # NOTE: you most probably want EITHER prefix or postfix, so if you specify a prefix here,
184 | # don't forget to explicitly set the postfix to ""
185 | #
186 | # NOT defaults, just example
187 | #prefix-replacements = { "updates" = "resources.monitoring.bioyino.updates" }
188 | prefix-replacements = { "updates" = "resources.monitoring.bioyino.updates" }
189 |
190 | #tag-replacements = { "min" = "lower", "max" = "upper", "percentile-50" = "percentile.50" }
191 |
192 | # the default tag name taken when replacement is not specified
193 | # tag-name = "aggregate"
194 |
195 | # Settings for internal Raft
196 | [raft]
197 | # Defer the start of raft consensus to avoid the node becoming leader too early
198 | # Such a situation is very likely when restarting the current leader node
199 | # and means losing metrics in most cases
200 | #start-delay = 0
201 |
202 | # Timeouts tuned according to the Raft paper and typical network latency.
203 | # Better not to change if unsure
204 | #heartbeat-timeout = 250
205 | #election-timeout-min = 500
206 | #election-timeout-max = 750
207 |
208 | # The name of the current node is taken from hostname by default
209 | # After that all hostnames are resolved using DNS. If node name cannot
210 | # be resolved through DNS for some reason, it can be specified in this-node
211 | # parameter in a format similar to the one in the node list.
212 | # this-node =
213 |
214 | # A map of other raft nodes. Keys are in the form hostname:port or IP:port,
215 | # values are integer IDs
216 | # it is crucial that this map has the same address <-> ID mappings on all nodes
217 | nodes = {}
218 |
219 | # allow binding raft outgoing connections to a specific IP
220 | # default: not specified, so no bind happens
221 | # client-bind = "127.0.0.1:8138"
222 |
223 | [consul]
224 | # Start in disabled leader finding mode. This only works while consul is bootstrapping.
225 | # Can be helpful when there is a danger of agent being inaccessible.
226 | start-as = "enabled"
227 |
228 | # Consul agent address
229 | agent = "127.0.0.1:8500"
230 |
231 | # TTL of consul session, ms (Consul cannot set it to less than 10s)
232 | session-ttl = 11000
233 |
234 | # How often to renew Consul session, ms
235 | renew-time = 1000
236 |
237 | # Key name to lock in Consul
238 | key-name = "service/bioyino/lock"
239 |
--------------------------------------------------------------------------------
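Example StatsD datagrams for the UDP listener above (:8126). With destination = "smart", plain names get aggregate suffixes like .sum and .count; assumes a netcat with -u/-w:

```sh
echo "app.logins:1|c"      | nc -u -w1 127.0.0.1 8126   # counter
echo "app.req_time:320|ms" | nc -u -w1 127.0.0.1 8126   # timer, gets ms aggregates
echo "app.queue:42|g"      | nc -u -w1 127.0.0.1 8126   # gauge
```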
/grafana/root/etc/grafana/grafana.ini:
--------------------------------------------------------------------------------
1 | ##################### Grafana Configuration Example #####################
2 | #
3 | # Everything has defaults so you only need to uncomment things you want to
4 | # change
5 |
6 | # possible values : production, development
7 | ;app_mode = production
8 |
9 | # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
10 | ;instance_name = ${HOSTNAME}
11 |
12 | #################################### Paths ####################################
13 | [paths]
14 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
15 | ;data = /var/lib/grafana
16 |
17 | # Directory where grafana can store logs
18 | ;logs = /var/log/grafana
19 |
20 | # Directory where grafana will automatically scan and look for plugins
21 | ;plugins = /var/lib/grafana/plugins
22 |
23 | # folder that contains provisioning config files that grafana will apply on startup and while running.
24 | ;provisioning = conf/provisioning
25 |
26 | #################################### Server ####################################
27 | [server]
28 | # Protocol (http, https, socket)
29 | ;protocol = http
30 |
31 | # The ip address to bind to, empty will bind to all interfaces
32 | ;http_addr =
33 |
34 | # The http port to use
35 | ;http_port = 3000
36 |
37 | # The public facing domain name used to access grafana from a browser
38 | ;domain = localhost
39 |
40 | # Redirect to correct domain if host header does not match domain
41 | # Prevents DNS rebinding attacks
42 | ;enforce_domain = false
43 |
44 | # The full public facing url you use in browser, used for redirects and emails
45 | # If you use reverse proxy and sub path specify full url (with sub path)
46 | ;root_url = http://localhost:3000
47 |
48 | # Log web requests
49 | ;router_logging = false
50 |
51 | # the path relative working path
52 | ;static_root_path = public
53 |
54 | # enable gzip
55 | ;enable_gzip = false
56 |
57 | # https certs & key file
58 | ;cert_file =
59 | ;cert_key =
60 |
61 | # Unix socket path
62 | ;socket =
63 |
64 | #################################### Database ####################################
65 | [database]
66 | # You can configure the database connection by specifying type, host, name, user and password
67 | # as separate properties or as one string using the url property.
68 |
69 | # Either "mysql", "postgres" or "sqlite3", it's your choice
70 | ;type = sqlite3
71 | ;host = 127.0.0.1:3306
72 | ;name = grafana
73 | ;user = root
74 | # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
75 | ;password =
76 |
77 | # Use either URL or the previous fields to configure the database
78 | # Example: mysql://user:secret@host:port/database
79 | ;url =
80 |
81 | # For "postgres" only, either "disable", "require" or "verify-full"
82 | ;ssl_mode = disable
83 |
84 | # For "sqlite3" only, path relative to data_path setting
85 | ;path = grafana.db
86 |
87 | # Max idle conn setting default is 2
88 | ;max_idle_conn = 2
89 |
90 | # Max conn setting default is 0 (mean not set)
91 | ;max_open_conn =
92 |
93 | # Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
94 | ;conn_max_lifetime = 14400
95 |
96 | # Set to true to log the sql calls and execution times.
97 | log_queries =
98 |
99 | #################################### Session ####################################
100 | [session]
101 | # Either "memory", "file", "redis", "mysql", "postgres", default is "file"
102 | ;provider = file
103 |
104 | # Provider config options
105 | # memory: does not have any config yet
106 | # file: session dir path, is relative to grafana data_path
107 | # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
108 | # mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
109 | # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
110 | ;provider_config = sessions
111 |
112 | # Session cookie name
113 | ;cookie_name = grafana_sess
114 |
115 | # If you use session in https only, default is false
116 | ;cookie_secure = false
117 |
118 | # Session life time, default is 86400
119 | ;session_life_time = 86400
120 |
121 | #################################### Data proxy ###########################
122 | [dataproxy]
123 |
124 | # This enables data proxy logging, default is false
125 | ;logging = false
126 |
127 | #################################### Analytics ####################################
128 | [analytics]
129 | # Server reporting, sends usage counters to stats.grafana.org every 24 hours.
130 | # No ip addresses are being tracked, only simple counters to track
131 | # running instances, dashboard and error counts. It is very helpful to us.
132 | # Change this option to false to disable reporting.
133 | ;reporting_enabled = true
134 |
135 | # Set to false to disable all checks to https://grafana.net
136 | # for new versions (grafana itself and plugins); the check is used
137 | # in some UI views to notify that grafana or plugin update exists
138 | # This option does not cause any auto updates, nor send any information
139 | # only a GET request to http://grafana.com to get latest versions
140 | ;check_for_updates = true
141 |
142 | # Google Analytics universal tracking code, only enabled if you specify an id here
143 | ;google_analytics_ua_id =
144 |
145 | #################################### Security ####################################
146 | [security]
147 | # default admin user, created on startup
148 | ;admin_user = admin
149 |
150 | # default admin password, can be changed before first start of grafana, or in profile settings
151 | ;admin_password = admin
152 |
153 | # used for signing
154 | ;secret_key = SW2YcwTIb9zpOOhoPsMm
155 |
156 | # Auto-login remember days
157 | ;login_remember_days = 7
158 | ;cookie_username = grafana_user
159 | ;cookie_remember_name = grafana_remember
160 |
161 | # disable gravatar profile images
162 | ;disable_gravatar = false
163 |
164 | # data source proxy whitelist (ip_or_domain:port separated by spaces)
165 | ;data_source_proxy_whitelist =
166 |
167 | # disable protection against brute force login attempts
168 | ;disable_brute_force_login_protection = false
169 |
170 | #################################### Snapshots ###########################
171 | [snapshots]
172 | # snapshot sharing options
173 | ;external_enabled = true
174 | ;external_snapshot_url = https://snapshots-origin.raintank.io
175 | ;external_snapshot_name = Publish to snapshot.raintank.io
176 |
177 | # remove expired snapshot
178 | ;snapshot_remove_expired = true
179 |
180 | #################################### Dashboards History ##################
181 | [dashboards]
182 | # Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
183 | ;versions_to_keep = 20
184 |
185 | #################################### Users ###############################
186 | [users]
187 | # disable user signup / registration
188 | ;allow_sign_up = true
189 |
190 | # Allow non admin users to create organizations
191 | ;allow_org_create = true
192 |
193 | # Set to true to automatically assign new users to the default organization (id 1)
194 | ;auto_assign_org = true
195 |
196 | # Default role new users will be automatically assigned (if disabled above is set to true)
197 | ;auto_assign_org_role = Viewer
198 |
199 | # Background text for the user field on the login page
200 | ;login_hint = email or username
201 |
202 | # Default UI theme ("dark" or "light")
203 | ;default_theme = dark
204 |
205 | # External user management, these options affect the organization users view
206 | ;external_manage_link_url =
207 | ;external_manage_link_name =
208 | ;external_manage_info =
209 |
210 | # Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
211 | ;viewers_can_edit = false
212 |
213 | [auth]
214 | # Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
215 | ;disable_login_form = false
216 |
217 | # Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
218 | ;disable_signout_menu = false
219 |
220 | # URL to redirect the user to after sign out
221 | ;signout_redirect_url =
222 |
223 | #################################### Anonymous Auth ##########################
224 | [auth.anonymous]
225 | # enable anonymous access
226 | enabled = true
227 |
228 | # specify organization name that should be used for unauthenticated users
229 | ;org_name = Main Org.
230 |
231 | # specify role for unauthenticated users
232 | ;org_role = Viewer
233 |
234 | #################################### Github Auth ##########################
235 | [auth.github]
236 | ;enabled = false
237 | ;allow_sign_up = true
238 | ;client_id = some_id
239 | ;client_secret = some_secret
240 | ;scopes = user:email,read:org
241 | ;auth_url = https://github.com/login/oauth/authorize
242 | ;token_url = https://github.com/login/oauth/access_token
243 | ;api_url = https://api.github.com/user
244 | ;team_ids =
245 | ;allowed_organizations =
246 |
247 | #################################### Google Auth ##########################
248 | [auth.google]
249 | ;enabled = false
250 | ;allow_sign_up = true
251 | ;client_id = some_client_id
252 | ;client_secret = some_client_secret
253 | ;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
254 | ;auth_url = https://accounts.google.com/o/oauth2/auth
255 | ;token_url = https://accounts.google.com/o/oauth2/token
256 | ;api_url = https://www.googleapis.com/oauth2/v1/userinfo
257 | ;allowed_domains =
258 |
259 | #################################### Generic OAuth ##########################
260 | [auth.generic_oauth]
261 | ;enabled = false
262 | ;name = OAuth
263 | ;allow_sign_up = true
264 | ;client_id = some_id
265 | ;client_secret = some_secret
266 | ;scopes = user:email,read:org
267 | ;auth_url = https://foo.bar/login/oauth/authorize
268 | ;token_url = https://foo.bar/login/oauth/access_token
269 | ;api_url = https://foo.bar/user
270 | ;team_ids =
271 | ;allowed_organizations =
272 |
273 | #################################### Grafana.com Auth ####################
274 | [auth.grafana_com]
275 | ;enabled = false
276 | ;allow_sign_up = true
277 | ;client_id = some_id
278 | ;client_secret = some_secret
279 | ;scopes = user:email
280 | ;allowed_organizations =
281 |
282 | #################################### Auth Proxy ##########################
283 | [auth.proxy]
284 | ;enabled = false
285 | ;header_name = X-WEBAUTH-USER
286 | ;header_property = username
287 | ;auto_sign_up = true
288 | ;ldap_sync_ttl = 60
289 | ;whitelist = 192.168.1.1, 192.168.2.1
290 |
291 | #################################### Basic Auth ##########################
292 | [auth.basic]
293 | ;enabled = true
294 |
295 | #################################### Auth LDAP ##########################
296 | [auth.ldap]
297 | ;enabled = false
298 | ;config_file = /etc/grafana/ldap.toml
299 | ;allow_sign_up = true
300 |
301 | #################################### SMTP / Emailing ##########################
302 | [smtp]
303 | ;enabled = false
304 | ;host = localhost:25
305 | ;user =
306 | # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
307 | ;password =
308 | ;cert_file =
309 | ;key_file =
310 | ;skip_verify = false
311 | ;from_address = admin@grafana.localhost
312 | ;from_name = Grafana
313 | # EHLO identity in SMTP dialog (defaults to instance_name)
314 | ;ehlo_identity = dashboard.example.com
315 |
316 | [emails]
317 | ;welcome_email_on_sign_up = false
318 |
319 | #################################### Logging ##########################
320 | [log]
321 | # Either "console", "file", "syslog". Default is console and file
322 | # Use space to separate multiple modes, e.g. "console file"
323 | ;mode = console file
324 |
325 | # Either "debug", "info", "warn", "error", "critical", default is "info"
326 | ;level = info
327 |
328 | # optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
329 | ;filters =
330 |
331 | # For "console" mode only
332 | [log.console]
333 | ;level =
334 |
335 | # log line format, valid options are text, console and json
336 | ;format = console
337 |
338 | # For "file" mode only
339 | [log.file]
340 | ;level =
341 |
342 | # log line format, valid options are text, console and json
343 | ;format = text
344 |
345 | # This enables automated log rotation (a switch for the following options), default is true
346 | ;log_rotate = true
347 |
348 | # Max line number of single file, default is 1000000
349 | ;max_lines = 1000000
350 |
351 | # Max size shift of a single file, default is 28, meaning 1 << 28 = 256MB
352 | ;max_size_shift = 28
353 |
354 | # Segment log daily, default is true
355 | ;daily_rotate = true
356 |
357 | # Expired days of log file(delete after max days), default is 7
358 | ;max_days = 7
359 |
360 | [log.syslog]
361 | ;level =
362 |
363 | # log line format, valid options are text, console and json
364 | ;format = text
365 |
366 | # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
367 | ;network =
368 | ;address =
369 |
370 | # Syslog facility. user, daemon and local0 through local7 are valid.
371 | ;facility =
372 |
373 | # Syslog tag. By default, the process' argv[0] is used.
374 | ;tag =
375 |
376 | #################################### Alerting ############################
377 | [alerting]
378 | # Disable alerting engine & UI features
379 | ;enabled = true
380 | # Makes it possible to turn off alert rule execution but alerting UI is visible
381 | ;execute_alerts = true
382 |
383 | #################################### Explore #############################
384 | [explore]
385 | # Enable the Explore section
386 | ;enabled = false
387 |
388 | #################################### Internal Grafana Metrics ##########################
389 | # Metrics available at HTTP API Url /metrics
390 | [metrics]
391 | # Disable / Enable internal metrics
392 | ;enabled = true
393 |
394 | # Publish interval
395 | ;interval_seconds = 10
396 |
397 | # Send internal metrics to Graphite
398 | [metrics.graphite]
399 | # Enable by setting the address setting (ex localhost:2003)
400 | ;address =
401 | ;prefix = prod.grafana.%(instance_name)s.
402 |
403 | #################################### Distributed tracing ############
404 | [tracing.jaeger]
405 | # Enable by setting the address sending traces to jaeger (ex localhost:6831)
406 | ;address = localhost:6831
407 | # Tag that will always be included when creating new spans. ex (tag1:value1,tag2:value2)
408 | ;always_included_tag = tag1:value1
409 | # Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
410 | ;sampler_type = const
411 | # jaeger samplerconfig param
412 | # for "const" sampler, 0 or 1 for always false/true respectively
413 | # for "probabilistic" sampler, a probability between 0 and 1
414 | # for "rateLimiting" sampler, the number of spans per second
415 | # for "remote" sampler, param is the same as for "probabilistic"
416 | # and indicates the initial sampling rate before the actual one
417 | # is received from the mothership
418 | ;sampler_param = 1
419 |
420 | #################################### Grafana.com integration ##########################
421 | # Url used to import dashboards directly from Grafana.com
422 | [grafana_com]
423 | ;url = https://grafana.com
424 |
425 | #################################### External image storage ##########################
426 | [external_image_storage]
427 | # Used for uploading images to public servers so they can be included in slack/email messages.
428 | # you can choose between (s3, webdav, gcs, azure_blob, local)
429 | ;provider =
430 |
431 | [external_image_storage.s3]
432 | ;bucket =
433 | ;region =
434 | ;path =
435 | ;access_key =
436 | ;secret_key =
437 |
438 | [external_image_storage.webdav]
439 | ;url =
440 | ;public_url =
441 | ;username =
442 | ;password =
443 |
444 | [external_image_storage.gcs]
445 | ;key_file =
446 | ;bucket =
447 | ;path =
448 |
449 | [external_image_storage.azure_blob]
450 | ;account_name =
451 | ;account_key =
452 | ;container_name =
453 |
454 | [external_image_storage.local]
455 | # does not require any configuration
456 |
--------------------------------------------------------------------------------
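Since [auth.anonymous] is enabled above, the HTTP API answers without credentials; admin:admin (see [security]) still works for the full API. Quick checks:

```sh
curl http://127.0.0.1:3000/api/health                       # no auth required
curl -u admin:admin http://127.0.0.1:3000/api/datasources   # full API
```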
/grafana/root/var/lib/grafana/dashboards/graphite-clickhouse.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": "-- Grafana --",
7 | "enable": true,
8 | "hide": true,
9 | "iconColor": "rgba(0, 211, 255, 1)",
10 | "name": "Annotations & Alerts",
11 | "type": "dashboard"
12 | }
13 | ]
14 | },
15 | "editable": true,
16 | "gnetId": null,
17 | "graphTooltip": 0,
18 | "id": 3,
19 | "links": [],
20 | "panels": [
21 | {
22 | "alert": {
23 | "conditions": [
24 | {
25 | "evaluator": {
26 | "params": [100],
27 | "type": "lt"
28 | },
29 | "operator": {
30 | "type": "and"
31 | },
32 | "query": {
33 | "params": ["A", "3m", "now"]
34 | },
35 | "reducer": {
36 | "params": [],
37 | "type": "avg"
38 | },
39 | "type": "query"
40 | }
41 | ],
42 | "executionErrorState": "alerting",
43 | "frequency": "60s",
44 | "handler": 1,
45 | "name": "Carbon-clickhouse [ metricsReceived ] alert",
46 | "noDataState": "alerting",
47 | "notifications": []
48 | },
49 | "aliasColors": {},
50 | "bars": false,
51 | "dashLength": 10,
52 | "dashes": false,
53 | "datasource": null,
54 | "fill": 1,
55 | "gridPos": {
56 | "h": 9,
57 | "w": 12,
58 | "x": 0,
59 | "y": 0
60 | },
61 | "id": 12,
62 | "legend": {
63 | "avg": false,
64 | "current": false,
65 | "max": false,
66 | "min": false,
67 | "show": true,
68 | "total": false,
69 | "values": false
70 | },
71 | "lines": true,
72 | "linewidth": 1,
73 | "links": [],
74 | "nullPointMode": "null as zero",
75 | "percentage": false,
76 | "pointradius": 5,
77 | "points": false,
78 | "renderer": "flot",
79 | "seriesOverrides": [],
80 | "spaceLength": 10,
81 | "stack": false,
82 | "steppedLine": false,
83 | "targets": [
84 | {
85 | "refId": "A",
86 | "target": "alias(sumSeries(keepLastValue(sortByMaxima(resources.monitoring.carbon-clickhouse.*.tcp.metricsReceived), 1)), 'count')",
87 | "textEditor": true
88 | }
89 | ],
90 | "thresholds": [
91 | {
92 | "colorMode": "critical",
93 | "fill": true,
94 | "line": true,
95 | "op": "lt",
96 | "value": 100
97 | }
98 | ],
99 | "timeFrom": null,
100 | "timeShift": null,
101 | "title": "Carbon-clickhouse [ metricsReceived ]",
102 | "tooltip": {
103 | "shared": true,
104 | "sort": 0,
105 | "value_type": "individual"
106 | },
107 | "transparent": true,
108 | "type": "graph",
109 | "xaxis": {
110 | "buckets": null,
111 | "mode": "time",
112 | "name": null,
113 | "show": true,
114 | "values": []
115 | },
116 | "yaxes": [
117 | {
118 | "format": "short",
119 | "label": null,
120 | "logBase": 1,
121 | "max": null,
122 | "min": null,
123 | "show": true
124 | },
125 | {
126 | "format": "short",
127 | "label": null,
128 | "logBase": 1,
129 | "max": null,
130 | "min": null,
131 | "show": true
132 | }
133 | ],
134 | "yaxis": {
135 | "align": false,
136 | "alignLevel": null
137 | }
138 | },
139 | {
140 | "aliasColors": {},
141 | "bars": false,
142 | "dashLength": 10,
143 | "dashes": false,
144 | "datasource": null,
145 | "fill": 1,
146 | "gridPos": {
147 | "h": 9,
148 | "w": 6,
149 | "x": 12,
150 | "y": 0
151 | },
152 | "id": 14,
153 | "legend": {
154 | "avg": false,
155 | "current": false,
156 | "max": false,
157 | "min": false,
158 | "show": true,
159 | "total": false,
160 | "values": false
161 | },
162 | "lines": true,
163 | "linewidth": 1,
164 | "links": [],
165 | "nullPointMode": "null",
166 | "percentage": false,
167 | "pointradius": 5,
168 | "points": false,
169 | "renderer": "flot",
170 | "seriesOverrides": [
171 | {
172 | "alias": "SelectQuery",
173 | "transform": "negative-Y"
174 | }
175 | ],
176 | "spaceLength": 10,
177 | "stack": false,
178 | "steppedLine": false,
179 | "targets": [
180 | {
181 | "refCount": 0,
182 | "refId": "B",
183 | "target": "aliasByMetric(sumSeries(resources.monitoring.clickhouse.*.*.*.InsertQuery))",
184 | "textEditor": true
185 | },
186 | {
187 | "refCount": 0,
188 | "refId": "A",
189 | "target": "aliasByMetric(sumSeries(resources.monitoring.clickhouse.*.*.*.SelectQuery))",
190 | "textEditor": true
191 | }
192 | ],
193 | "thresholds": [],
194 | "timeFrom": null,
195 | "timeShift": null,
196 | "title": "clickhouse-servers [ Queries ]",
197 | "tooltip": {
198 | "shared": true,
199 | "sort": 0,
200 | "value_type": "individual"
201 | },
202 | "type": "graph",
203 | "xaxis": {
204 | "buckets": null,
205 | "mode": "time",
206 | "name": null,
207 | "show": true,
208 | "values": []
209 | },
210 | "yaxes": [
211 | {
212 | "format": "short",
213 | "label": null,
214 | "logBase": 1,
215 | "max": null,
216 | "min": null,
217 | "show": true
218 | },
219 | {
220 | "format": "short",
221 | "label": null,
222 | "logBase": 1,
223 | "max": null,
224 | "min": null,
225 | "show": true
226 | }
227 | ],
228 | "yaxis": {
229 | "align": false,
230 | "alignLevel": null
231 | }
232 | },
233 | {
234 | "aliasColors": {},
235 | "bars": false,
236 | "dashLength": 10,
237 | "dashes": false,
238 | "datasource": null,
239 | "fill": 1,
240 | "gridPos": {
241 | "h": 9,
242 | "w": 6,
243 | "x": 18,
244 | "y": 0
245 | },
246 | "id": 15,
247 | "legend": {
248 | "avg": false,
249 | "current": false,
250 | "max": false,
251 | "min": false,
252 | "show": true,
253 | "total": false,
254 | "values": false
255 | },
256 | "lines": true,
257 | "linewidth": 1,
258 | "links": [],
259 | "nullPointMode": "null",
260 | "percentage": false,
261 | "pointradius": 5,
262 | "points": false,
263 | "renderer": "flot",
264 | "seriesOverrides": [
265 | {
266 | "alias": "SelectQuery",
267 | "transform": "negative-Y"
268 | }
269 | ],
270 | "spaceLength": 10,
271 | "stack": false,
272 | "steppedLine": false,
273 | "targets": [
274 | {
275 | "refCount": 0,
276 | "refId": "B",
277 | "target": "alias(sumSeries(keepLastValue(nonNegativeDerivative(resources.monitoring.carbonapi.*.total_alloc), 3)), 'Total')",
278 | "textEditor": true
279 | }
280 | ],
281 | "thresholds": [],
282 | "timeFrom": null,
283 | "timeShift": null,
284 | "title": "CarbonAPI [ Mem alloc ]",
285 | "tooltip": {
286 | "shared": true,
287 | "sort": 0,
288 | "value_type": "individual"
289 | },
290 | "type": "graph",
291 | "xaxis": {
292 | "buckets": null,
293 | "mode": "time",
294 | "name": null,
295 | "show": true,
296 | "values": []
297 | },
298 | "yaxes": [
299 | {
300 | "format": "decbytes",
301 | "label": null,
302 | "logBase": 1,
303 | "max": null,
304 | "min": null,
305 | "show": true
306 | },
307 | {
308 | "format": "short",
309 | "label": null,
310 | "logBase": 1,
311 | "max": null,
312 | "min": null,
313 | "show": true
314 | }
315 | ],
316 | "yaxis": {
317 | "align": false,
318 | "alignLevel": null
319 | }
320 | },
321 | {
322 | "aliasColors": {},
323 | "bars": false,
324 | "dashLength": 10,
325 | "dashes": false,
326 | "datasource": null,
327 | "fill": 1,
328 | "gridPos": {
329 | "h": 7,
330 | "w": 12,
331 | "x": 0,
332 | "y": 9
333 | },
334 | "id": 2,
335 | "legend": {
336 | "alignAsTable": true,
337 | "avg": false,
338 | "current": true,
339 | "max": true,
340 | "min": false,
341 | "rightSide": true,
342 | "show": true,
343 | "total": false,
344 | "values": true
345 | },
346 | "lines": true,
347 | "linewidth": 1,
348 | "links": [],
349 | "nullPointMode": "null",
350 | "percentage": false,
351 | "pointradius": 5,
352 | "points": false,
353 | "renderer": "flot",
354 | "seriesOverrides": [],
355 | "spaceLength": 10,
356 | "stack": false,
357 | "steppedLine": false,
358 | "targets": [
359 | {
360 | "refId": "A",
361 | "target": "aliasSub(sortByMaxima(aliasByMetric(servers.apphost.cpu-total.*.*_{user,system,softirq,w*})), 'usage_', '\\1')",
362 | "textEditor": true
363 | }
364 | ],
365 | "thresholds": [],
366 | "timeFrom": null,
367 | "timeShift": null,
368 | "title": "CPU",
369 | "tooltip": {
370 | "shared": true,
371 | "sort": 0,
372 | "value_type": "individual"
373 | },
374 | "type": "graph",
375 | "xaxis": {
376 | "buckets": null,
377 | "mode": "time",
378 | "name": null,
379 | "show": true,
380 | "values": []
381 | },
382 | "yaxes": [
383 | {
384 | "format": "short",
385 | "label": null,
386 | "logBase": 1,
387 | "max": null,
388 | "min": null,
389 | "show": true
390 | },
391 | {
392 | "format": "short",
393 | "label": null,
394 | "logBase": 1,
395 | "max": null,
396 | "min": null,
397 | "show": true
398 | }
399 | ],
400 | "yaxis": {
401 | "align": false,
402 | "alignLevel": null
403 | }
404 | },
405 | {
406 | "aliasColors": {},
407 | "bars": false,
408 | "dashLength": 10,
409 | "dashes": false,
410 | "datasource": null,
411 | "fill": 1,
412 | "gridPos": {
413 | "h": 7,
414 | "w": 12,
415 | "x": 12,
416 | "y": 9
417 | },
418 | "id": 3,
419 | "legend": {
420 | "alignAsTable": true,
421 | "avg": false,
422 | "current": true,
423 | "max": true,
424 | "min": false,
425 | "rightSide": true,
426 | "show": true,
427 | "total": false,
428 | "values": true
429 | },
430 | "lines": true,
431 | "linewidth": 1,
432 | "links": [],
433 | "nullPointMode": "null",
434 | "percentage": false,
435 | "pointradius": 5,
436 | "points": false,
437 | "renderer": "flot",
438 | "seriesOverrides": [],
439 | "spaceLength": 10,
440 | "stack": false,
441 | "steppedLine": false,
442 | "targets": [
443 | {
444 | "refId": "A",
445 | "target": "aliasSub(sortByMaxima(aliasByMetric(servers.apphost.mem.{used,buffered,slab_recl,ca*})), 'usage_', '\\1')",
446 | "textEditor": true
447 | }
448 | ],
449 | "thresholds": [],
450 | "timeFrom": null,
451 | "timeShift": null,
452 | "title": "Memory",
453 | "tooltip": {
454 | "shared": true,
455 | "sort": 0,
456 | "value_type": "individual"
457 | },
458 | "type": "graph",
459 | "xaxis": {
460 | "buckets": null,
461 | "mode": "time",
462 | "name": null,
463 | "show": true,
464 | "values": []
465 | },
466 | "yaxes": [
467 | {
468 | "format": "bytes",
469 | "label": null,
470 | "logBase": 1,
471 | "max": null,
472 | "min": null,
473 | "show": true
474 | },
475 | {
476 | "format": "short",
477 | "label": null,
478 | "logBase": 1,
479 | "max": null,
480 | "min": null,
481 | "show": true
482 | }
483 | ],
484 | "yaxis": {
485 | "align": false,
486 | "alignLevel": null
487 | }
488 | },
489 | {
490 | "aliasColors": {},
491 | "bars": false,
492 | "dashLength": 10,
493 | "dashes": false,
494 | "datasource": null,
495 | "fill": 1,
496 | "gridPos": {
497 | "h": 6,
498 | "w": 24,
499 | "x": 0,
500 | "y": 16
501 | },
502 | "id": 5,
503 | "legend": {
504 | "avg": false,
505 | "current": false,
506 | "max": false,
507 | "min": false,
508 | "show": true,
509 | "total": false,
510 | "values": false
511 | },
512 | "lines": true,
513 | "linewidth": 1,
514 | "links": [],
515 | "nullPointMode": "null",
516 | "percentage": false,
517 | "pointradius": 5,
518 | "points": false,
519 | "renderer": "flot",
520 | "seriesOverrides": [],
521 | "spaceLength": 10,
522 | "stack": false,
523 | "steppedLine": false,
524 | "targets": [
525 | {
526 | "refCount": 0,
527 | "refId": "A",
528 | "target": "alias(servers.*.system.load1, 'shortterm')"
529 | },
530 | {
531 | "refCount": 0,
532 | "refId": "B",
533 | "target": "alias(servers.*.system.load5, 'midterm')"
534 | },
535 | {
536 | "refCount": 0,
537 | "refId": "C",
538 | "target": "alias(servers.*.system.load15, 'longterm')"
539 | }
540 | ],
541 | "thresholds": [],
542 | "timeFrom": null,
543 | "timeShift": null,
544 | "title": "Load Average",
545 | "tooltip": {
546 | "shared": true,
547 | "sort": 0,
548 | "value_type": "individual"
549 | },
550 | "type": "graph",
551 | "xaxis": {
552 | "buckets": null,
553 | "mode": "time",
554 | "name": null,
555 | "show": true,
556 | "values": []
557 | },
558 | "yaxes": [
559 | {
560 | "format": "short",
561 | "label": null,
562 | "logBase": 1,
563 | "max": null,
564 | "min": null,
565 | "show": true
566 | },
567 | {
568 | "format": "short",
569 | "label": null,
570 | "logBase": 1,
571 | "max": null,
572 | "min": null,
573 | "show": true
574 | }
575 | ],
576 | "yaxis": {
577 | "align": false,
578 | "alignLevel": null
579 | }
580 | },
581 | {
582 | "aliasColors": {},
583 | "bars": false,
584 | "dashLength": 10,
585 | "dashes": false,
586 | "datasource": null,
587 | "fill": 1,
588 | "gridPos": {
589 | "h": 7,
590 | "w": 12,
591 | "x": 0,
592 | "y": 22
593 | },
594 | "id": 8,
595 | "legend": {
596 | "alignAsTable": true,
597 | "avg": false,
598 | "current": true,
599 | "max": true,
600 | "min": true,
601 | "rightSide": true,
602 | "show": true,
603 | "total": false,
604 | "values": true
605 | },
606 | "lines": true,
607 | "linewidth": 1,
608 | "links": [],
609 | "nullPointMode": "null",
610 | "percentage": false,
611 | "pointradius": 5,
612 | "points": false,
613 | "renderer": "flot",
614 | "seriesOverrides": [],
615 | "spaceLength": 10,
616 | "stack": false,
617 | "steppedLine": false,
618 | "targets": [
619 | {
620 | "refCount": 0,
621 | "refId": "A",
622 | "target": "aliasByNode(nonNegativeDerivative(scale(servers.*.sd*.*.io_time, 0.1)), 2, 5)",
623 | "textEditor": false
624 | }
625 | ],
626 | "thresholds": [],
627 | "timeFrom": null,
628 | "timeShift": null,
629 | "title": "Disk utilization",
630 | "tooltip": {
631 | "shared": true,
632 | "sort": 0,
633 | "value_type": "individual"
634 | },
635 | "type": "graph",
636 | "xaxis": {
637 | "buckets": null,
638 | "mode": "time",
639 | "name": null,
640 | "show": true,
641 | "values": []
642 | },
643 | "yaxes": [
644 | {
645 | "format": "percent",
646 | "label": null,
647 | "logBase": 1,
648 | "max": "100",
649 | "min": "0",
650 | "show": true
651 | },
652 | {
653 | "format": "short",
654 | "label": null,
655 | "logBase": 1,
656 | "max": null,
657 | "min": null,
658 | "show": true
659 | }
660 | ],
661 | "yaxis": {
662 | "align": false,
663 | "alignLevel": null
664 | }
665 | },
666 | {
667 | "aliasColors": {},
668 | "bars": false,
669 | "dashLength": 10,
670 | "dashes": false,
671 | "datasource": null,
672 | "fill": 1,
673 | "gridPos": {
674 | "h": 7,
675 | "w": 12,
676 | "x": 12,
677 | "y": 22
678 | },
679 | "id": 7,
680 | "legend": {
681 | "alignAsTable": true,
682 | "avg": false,
683 | "current": true,
684 | "max": false,
685 | "min": false,
686 | "rightSide": true,
687 | "show": true,
688 | "sortDesc": true,
689 | "total": false,
690 | "values": true
691 | },
692 | "lines": true,
693 | "linewidth": 1,
694 | "links": [],
695 | "nullPointMode": "null",
696 | "percentage": false,
697 | "pointradius": 5,
698 | "points": false,
699 | "renderer": "flot",
700 | "seriesOverrides": [],
701 | "spaceLength": 10,
702 | "stack": false,
703 | "steppedLine": false,
704 | "targets": [
705 | {
706 | "refCount": 0,
707 | "refId": "B",
708 | "target": "aliasByNode(servers.*.*-_Private.*.*.*.*.used_percent, 2)",
709 | "textEditor": true
710 | },
711 | {
712 | "refCount": 0,
713 | "refId": "A",
714 | "target": "aliasByNode(servers.*.sd*.*.*.*.*.used_percent, 2, 5)",
715 | "textEditor": true
716 | }
717 | ],
718 | "thresholds": [],
719 | "timeFrom": null,
720 | "timeShift": null,
721 | "title": "Disk used",
722 | "tooltip": {
723 | "shared": true,
724 | "sort": 0,
725 | "value_type": "individual"
726 | },
727 | "type": "graph",
728 | "xaxis": {
729 | "buckets": null,
730 | "mode": "time",
731 | "name": null,
732 | "show": true,
733 | "values": []
734 | },
735 | "yaxes": [
736 | {
737 | "format": "percent",
738 | "label": null,
739 | "logBase": 1,
740 | "max": null,
741 | "min": "0",
742 | "show": true
743 | },
744 | {
745 | "format": "short",
746 | "label": null,
747 | "logBase": 1,
748 | "max": null,
749 | "min": null,
750 | "show": true
751 | }
752 | ],
753 | "yaxis": {
754 | "align": false,
755 | "alignLevel": null
756 | }
757 | },
758 | {
759 | "aliasColors": {},
760 | "bars": false,
761 | "dashLength": 10,
762 | "dashes": false,
763 | "datasource": null,
764 | "fill": 1,
765 | "gridPos": {
766 | "h": 7,
767 | "w": 24,
768 | "x": 0,
769 | "y": 29
770 | },
771 | "id": 10,
772 | "legend": {
773 | "avg": false,
774 | "current": false,
775 | "max": false,
776 | "min": false,
777 | "show": true,
778 | "total": false,
779 | "values": false
780 | },
781 | "lines": true,
782 | "linewidth": 1,
783 | "links": [],
784 | "nullPointMode": "null",
785 | "percentage": false,
786 | "pointradius": 5,
787 | "points": false,
788 | "renderer": "flot",
789 | "seriesOverrides": [
790 | {
791 | "alias": "drop",
792 | "yaxis": 2
793 | },
794 | {
795 | "alias": "bytes_sent",
796 | "transform": "negative-Y"
797 | }
798 | ],
799 | "spaceLength": 10,
800 | "stack": false,
801 | "steppedLine": false,
802 | "targets": [
803 | {
804 | "refCount": 0,
805 | "refId": "A",
806 | "target": "aliasByNode(nonNegativeDerivative(servers.*.eth0.net.bytes_sent), 4)"
807 | },
808 | {
809 | "refCount": 0,
810 | "refId": "B",
811 | "target": "aliasByNode(nonNegativeDerivative(servers.*.eth0.net.bytes_recv), 4)"
812 | },
813 | {
814 | "refCount": 0,
815 | "refId": "D",
816 | "target": "alias(sumSeries(nonNegativeDerivative(servers.*.eth0.net.drop_*)), 'drop')"
817 | }
818 | ],
819 | "thresholds": [],
820 | "timeFrom": null,
821 | "timeShift": null,
822 | "title": "Network",
823 | "tooltip": {
824 | "shared": true,
825 | "sort": 0,
826 | "value_type": "individual"
827 | },
828 | "type": "graph",
829 | "xaxis": {
830 | "buckets": null,
831 | "mode": "time",
832 | "name": null,
833 | "show": true,
834 | "values": []
835 | },
836 | "yaxes": [
837 | {
838 | "format": "Bps",
839 | "label": null,
840 | "logBase": 1,
841 | "max": null,
842 | "min": null,
843 | "show": true
844 | },
845 | {
846 | "format": "short",
847 | "label": null,
848 | "logBase": 1,
849 | "max": null,
850 | "min": null,
851 | "show": true
852 | }
853 | ],
854 | "yaxis": {
855 | "align": false,
856 | "alignLevel": null
857 | }
858 | },
859 | {
860 | "collapsed": false,
861 | "gridPos": {
862 | "h": 1,
863 | "w": 24,
864 | "x": 0,
865 | "y": 36
866 | },
867 | "id": 21,
868 | "panels": [],
869 | "title": "StatsD",
870 | "type": "row"
871 | },
872 | {
873 | "aliasColors": {},
874 | "bars": false,
875 | "dashLength": 10,
876 | "dashes": false,
877 | "datasource": null,
878 | "fill": 1,
879 | "gridPos": {
880 | "h": 7,
881 | "w": 8,
882 | "x": 0,
883 | "y": 37
884 | },
885 | "id": 17,
886 | "legend": {
887 | "avg": false,
888 | "current": false,
889 | "max": false,
890 | "min": false,
891 | "show": true,
892 | "total": false,
893 | "values": false
894 | },
895 | "lines": true,
896 | "linewidth": 1,
897 | "links": [],
898 | "nullPointMode": "connected",
899 | "percentage": false,
900 | "pointradius": 5,
901 | "points": false,
902 | "renderer": "flot",
903 | "seriesOverrides": [],
904 | "spaceLength": 10,
905 | "stack": false,
906 | "steppedLine": false,
907 | "targets": [
908 | {
909 | "refId": "A",
910 | "target": "aliasByNode(resources.monitoring.bioyino.ingress*, 3)",
911 | "textEditor": true
912 | }
913 | ],
914 | "thresholds": [],
915 | "timeFrom": null,
916 | "timeShift": null,
917 | "title": "bioyino [ingress]",
918 | "tooltip": {
919 | "shared": true,
920 | "sort": 0,
921 | "value_type": "individual"
922 | },
923 | "type": "graph",
924 | "xaxis": {
925 | "buckets": null,
926 | "mode": "time",
927 | "name": null,
928 | "show": true,
929 | "values": []
930 | },
931 | "yaxes": [
932 | {
933 | "format": "short",
934 | "label": null,
935 | "logBase": 1,
936 | "max": null,
937 | "min": null,
938 | "show": true
939 | },
940 | {
941 | "format": "short",
942 | "label": null,
943 | "logBase": 1,
944 | "max": null,
945 | "min": null,
946 | "show": true
947 | }
948 | ],
949 | "yaxis": {
950 | "align": false,
951 | "alignLevel": null
952 | }
953 | },
954 | {
955 | "aliasColors": {},
956 | "bars": false,
957 | "dashLength": 10,
958 | "dashes": false,
959 | "datasource": null,
960 | "fill": 1,
961 | "gridPos": {
962 | "h": 7,
963 | "w": 8,
964 | "x": 8,
965 | "y": 37
966 | },
967 | "id": 22,
968 | "legend": {
969 | "avg": false,
970 | "current": false,
971 | "max": false,
972 | "min": false,
973 | "show": true,
974 | "total": false,
975 | "values": false
976 | },
977 | "lines": true,
978 | "linewidth": 1,
979 | "links": [],
980 | "nullPointMode": "connected",
981 | "percentage": false,
982 | "pointradius": 5,
983 | "points": false,
984 | "renderer": "flot",
985 | "seriesOverrides": [],
986 | "spaceLength": 10,
987 | "stack": false,
988 | "steppedLine": false,
989 | "targets": [
990 | {
991 | "refId": "A",
992 | "target": "aliasByNode(resources.monitoring.bioyino.egress, 3)",
993 | "textEditor": true
994 | }
995 | ],
996 | "thresholds": [],
997 | "timeFrom": null,
998 | "timeShift": null,
999 | "title": "bioyino [egress]",
1000 | "tooltip": {
1001 | "shared": true,
1002 | "sort": 0,
1003 | "value_type": "individual"
1004 | },
1005 | "type": "graph",
1006 | "xaxis": {
1007 | "buckets": null,
1008 | "mode": "time",
1009 | "name": null,
1010 | "show": true,
1011 | "values": []
1012 | },
1013 | "yaxes": [
1014 | {
1015 | "format": "short",
1016 | "label": null,
1017 | "logBase": 1,
1018 | "max": null,
1019 | "min": null,
1020 | "show": true
1021 | },
1022 | {
1023 | "format": "short",
1024 | "label": null,
1025 | "logBase": 1,
1026 | "max": null,
1027 | "min": null,
1028 | "show": true
1029 | }
1030 | ],
1031 | "yaxis": {
1032 | "align": false,
1033 | "alignLevel": null
1034 | }
1035 | },
1036 | {
1037 | "aliasColors": {},
1038 | "bars": false,
1039 | "dashLength": 10,
1040 | "dashes": false,
1041 | "datasource": null,
1042 | "fill": 1,
1043 | "gridPos": {
1044 | "h": 7,
1045 | "w": 8,
1046 | "x": 16,
1047 | "y": 37
1048 | },
1049 | "id": 23,
1050 | "legend": {
1051 | "avg": false,
1052 | "current": false,
1053 | "max": false,
1054 | "min": false,
1055 | "show": true,
1056 | "total": false,
1057 | "values": false
1058 | },
1059 | "lines": true,
1060 | "linewidth": 1,
1061 | "links": [],
1062 | "nullPointMode": "connected",
1063 | "percentage": false,
1064 | "pointradius": 5,
1065 | "points": false,
1066 | "renderer": "flot",
1067 | "seriesOverrides": [],
1068 | "spaceLength": 10,
1069 | "stack": false,
1070 | "steppedLine": false,
1071 | "targets": [
1072 | {
1073 | "refId": "A",
1074 | "target": "aliasByMetric(resources.monitoring.bioyino.{*-error,drop})",
1075 | "textEditor": true
1076 | }
1077 | ],
1078 | "thresholds": [],
1079 | "timeFrom": null,
1080 | "timeShift": null,
1081 | "title": "bioyino [errors]",
1082 | "tooltip": {
1083 | "shared": true,
1084 | "sort": 0,
1085 | "value_type": "individual"
1086 | },
1087 | "type": "graph",
1088 | "xaxis": {
1089 | "buckets": null,
1090 | "mode": "time",
1091 | "name": null,
1092 | "show": true,
1093 | "values": []
1094 | },
1095 | "yaxes": [
1096 | {
1097 | "format": "short",
1098 | "label": null,
1099 | "logBase": 1,
1100 | "max": null,
1101 | "min": null,
1102 | "show": true
1103 | },
1104 | {
1105 | "format": "short",
1106 | "label": null,
1107 | "logBase": 1,
1108 | "max": null,
1109 | "min": null,
1110 | "show": true
1111 | }
1112 | ],
1113 | "yaxis": {
1114 | "align": false,
1115 | "alignLevel": null
1116 | }
1117 | }
1118 | ],
1119 | "refresh": "10s",
1120 | "schemaVersion": 16,
1121 | "style": "dark",
1122 | "tags": ["monitoring"],
1123 | "templating": {
1124 | "list": []
1125 | },
1126 | "time": {
1127 | "from": "now-1h",
1128 | "to": "now"
1129 | },
1130 | "timepicker": {
1131 | "refresh_intervals": [
1132 | "5s",
1133 | "10s",
1134 | "30s",
1135 | "1m",
1136 | "5m",
1137 | "15m",
1138 | "30m",
1139 | "1h",
1140 | "2h",
1141 | "1d"
1142 | ],
1143 | "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"]
1144 | },
1145 | "timezone": "",
1146 | "title": "Graphite-clickhouse",
1147 | "version": 5
1148 | }
1149 |
--------------------------------------------------------------------------------
/telegraf/root/etc/telegraf/telegraf.conf:
--------------------------------------------------------------------------------
1 | # Telegraf Configuration
2 | #
3 | # Telegraf is entirely plugin driven. All metrics are gathered from the
4 | # declared inputs, and sent to the declared outputs.
5 | #
6 | # Plugins must be declared in here to be active.
7 | # To deactivate a plugin, comment out the name and any variables.
8 | #
9 | # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
10 | # file would generate.
11 | #
12 | # Environment variables can be used anywhere in this config file, simply prepend
13 | # them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
14 | # for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
15 |
16 |
17 | # Global tags can be specified here in key="value" format.
18 | [global_tags]
19 | # dc = "us-east-1" # will tag all metrics with dc=us-east-1
20 | # rack = "1a"
21 | ## Environment variables can be used as tags, and throughout the config file
22 | # user = "$USER"
23 |
24 |
25 | # Configuration for telegraf agent
26 | [agent]
27 | ## Default data collection interval for all inputs
28 | interval = "10s"
29 | ## Rounds collection interval to 'interval'
30 | ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
31 | round_interval = true
32 |
33 | ## Telegraf will send metrics to outputs in batches of at most
34 | ## metric_batch_size metrics.
35 | ## This controls the size of writes that Telegraf sends to output plugins.
36 | metric_batch_size = 1000
37 |
38 | ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
39 | ## output, and will flush this buffer on a successful write. Oldest metrics
40 | ## are dropped first when this buffer fills.
41 | ## This buffer only fills when writes fail to output plugin(s).
42 | metric_buffer_limit = 10000
43 |
44 | ## Collection jitter is used to jitter the collection by a random amount.
45 | ## Each plugin will sleep for a random time within jitter before collecting.
46 | ## This can be used to avoid many plugins querying things like sysfs at the
47 | ## same time, which can have a measurable effect on the system.
48 | collection_jitter = "0s"
49 |
50 | ## Default flushing interval for all outputs. You shouldn't set this below
51 | ## interval. Maximum flush_interval will be flush_interval + flush_jitter
52 | flush_interval = "10s"
53 | ## Jitter the flush interval by a random amount. This is primarily to avoid
54 | ## large write spikes for users running a large number of telegraf instances.
55 | ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
56 | flush_jitter = "0s"
57 |
58 | ## By default or when set to "0s", precision will be set to the same
59 | ## timestamp order as the collection interval, with the maximum being 1s.
60 | ## ie, when interval = "10s", precision will be "1s"
61 | ## when interval = "250ms", precision will be "1ms"
62 | ## Precision will NOT be used for service inputs. It is up to each individual
63 | ## service input to set the timestamp at the appropriate precision.
64 | ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
65 | precision = ""
66 |
67 | ## Logging configuration:
68 | ## Run telegraf with debug log messages.
69 | debug = false
70 | ## Run telegraf in quiet mode (error log messages only).
71 | quiet = false
72 | ## Specify the log file name. The empty string means to log to stderr.
73 | logfile = ""
74 |
75 | ## Override default hostname, if empty use os.Hostname()
76 | hostname = ""
77 | ## If set to true, do not set the "host" tag in the telegraf agent.
78 | omit_hostname = false
79 |
80 |
81 | ###############################################################################
82 | # OUTPUT PLUGINS #
83 | ###############################################################################
84 | # Configuration for Graphite server to send metrics to
85 | [[outputs.graphite]]
86 |   ## TCP endpoint for your graphite instance.
87 |   ## If multiple endpoints are configured, output will be load balanced.
88 |   ## Only one of the endpoints will be written to with each iteration.
89 |   servers = ["carbon-clickhouse:2003"]
90 |   ## Prefix for metric names
91 |   prefix = "servers"
92 |   ## Graphite output template
93 |   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
94 |   template = "host.tags.measurement.field"
95 |   ## timeout in seconds for the write connection to graphite
96 |   timeout = 2
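  ## NOTE (sketch, assuming Telegraf's stock Graphite serializer): with
  ## prefix = "servers" and this template, metrics are written roughly as
  ## servers.<host>.<tag values>.<measurement>.<field>. For example, the load1
  ## field of [[inputs.system]] on a host named, say, "myhost" should arrive as
  ## servers.myhost.system.load1 -- the shape queried by the bundled dashboard,
  ## e.g. alias(servers.*.system.load1, 'shortterm').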
97 |
98 | ###############################################################################
99 | # PROCESSOR PLUGINS #
100 | ###############################################################################
101 |
102 | # # Print all metrics that pass through this filter.
103 | # [[processors.printer]]
104 |
105 |
106 | ###############################################################################
107 | # AGGREGATOR PLUGINS #
108 | ###############################################################################
109 |
110 | # # Create aggregate histograms.
111 | # [[aggregators.histogram]]
112 | # ## The period in which to flush the aggregator.
113 | # period = "30s"
114 | #
115 | # ## If true, the original metric will be dropped by the
116 | # ## aggregator and will not get sent to the output plugins.
117 | # drop_original = false
118 | #
119 | # ## Example config that aggregates all fields of the metric.
120 | # # [[aggregators.histogram.config]]
121 | # # ## The set of buckets.
122 | # # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
123 | # # ## The name of metric.
124 | # # measurement_name = "cpu"
125 | #
126 | # ## Example config that aggregates only specific fields of the metric.
127 | # # [[aggregators.histogram.config]]
128 | # # ## The set of buckets.
129 | # # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
130 | # # ## The name of metric.
131 | # # measurement_name = "diskio"
132 | # # ## The concrete fields of metric
133 | # # fields = ["io_time", "read_time", "write_time"]
134 |
135 |
136 | # # Keep the aggregate min/max of each metric passing through.
137 | # [[aggregators.minmax]]
138 | # ## General Aggregator Arguments:
139 | # ## The period on which to flush & clear the aggregator.
140 | # period = "30s"
141 | # ## If true, the original metric will be dropped by the
142 | # ## aggregator and will not get sent to the output plugins.
143 | # drop_original = false
144 |
145 |
146 |
147 | ###############################################################################
148 | # INPUT PLUGINS #
149 | ###############################################################################
150 |
151 | # Read metrics about cpu usage
152 | [[inputs.cpu]]
153 | ## Whether to report per-cpu stats or not
154 | percpu = true
155 | ## Whether to report total system cpu stats or not
156 | totalcpu = true
157 | ## If true, collect raw CPU time metrics.
158 | collect_cpu_time = false
159 | ## If true, compute and report the sum of all non-idle CPU states.
160 | report_active = false
161 |
162 |
163 | # Read metrics about disk usage by mount point
164 | [[inputs.disk]]
165 | ## By default, telegraf gathers stats for all mountpoints.
166 | ## Setting mountpoints will restrict the stats to the specified mountpoints.
167 | # mount_points = ["/"]
168 |
169 | ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
170 | ## present on /run, /var/run, /dev/shm or /dev).
171 | ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
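  ## NOTE (sketch): the used_percent field gathered here is what the
  ## dashboard's "Disk used" panel queries, via series shaped like
  ## servers.*.sd*.*.*.*.*.used_percent.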
172 |
173 |
174 | # Read metrics about disk IO by device
175 | [[inputs.diskio]]
176 | ## By default, telegraf will gather stats for all devices including
177 | ## disk partitions.
178 | ## Setting devices will restrict the stats to the specified devices.
179 | # devices = ["sda", "sdb"]
180 | ## Uncomment the following line if you need disk serial numbers.
181 | # skip_serial_number = false
182 | #
183 | ## On systems which support it, device metadata can be added in the form of
184 | ## tags.
185 | ## Currently only Linux is supported via udev properties. You can view
186 | ## available properties for a device by running:
187 | ## 'udevadm info -q property -n /dev/sda'
188 | # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
189 | #
190 | ## Using the same metadata source as device_tags, you can also customize the
191 | ## name of the device via templates.
192 | ## The 'name_templates' parameter is a list of templates to try and apply to
193 | ## the device. The template may contain variables in the form of '$PROPERTY' or
194 | ## '${PROPERTY}'. The first template which does not contain any variables not
195 | ## present for the device is used as the device name tag.
196 | ## The typical use case is for LVM volumes, to get the VG/LV name instead of
197 | ## the near-meaningless DM-0 name.
198 | # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
199 |
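## NOTE (sketch): the dashboard's "Disk utilization" panel derives percent-busy
## from this plugin's io_time field (milliseconds spent doing I/O):
## nonNegativeDerivative(scale(servers.*.sd*.*.io_time, 0.1)) yields a 0-100
## value, since a fully busy disk accrues 1000 ms of io_time per second and
## 1000 * 0.1 = 100.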
200 |
201 | # Get kernel statistics from /proc/stat
202 | [[inputs.kernel]]
203 | # no configuration
204 |
205 |
206 | # Read metrics about memory usage
207 | [[inputs.mem]]
208 | # no configuration
209 |
210 |
211 | # Get the number of processes and group them by status
212 | [[inputs.processes]]
213 | # no configuration
214 |
215 |
216 | # Read metrics about swap memory usage
217 | [[inputs.swap]]
218 | # no configuration
219 |
220 |
221 | # Read metrics about system load & uptime
222 | [[inputs.system]]
223 | # no configuration
224 |
225 |
226 | # # Read stats from aerospike server(s)
227 | # [[inputs.aerospike]]
228 | # ## Aerospike servers to connect to (with port)
229 | # ## This plugin will query all namespaces the aerospike
230 | # ## server has configured and get stats for them.
231 | # servers = ["localhost:3000"]
232 |
233 |
234 | # # Read Apache status information (mod_status)
235 | # [[inputs.apache]]
236 | # ## An array of URLs to gather from, must be directed at the machine
237 | # ## readable version of the mod_status page including the auto query string.
238 | # ## Default is "http://localhost/server-status?auto".
239 | # urls = ["http://localhost/server-status?auto"]
240 | #
241 | # ## Credentials for basic HTTP authentication.
242 | # # username = "myuser"
243 | # # password = "mypassword"
244 | #
245 | # ## Maximum time to receive response.
246 | # # response_timeout = "5s"
247 | #
248 | # ## Optional SSL Config
249 | # # ssl_ca = "/etc/telegraf/ca.pem"
250 | # # ssl_cert = "/etc/telegraf/cert.pem"
251 | # # ssl_key = "/etc/telegraf/key.pem"
252 | # ## Use SSL but skip chain & host verification
253 | # # insecure_skip_verify = false
254 |
255 |
256 | # # Read metrics of bcache from stats_total and dirty_data
257 | # [[inputs.bcache]]
258 | # ## Bcache sets path
259 | # ## If not specified, then default is:
260 | # bcachePath = "/sys/fs/bcache"
261 | #
262 | # ## By default, telegraf gathers stats for all bcache devices
263 | # ## Setting devices will restrict the stats to the specified
264 | # ## bcache devices.
265 | # bcacheDevs = ["bcache0"]
266 |
267 |
268 | # # Read Cassandra metrics through Jolokia
269 | # [[inputs.cassandra]]
270 | # # This is the context root used to compose the jolokia url
271 | # context = "/jolokia/read"
272 | # ## List of cassandra servers exposing jolokia read service
273 | # servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
274 | # ## List of metrics collected on above servers
275 | # ## Each metric consists of a jmx path.
276 | # ## This will collect all heap memory usage metrics from the jvm and
277 | # ## ReadLatency metrics for all keyspaces and tables.
278 | # ## "type=Table" in the query works with Cassandra3.0. Older versions might
279 | # ## need to use "type=ColumnFamily"
280 | # metrics = [
281 | # "/java.lang:type=Memory/HeapMemoryUsage",
282 | # "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
283 | # ]
284 |
285 |
286 | # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
287 | # [[inputs.ceph]]
288 | # ## This is the recommended interval to poll. Too frequent and you will lose
289 | # ## data points due to timeouts during rebalancing and recovery
290 | # interval = '1m'
291 | #
292 | # ## All configuration values are optional, defaults are shown below
293 | #
294 | # ## location of ceph binary
295 | # ceph_binary = "/usr/bin/ceph"
296 | #
297 | # ## directory in which to look for socket files
298 | # socket_dir = "/var/run/ceph"
299 | #
300 | # ## prefix of MON and OSD socket files, used to determine socket type
301 | # mon_prefix = "ceph-mon"
302 | # osd_prefix = "ceph-osd"
303 | #
304 | # ## suffix used to identify socket files
305 | # socket_suffix = "asok"
306 | #
307 | # ## Ceph user to authenticate as
308 | # ceph_user = "client.admin"
309 | #
310 | # ## Ceph configuration to use to locate the cluster
311 | # ceph_config = "/etc/ceph/ceph.conf"
312 | #
313 | # ## Whether to gather statistics via the admin socket
314 | # gather_admin_socket_stats = true
315 | #
316 | # ## Whether to gather statistics via ceph commands
317 | # gather_cluster_stats = false
318 |
319 |
320 | # # Read specific statistics per cgroup
321 | # [[inputs.cgroup]]
322 | # ## Directories in which to look for files, globs are supported.
323 | # ## Consider restricting paths to the set of cgroups you really
324 | # ## want to monitor if you have a large number of cgroups, to avoid
325 | # ## any cardinality issues.
326 | # # paths = [
327 | # # "/cgroup/memory",
328 | # # "/cgroup/memory/child1",
329 | # # "/cgroup/memory/child2/*",
330 | # # ]
331 | # ## cgroup stat fields, as file names, globs are supported.
332 | # ## these file names are appended to each path from above.
333 | # # files = ["memory.*usage*", "memory.limit_in_bytes"]
334 |
335 |
336 | # # Get standard chrony metrics, requires chronyc executable.
337 | # [[inputs.chrony]]
338 | # ## If true, chronyc tries to perform a DNS lookup for the time server.
339 | # # dns_lookup = false
340 |
341 |
342 | # # Pull Metric Statistics from Amazon CloudWatch
343 | # [[inputs.cloudwatch]]
344 | # ## Amazon Region
345 | # region = "us-east-1"
346 | #
347 | # ## Amazon Credentials
348 | # ## Credentials are loaded in the following order
349 | # ## 1) Assumed credentials via STS if role_arn is specified
350 | # ## 2) explicit credentials from 'access_key' and 'secret_key'
351 | # ## 3) shared profile from 'profile'
352 | # ## 4) environment variables
353 | # ## 5) shared credentials file
354 | # ## 6) EC2 Instance Profile
355 | # #access_key = ""
356 | # #secret_key = ""
357 | # #token = ""
358 | # #role_arn = ""
359 | # #profile = ""
360 | # #shared_credential_file = ""
361 | #
362 | # # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
363 | # # metrics are made available to the 1 minute period. Some are collected at
364 | # # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
365 | # # Note that if a period is configured that is smaller than the minimum for a
366 | # # particular metric, that metric will not be returned by the Cloudwatch API
367 | # # and will not be collected by Telegraf.
368 | # #
369 | # ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
370 | # period = "5m"
371 | #
372 | # ## Collection Delay (required - must account for metrics availability via CloudWatch API)
373 | # delay = "5m"
374 | #
375 | # ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
376 | # ## gaps or overlap in pulled data
377 | # interval = "5m"
378 | #
379 | # ## Configure the TTL for the internal cache of metrics.
380 | # ## Defaults to 1 hr if not specified
381 | # #cache_ttl = "10m"
382 | #
383 | # ## Metric Statistic Namespace (required)
384 | # namespace = "AWS/ELB"
385 | #
386 | # ## Maximum requests per second. Note that the global default AWS rate limit is
387 | # ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
388 | # ## maximum of 400. Optional - default value is 200.
389 | # ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
390 | # ratelimit = 200
391 | #
392 | # ## Metrics to Pull (optional)
393 | # ## Defaults to all Metrics in Namespace if nothing is provided
394 | # ## Refreshes Namespace available metrics every 1h
395 | # #[[inputs.cloudwatch.metrics]]
396 | # # names = ["Latency", "RequestCount"]
397 | # #
398 | # # ## Dimension filters for Metric (optional)
399 | # # [[inputs.cloudwatch.metrics.dimensions]]
400 | # # name = "LoadBalancerName"
401 | # # value = "p-example"
402 |
403 |
404 | # # Collects conntrack stats from the configured directories and files.
405 | # [[inputs.conntrack]]
406 | # ## The following defaults would work with multiple versions of conntrack.
407 | # ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
408 | # ## kernel versions, as are the directory locations.
409 | #
410 | # ## Superset of filenames to look for within the conntrack dirs.
411 | # ## Missing files will be ignored.
412 | # files = ["ip_conntrack_count","ip_conntrack_max",
413 | # "nf_conntrack_count","nf_conntrack_max"]
414 | #
415 | # ## Directories to search within for the conntrack files above.
416 | # ## Missing directories will be ignored.
417 | # dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
418 |
419 |
420 | # # Gather health check statuses from services registered in Consul
421 | # [[inputs.consul]]
422 | # ## Most of these values default to the ones configured at the Consul agent level.
423 | # ## Optional Consul server address (default: "localhost")
424 | # # address = "localhost"
425 | # ## Optional URI scheme for the Consul server (default: "http")
426 | # # scheme = "http"
427 | # ## Optional ACL token used in every request (default: "")
428 | # # token = ""
429 | # ## Optional username used for request HTTP Basic Authentication (default: "")
430 | # # username = ""
431 | # ## Optional password used for HTTP Basic Authentication (default: "")
432 | # # password = ""
433 | # ## Optional data centre to query the health checks from (default: "")
434 | # # datacentre = ""
435 |
436 |
437 | # # Read metrics from one or many couchbase clusters
438 | # [[inputs.couchbase]]
439 | # ## specify servers via a url matching:
440 | # ## [protocol://][:password]@address[:port]
441 | # ## e.g.
442 | # ## http://couchbase-0.example.com/
443 | # ## http://admin:secret@couchbase-0.example.com:8091/
444 | # ##
445 | # ## If no servers are specified, then localhost is used as the host.
446 | # ## If no protocol is specified, HTTP is used.
447 | # ## If no port is specified, 8091 is used.
448 | # servers = ["http://localhost:8091"]
449 |
450 |
451 | # # Read CouchDB Stats from one or more servers
452 | # [[inputs.couchdb]]
453 | # ## Works with CouchDB stats endpoints out of the box
454 | # ## Multiple HOSTs from which to read CouchDB stats:
455 | # hosts = ["http://localhost:8086/_stats"]
456 |
457 |
458 | # # Read metrics from one or many disque servers
459 | # [[inputs.disque]]
460 | # ## An array of URIs to gather stats about. Specify an ip or hostname
461 | # ## with optional port and password.
462 | # ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
463 | # ## If no servers are specified, then localhost is used as the host.
464 | # servers = ["localhost"]
465 |
466 |
467 | # # Provide a native collection for dmsetup based statistics for dm-cache
468 | # [[inputs.dmcache]]
469 | # ## Whether to report per-device stats or not
470 | # per_device = true
471 |
472 |
473 | # # Query given DNS server and gives statistics
474 | # [[inputs.dns_query]]
475 | # ## servers to query
476 | # servers = ["8.8.8.8"]
477 | #
478 | # ## Network is the network protocol name.
479 | # # network = "udp"
480 | #
481 | # ## Domains or subdomains to query.
482 | # # domains = ["."]
483 | #
484 | # ## Query record type.
485 | # ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
486 | # # record_type = "A"
487 | #
488 | # ## Dns server port.
489 | # # port = 53
490 | #
491 | # ## Query timeout in seconds.
492 | # # timeout = 2
493 |
494 |
495 | # # Read metrics about docker containers
496 | # [[inputs.docker]]
497 | # ## Docker Endpoint
498 | # ## To use TCP, set endpoint = "tcp://[ip]:[port]"
499 | # ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
500 | # endpoint = "unix:///var/run/docker.sock"
501 | #
502 | # ## Only collect metrics for these containers, collect all if empty
503 | # container_names = []
504 | #
505 | # ## Containers to include and exclude. Globs accepted.
506 | # ## Note that an empty array for both will include all containers
507 | # container_name_include = []
508 | # container_name_exclude = []
509 | #
510 | # ## Timeout for docker list, info, and stats commands
511 | # timeout = "5s"
512 | #
513 | # ## Whether to report for each container per-device blkio (8:0, 8:1...) and
514 | # ## network (eth0, eth1, ...) stats or not
515 | # perdevice = true
516 | # ## Whether to report for each container total blkio and network stats or not
517 | # total = false
518 | # ## Which environment variables should we use as a tag
519 | # # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
520 | #
521 | # ## docker labels to include and exclude as tags. Globs accepted.
522 | # ## Note that an empty array for both will include all labels as tags
523 | # docker_label_include = []
524 | # docker_label_exclude = []
525 | #
526 | # ## Optional SSL Config
527 | # # ssl_ca = "/etc/telegraf/ca.pem"
528 | # # ssl_cert = "/etc/telegraf/cert.pem"
529 | # # ssl_key = "/etc/telegraf/key.pem"
530 | # ## Use SSL but skip chain & host verification
531 | # # insecure_skip_verify = false
532 |
533 |
534 | # # Read statistics from one or many dovecot servers
535 | # [[inputs.dovecot]]
536 | # ## specify dovecot servers via an address:port list
537 | # ## e.g.
538 | # ## localhost:24242
539 | # ##
540 | # ## If no servers are specified, then localhost is used as the host.
541 | # servers = ["localhost:24242"]
542 | # ## Type is one of "user", "domain", "ip", or "global"
543 | # type = "global"
544 | # ## Wildcard matches like "*.com". An empty string "" is same as "*"
545 | # ## If type = "ip" filters should be
546 | # filters = [""]
547 |
548 |
549 | # # Read stats from one or more Elasticsearch servers or clusters
550 | # [[inputs.elasticsearch]]
551 | # ## specify a list of one or more Elasticsearch servers
552 | # # you can add username and password to your url to use basic authentication:
553 | # # servers = ["http://user:pass@localhost:9200"]
554 | # servers = ["http://localhost:9200"]
555 | #
556 | # ## Timeout for HTTP requests to the elastic search server(s)
557 | # http_timeout = "5s"
558 | #
559 | # ## When local is true (the default), the node will read only its own stats.
560 | # ## Set local to false when you want to read the node stats from all nodes
561 | # ## of the cluster.
562 | # local = true
563 | #
564 | # ## Set cluster_health to true when you want to also obtain cluster health stats
565 | # cluster_health = false
566 | #
567 | # ## Set cluster_stats to true when you want to also obtain cluster stats from the
568 | # ## Master node.
569 | # cluster_stats = false
570 | #
571 | # ## Optional SSL Config
572 | # # ssl_ca = "/etc/telegraf/ca.pem"
573 | # # ssl_cert = "/etc/telegraf/cert.pem"
574 | # # ssl_key = "/etc/telegraf/key.pem"
575 | # ## Use SSL but skip chain & host verification
576 | # # insecure_skip_verify = false
577 |
578 |
579 | # # Read metrics from one or more commands that can output to stdout
580 | # [[inputs.exec]]
581 | # ## Commands array
582 | # commands = [
583 | # "/tmp/test.sh",
584 | # "/usr/bin/mycollector --foo=bar",
585 | # "/tmp/collect_*.sh"
586 | # ]
587 | #
588 | # ## Timeout for each command to complete.
589 | # timeout = "5s"
590 | #
591 | # ## measurement name suffix (for separating different commands)
592 | # name_suffix = "_mycollector"
593 | #
594 | # ## Data format to consume.
595 | # ## Each data format has its own unique set of configuration options, read
596 | # ## more about them here:
597 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
598 | # data_format = "influx"
599 |
600 |
601 | # # Read metrics from fail2ban.
602 | # [[inputs.fail2ban]]
603 | # ## Use sudo to run fail2ban-client
604 | # use_sudo = false
605 |
606 |
607 | # # Read stats about given file(s)
608 | # [[inputs.filestat]]
609 | # ## Files to gather stats about.
610 | # ## These accept standard unix glob matching rules, but with the addition of
611 | # ## ** as a "super asterisk". ie:
612 | # ## "/var/log/**.log" -> recursively find all .log files in /var/log
613 | # ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
614 | # ## "/var/log/apache.log" -> just tail the apache log file
615 | # ##
616 | # ## See https://github.com/gobwas/glob for more examples
617 | # ##
618 | # files = ["/var/log/**.log"]
619 | # ## If true, read the entire file and calculate an md5 checksum.
620 | # md5 = false
621 |
622 |
623 | # # Read metrics exposed by fluentd in_monitor plugin
624 | # [[inputs.fluentd]]
625 | # ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
626 | # ##
627 | # ## Endpoint:
628 | # ## - only one URI is allowed
629 | # ## - https is not supported
630 | # endpoint = "http://localhost:24220/api/plugins.json"
631 | #
632 | # ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
633 | # exclude = [
634 | # "monitor_agent",
635 | # "dummy",
636 | # ]
637 |
638 |
639 | # # Read flattened metrics from one or more GrayLog HTTP endpoints
640 | # [[inputs.graylog]]
641 | # ## API endpoint, currently supported API:
642 | # ##
643 | # ## - multiple (Ex http://:12900/system/metrics/multiple)
644 | # ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace})
645 | # ##
646 | # ## For namespace endpoint, the metrics array will be ignored for that call.
647 | # ## Endpoint can contain namespace and multiple type calls.
648 | # ##
649 | # ## Please check http://[graylog-server-ip]:12900/api-browser for full list
650 | # ## of endpoints
651 | # servers = [
652 | # "http://[graylog-server-ip]:12900/system/metrics/multiple",
653 | # ]
654 | #
655 | # ## Metrics list
656 | # ## The list of metrics can be found in the Graylog webservice documentation.
657 | # ## Or by hitting the web service API at:
658 | # ## http://[graylog-host]:12900/system/metrics
659 | # metrics = [
660 | # "jvm.cl.loaded",
661 | # "jvm.memory.pools.Metaspace.committed"
662 | # ]
663 | #
664 | # ## Username and password
665 | # username = ""
666 | # password = ""
667 | #
668 | # ## Optional SSL Config
669 | # # ssl_ca = "/etc/telegraf/ca.pem"
670 | # # ssl_cert = "/etc/telegraf/cert.pem"
671 | # # ssl_key = "/etc/telegraf/key.pem"
672 | # ## Use SSL but skip chain & host verification
673 | # # insecure_skip_verify = false
674 |
675 |
676 | # # Read metrics of haproxy, via socket or csv stats page
677 | # [[inputs.haproxy]]
678 | # ## An array of addresses to gather stats about. Specify an ip or hostname
679 | # ## with optional port. ie localhost, 10.10.3.33:1936, etc.
680 | # ## Make sure you specify the complete path to the stats endpoint
681 | # ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
682 | #
683 | # ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
684 | # servers = ["http://myhaproxy.com:1936/haproxy?stats"]
685 | #
686 | # ## You can also use local socket with standard wildcard globbing.
687 | # ## Server address not starting with 'http' will be treated as a possible
688 | # ## socket, so both examples below are valid.
689 | # # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
690 | #
691 | # ## By default, some of the fields are renamed from what haproxy calls them.
692 | # ## Setting this option to true results in the plugin keeping the original
693 | # ## field names.
694 | # # keep_field_names = true
695 | #
696 | # ## Optional SSL Config
697 | # # ssl_ca = "/etc/telegraf/ca.pem"
698 | # # ssl_cert = "/etc/telegraf/cert.pem"
699 | # # ssl_key = "/etc/telegraf/key.pem"
700 | # ## Use SSL but skip chain & host verification
701 | # # insecure_skip_verify = false
702 |
703 |
704 | # # Monitor disks' temperatures using hddtemp
705 | # [[inputs.hddtemp]]
706 | # ## By default, telegraf gathers temperature data from all disks detected
707 | # ## by hddtemp.
708 | # ##
709 | # ## Only collect temps from the selected disks.
710 | # ##
711 | # ## A * as the device name will return the temperature values of all disks.
712 | # ##
713 | # # address = "127.0.0.1:7634"
714 | # # devices = ["sda", "*"]
715 |
716 |
717 | # # HTTP/HTTPS request given an address a method and a timeout
718 | # [[inputs.http_response]]
719 | # ## Server address (default http://localhost)
720 | # # address = "http://localhost"
721 | #
722 | # ## Set response_timeout (default 5 seconds)
723 | # # response_timeout = "5s"
724 | #
725 | # ## HTTP Request Method
726 | # # method = "GET"
727 | #
728 | # ## Whether to follow redirects from the server (defaults to false)
729 | # # follow_redirects = false
730 | #
731 | # ## Optional HTTP Request Body
732 | # # body = '''
733 | # # {'fake':'data'}
734 | # # '''
735 | #
736 | # ## Optional substring or regex match in body of the response
737 | # # response_string_match = "\"service_status\": \"up\""
738 | # # response_string_match = "ok"
739 | # # response_string_match = "\".*_status\".?:.?\"up\""
740 | #
741 | # ## Optional SSL Config
742 | # # ssl_ca = "/etc/telegraf/ca.pem"
743 | # # ssl_cert = "/etc/telegraf/cert.pem"
744 | # # ssl_key = "/etc/telegraf/key.pem"
745 | # ## Use SSL but skip chain & host verification
746 | # # insecure_skip_verify = false
747 | #
748 | # ## HTTP Request Headers (all values must be strings)
749 | # # [inputs.http_response.headers]
750 | # # Host = "github.com"
751 |
752 |
753 | # # Read flattened metrics from one or more JSON HTTP endpoints
754 | # [[inputs.httpjson]]
755 | # ## NOTE This plugin only reads numerical measurements, strings and booleans
756 | # ## will be ignored.
757 | #
758 | # ## Name for the service being polled. Will be appended to the name of the
759 | # ## measurement e.g. httpjson_webserver_stats
760 | # ##
761 | # ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
762 | # name = "webserver_stats"
763 | #
764 | # ## URL of each server in the service's cluster
765 | # servers = [
766 | # "http://localhost:9999/stats/",
767 | # "http://localhost:9998/stats/",
768 | # ]
769 | # ## Set response_timeout (default 5 seconds)
770 | # response_timeout = "5s"
771 | #
772 | # ## HTTP method to use: GET or POST (case-sensitive)
773 | # method = "GET"
774 | #
775 | # ## List of tag names to extract from top-level of JSON server response
776 | # # tag_keys = [
777 | # # "my_tag_1",
778 | # # "my_tag_2"
779 | # # ]
780 | #
781 | # ## HTTP parameters (all values must be strings). For "GET" requests, data
782 | # ## will be included in the query. For "POST" requests, data will be included
783 | # ## in the request body as "x-www-form-urlencoded".
784 | # # [inputs.httpjson.parameters]
785 | # # event_type = "cpu_spike"
786 | # # threshold = "0.75"
787 | #
788 | # ## HTTP Headers (all values must be strings)
789 | # # [inputs.httpjson.headers]
790 | # # X-Auth-Token = "my-xauth-token"
791 | # # apiVersion = "v1"
792 | #
793 | # ## Optional SSL Config
794 | # # ssl_ca = "/etc/telegraf/ca.pem"
795 | # # ssl_cert = "/etc/telegraf/cert.pem"
796 | # # ssl_key = "/etc/telegraf/key.pem"
797 | # ## Use SSL but skip chain & host verification
798 | # # insecure_skip_verify = false
799 |
800 |
801 | # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
802 | # [[inputs.influxdb]]
803 | # ## Works with InfluxDB debug endpoints out of the box,
804 | # ## but other services can use this format too.
805 | # ## See the influxdb plugin's README for more details.
806 | #
807 | # ## Multiple URLs from which to read InfluxDB-formatted JSON
808 | # ## Default is "http://localhost:8086/debug/vars".
809 | # urls = [
810 | # "http://localhost:8086/debug/vars"
811 | # ]
812 | #
813 | # ## Optional SSL Config
814 | # # ssl_ca = "/etc/telegraf/ca.pem"
815 | # # ssl_cert = "/etc/telegraf/cert.pem"
816 | # # ssl_key = "/etc/telegraf/key.pem"
817 | # ## Use SSL but skip chain & host verification
818 | # # insecure_skip_verify = false
819 | #
820 | # ## http request & header timeout
821 | # timeout = "5s"
822 |
823 |
824 | # # Collect statistics about itself
825 | # [[inputs.internal]]
826 | # ## If true, collect telegraf memory stats.
827 | # # collect_memstats = true
828 |
829 |
830 | # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
831 | # [[inputs.interrupts]]
832 | # ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
833 | # # [inputs.interrupts.tagdrop]
834 | # # irq = [ "NET_RX", "TASKLET" ]
835 |
836 |
837 | # # Read metrics from the bare metal servers via IPMI
838 | # [[inputs.ipmi_sensor]]
839 | # ## optionally specify the path to the ipmitool executable
840 | # # path = "/usr/bin/ipmitool"
841 | # #
842 | # ## optionally specify one or more servers via a url matching
843 | # ## [username[:password]@][protocol[(address)]]
844 | # ## e.g.
845 | # ## root:passwd@lan(127.0.0.1)
846 | # ##
847 | # ## if no servers are specified, local machine sensor stats will be queried
848 | # ##
849 | # # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
850 | #
851 | # ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
852 | # ## gaps or overlap in pulled data
853 | # interval = "30s"
854 | #
855 | # ## Timeout for the ipmitool command to complete
856 | # timeout = "20s"
857 |
858 |
859 | # # Gather packets and bytes throughput from iptables
860 | # [[inputs.iptables]]
861 | # ## iptables require root access on most systems.
862 | # ## Setting 'use_sudo' to true will make use of sudo to run iptables.
863 | # ## Users must configure sudo to allow the telegraf user to run iptables with no password.
864 | # ## iptables can be restricted to only the list command "iptables -nvL".
865 | # use_sudo = false
866 | # ## Setting 'use_lock' to true runs iptables with the "-w" option.
867 | # ## Adjust your sudo settings appropriately if using this option ("iptables -wnvL")
868 | # use_lock = false
869 | # ## defines the table to monitor:
870 | # table = "filter"
871 | # ## defines the chains to monitor.
872 | # ## NOTE: iptables rules without a comment will not be monitored.
873 | # ## Read the plugin documentation for more information.
874 | # chains = [ "INPUT" ]
875 |
876 |
877 | # # Read JMX metrics through Jolokia
878 | # [[inputs.jolokia]]
879 | # ## This is the context root used to compose the jolokia url
880 | # ## NOTE that Jolokia requires a trailing slash at the end of the context root
881 | # ## NOTE that your jolokia security policy must allow for POST requests.
882 | # context = "/jolokia/"
883 | #
884 | # ## This specifies the mode used
885 | # # mode = "proxy"
886 | # #
887 | # ## When in proxy mode this section is used to specify further
888 | # ## proxy address configurations.
889 | # ## Remember to change host address to fit your environment.
890 | # # [inputs.jolokia.proxy]
891 | # # host = "127.0.0.1"
892 | # # port = "8080"
893 | #
894 | # ## Optional http timeouts
895 | # ##
896 | # ## response_header_timeout, if non-zero, specifies the amount of time to wait
897 | # ## for a server's response headers after fully writing the request.
898 | # # response_header_timeout = "3s"
899 | # ##
900 | # ## client_timeout specifies a time limit for requests made by this client.
901 | # ## Includes connection time, any redirects, and reading the response body.
902 | # # client_timeout = "4s"
903 | #
904 | # ## Attribute delimiter
905 | # ##
906 | # ## When multiple attributes are returned for a single
907 | # ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
908 | # ## name, and the attribute name, separated by the given delimiter.
909 | # # delimiter = "_"
910 | #
911 | # ## List of servers exposing jolokia read service
912 | # [[inputs.jolokia.servers]]
913 | # name = "as-server-01"
914 | # host = "127.0.0.1"
915 | # port = "8080"
916 | # # username = "myuser"
917 | # # password = "mypassword"
918 | #
919 | # ## List of metrics collected on above servers
920 | # ## Each metric consists of a name, a jmx path and either
921 | # ## a pass or drop slice attribute.
922 | # ## This collects all heap memory usage metrics.
923 | # [[inputs.jolokia.metrics]]
924 | # name = "heap_memory_usage"
925 | # mbean = "java.lang:type=Memory"
926 | # attribute = "HeapMemoryUsage"
927 | #
928 | # ## This collects thread count metrics.
929 | # [[inputs.jolokia.metrics]]
930 | # name = "thread_count"
931 | # mbean = "java.lang:type=Threading"
932 | # attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
933 | #
934 | # ## This collects loaded/unloaded class count metrics.
935 | # [[inputs.jolokia.metrics]]
936 | # name = "class_count"
937 | # mbean = "java.lang:type=ClassLoading"
938 | # attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
939 |
940 |
941 | # # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
942 | # [[inputs.kapacitor]]
943 | # ## Multiple URLs from which to read Kapacitor-formatted JSON
944 | # ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
945 | # urls = [
946 | # "http://localhost:9092/kapacitor/v1/debug/vars"
947 | # ]
948 | #
949 | # ## Time limit for http requests
950 | # timeout = "5s"
951 |
952 |
953 | # # Get kernel statistics from /proc/vmstat
954 | # [[inputs.kernel_vmstat]]
955 | # # no configuration
956 |
957 |
958 | # # Read metrics from the kubernetes kubelet api
959 | # [[inputs.kubernetes]]
960 | # ## URL for the kubelet
961 | # url = "http://1.1.1.1:10255"
962 | #
963 | # ## Use bearer token for authorization
964 | # # bearer_token = /path/to/bearer/token
965 | #
966 | # ## Optional SSL Config
967 | # # ssl_ca = /path/to/cafile
968 | # # ssl_cert = /path/to/certfile
969 | # # ssl_key = /path/to/keyfile
970 | # ## Use SSL but skip chain & host verification
971 | # # insecure_skip_verify = false
972 |
973 |
974 | # # Read metrics from a LeoFS Server via SNMP
975 | # [[inputs.leofs]]
976 | # ## An array of URLs of the form:
977 | # ## "udp://" host [ ":" port]
978 | # servers = ["udp://127.0.0.1:4020"]
979 |
980 |
981 | # # Provides Linux sysctl fs metrics
982 | # [[inputs.linux_sysctl_fs]]
983 | # # no configuration
984 |
985 |
986 | # # Read metrics from local Lustre service on OST, MDS
987 | # [[inputs.lustre2]]
988 | # ## An array of /proc globs to search for Lustre stats
989 | # ## If not specified, the default will work on Lustre 2.5.x
990 | # ##
991 | # # ost_procfiles = [
992 | # # "/proc/fs/lustre/obdfilter/*/stats",
993 | # # "/proc/fs/lustre/osd-ldiskfs/*/stats",
994 | # # "/proc/fs/lustre/obdfilter/*/job_stats",
995 | # # ]
996 | # # mds_procfiles = [
997 | # # "/proc/fs/lustre/mdt/*/md_stats",
998 | # # "/proc/fs/lustre/mdt/*/job_stats",
999 | # # ]
1000 |
1001 |
1002 | # # Gathers metrics from the /3.0/reports MailChimp API
1003 | # [[inputs.mailchimp]]
1004 | # ## MailChimp API key
1005 | # ## get from https://admin.mailchimp.com/account/api/
1006 | # api_key = "" # required
1007 | # ## Reports for campaigns sent more than days_old ago will not be collected.
1008 | # ## 0 means collect all.
1009 | # days_old = 0
1010 | # ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
1011 | # # campaign_id = ""
1012 |
1013 |
1014 | # # Read metrics from one or many memcached servers
1015 | # [[inputs.memcached]]
1016 | # ## An array of addresses to gather stats about. Specify an ip or hostname
1017 | # ## with optional port. ie localhost, 10.0.0.1:11211, etc.
1018 | # servers = ["localhost:11211"]
1019 | # # unix_sockets = ["/var/run/memcached.sock"]
1020 |
1021 |
1022 | # # Telegraf plugin for gathering metrics from N Mesos masters
1023 | # [[inputs.mesos]]
1024 | # ## Timeout, in ms.
1025 | # timeout = 100
1026 | # ## A list of Mesos masters.
1027 | # masters = ["localhost:5050"]
1028 | # ## Master metrics groups to be collected, by default, all enabled.
1029 | # master_collections = [
1030 | # "resources",
1031 | # "master",
1032 | # "system",
1033 | # "agents",
1034 | # "frameworks",
1035 | # "tasks",
1036 | # "messages",
1037 | # "evqueue",
1038 | # "registrar",
1039 | # ]
1040 | # ## A list of Mesos slaves, default is []
1041 | # # slaves = []
1042 | # ## Slave metrics groups to be collected, by default, all enabled.
1043 | # # slave_collections = [
1044 | # # "resources",
1045 | # # "agent",
1046 | # # "system",
1047 | # # "executors",
1048 | # # "tasks",
1049 | # # "messages",
1050 | # # ]
1051 |
1052 |
1053 | # # Collects scores from a minecraft server's scoreboard using the RCON protocol
1054 | # [[inputs.minecraft]]
1055 | # ## server address for minecraft
1056 | # # server = "localhost"
1057 | # ## port for RCON
1058 | # # port = "25575"
1059 | # ## RCON password for the minecraft server
1060 | # # password = ""
1061 |
1062 |
1063 | # # Read metrics from one or many MongoDB servers
1064 | # [[inputs.mongodb]]
1065 | # ## An array of URLs of the form:
1066 | # ## "mongodb://" [user ":" pass "@"] host [ ":" port]
1067 | # ## For example:
1068 | # ## mongodb://user:auth_key@10.10.3.30:27017,
1069 | # ## mongodb://10.10.3.33:18832,
1070 | # servers = ["mongodb://127.0.0.1:27017"]
1071 | # gather_perdb_stats = false
1072 | #
1073 | # ## Optional SSL Config
1074 | # # ssl_ca = "/etc/telegraf/ca.pem"
1075 | # # ssl_cert = "/etc/telegraf/cert.pem"
1076 | # # ssl_key = "/etc/telegraf/key.pem"
1077 | # ## Use SSL but skip chain & host verification
1078 | # # insecure_skip_verify = false
1079 |
1080 |
1081 | # # Read metrics from one or many mysql servers
1082 | # [[inputs.mysql]]
1083 | # ## specify servers via a url matching:
1084 | # ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
1085 | # ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
1086 | # ## e.g.
1087 | # ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
1088 | # ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
1089 | # #
1090 | # ## If no servers are specified, then localhost is used as the host.
1091 | # servers = ["tcp(127.0.0.1:3306)/"]
1092 | # ## the limits for metrics from perf_events_statements
1093 | # perf_events_statements_digest_text_limit = 120
1094 | # perf_events_statements_limit = 250
1095 | # perf_events_statements_time_limit = 86400
1096 | # #
1097 | # ## if the list is empty, then metrics are gathered from all database tables
1098 | # table_schema_databases = []
1099 | # #
1100 | # ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided in the list above
1101 | # gather_table_schema = false
1102 | # #
1103 | # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
1104 | # gather_process_list = true
1105 | # #
1106 | # ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
1107 | # gather_user_statistics = true
1108 | # #
1109 | # ## gather auto_increment columns and max values from information schema
1110 | # gather_info_schema_auto_inc = true
1111 | # #
1112 | # ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
1113 | # gather_innodb_metrics = true
1114 | # #
1115 | # ## gather metrics from SHOW SLAVE STATUS command output
1116 | # gather_slave_status = true
1117 | # #
1118 | # ## gather metrics from SHOW BINARY LOGS command output
1119 | # gather_binary_logs = false
1120 | # #
1121 | # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
1122 | # gather_table_io_waits = false
1123 | # #
1124 | # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
1125 | # gather_table_lock_waits = false
1126 | # #
1127 | # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
1128 | # gather_index_io_waits = false
1129 | # #
1130 | # ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
1131 | # gather_event_waits = false
1132 | # #
1133 | # ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
1134 | # gather_file_events_stats = false
1135 | # #
1136 | # ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
1137 | # gather_perf_events_statements = false
1138 | # #
1139 | # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
1140 | # interval_slow = "30m"
1141 | #
1142 | # ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
1143 | # ssl_ca = "/etc/telegraf/ca.pem"
1144 | # ssl_cert = "/etc/telegraf/cert.pem"
1145 | # ssl_key = "/etc/telegraf/key.pem"
1146 |
1147 |
1148 | # Read metrics about network interface usage
1149 | [[inputs.net]]
1150 |   ## By default, telegraf gathers stats from any up interface (excluding loopback)
1151 |   ## Setting interfaces will tell it to gather these explicit interfaces,
1152 |   ## regardless of status.
1153 |   ##
1154 |   interfaces = ["eth0", "tun0"]
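  ## NOTE (sketch): per-interface fields from this plugin land under
  ## servers.<host>.<interface>.net.*, which is what the dashboard's "Network"
  ## panel reads, e.g. nonNegativeDerivative(servers.*.eth0.net.bytes_sent).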
1155 |
1156 |
1157 | # # TCP or UDP 'ping' given url and collect response time in seconds
1158 | # [[inputs.net_response]]
1159 | # ## Protocol, must be "tcp" or "udp"
1160 | # ## NOTE: because the "udp" protocol does not respond to requests, it requires
1161 | # ## a send/expect string pair (see below).
1162 | # protocol = "tcp"
1163 | # ## Server address (default localhost)
1164 | # address = "localhost:80"
1165 | # ## Set timeout
1166 | # timeout = "1s"
1167 | #
1168 | # ## Set read timeout (only used if expecting a response)
1169 | # read_timeout = "1s"
1170 | #
1171 | # ## The following options are required for UDP checks. For TCP, they are
1172 | # ## optional. The plugin will send the given string to the server and then
1173 | # ## expect to receive the given 'expect' string back.
1174 | # ## string sent to the server
1175 | # # send = "ssh"
1176 | # ## expected string in answer
1177 | # # expect = "ssh"
1178 |
1179 |
1180 | # # Read TCP metrics such as established, time wait and socket counts.
1181 | # [[inputs.netstat]]
1182 | # # no configuration
1183 |
1184 |
1185 | # # Read Nginx's basic status information (ngx_http_stub_status_module)
1186 | # [[inputs.nginx]]
1187 | # # An array of Nginx stub_status URI to gather stats.
1188 | # urls = ["http://localhost/server_status"]
1189 | #
1190 | # # TLS/SSL configuration
1191 | # ssl_ca = "/etc/telegraf/ca.pem"
1192 | # ssl_cert = "/etc/telegraf/cert.cer"
1193 | # ssl_key = "/etc/telegraf/key.key"
1194 | # insecure_skip_verify = false
1195 | #
1196 | # # HTTP response timeout (default: 5s)
1197 | # response_timeout = "5s"
1198 |
1199 |
1200 | # # Read NSQ topic and channel statistics.
1201 | # [[inputs.nsq]]
1202 | # ## An array of NSQD HTTP API endpoints
1203 | # endpoints = ["http://localhost:4151"]
1204 |
1205 |
1206 | # # Collect kernel snmp counters and network interface statistics
1207 | # [[inputs.nstat]]
1208 | # ## file paths for proc files. If empty default paths will be used:
1209 | # ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
1210 | # ## These can also be overridden with env variables, see README.
1211 | # proc_net_netstat = "/proc/net/netstat"
1212 | # proc_net_snmp = "/proc/net/snmp"
1213 | # proc_net_snmp6 = "/proc/net/snmp6"
1214 | # ## dump metrics with 0 values too
1215 | # dump_zeros = true
1216 |
1217 |
1218 | # # Get standard NTP query metrics, requires ntpq executable.
1219 | # [[inputs.ntpq]]
1220 | # ## If false, set the -n ntpq flag. Can reduce metric gather time.
1221 | # dns_lookup = true
1222 |
1223 |
1224 | # # OpenLDAP cn=Monitor plugin
1225 | # [[inputs.openldap]]
1226 | # host = "localhost"
1227 | # port = 389
1228 | #
1229 | # # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
1230 | # # note that port will likely need to be changed to 636 for ldaps
1231 | # # valid options: "" | "starttls" | "ldaps"
1232 | # ssl = ""
1233 | #
1234 | # # skip peer certificate verification. Default is false.
1235 | # insecure_skip_verify = false
1236 | #
1237 | # # Path to PEM-encoded Root certificate to use to verify server certificate
1238 | # ssl_ca = "/etc/ssl/certs.pem"
1239 | #
1240 | # # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
1241 | # bind_dn = ""
1242 | # bind_password = ""
1243 |
1244 |
1245 | # # Read metrics of passenger using passenger-status
1246 | # [[inputs.passenger]]
1247 | # ## Path of passenger-status.
1248 | # ##
1249 | # ## The plugin gathers metrics by parsing the XML output of passenger-status
1250 | # ## More information about the tool:
1251 | # ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
1252 | # ##
1253 | # ## If no path is specified, the plugin simply executes passenger-status,
1254 | # ## assuming it can be found in your PATH
1255 | # command = "passenger-status -v --show=xml"
1256 |
1257 |
1258 | # # Read metrics of phpfpm, via HTTP status page or socket
1259 | # [[inputs.phpfpm]]
1260 | # ## An array of addresses to gather stats about. Specify an ip or hostname
1261 | # ## with optional port and path
1262 | # ##
1263 | # ## Plugin can be configured in three modes (either can be used):
1264 | # ## - http: the URL must start with http:// or https://, ie:
1265 | # ## "http://localhost/status"
1266 | # ## "http://192.168.130.1/status?full"
1267 | # ##
1268 | # ## - unixsocket: path to fpm socket, ie:
1269 | # ## "/var/run/php5-fpm.sock"
1270 | # ## or using a custom fpm status path:
1271 | # ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
1272 | # ##
1273 | # ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
1274 | # ## "fcgi://10.0.0.12:9000/status"
1275 | # ## "cgi://10.0.10.12:9001/status"
1276 | # ##
1277 | # ## Example of gathering from both a local socket and a remote host
1278 | # ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
1279 | # urls = ["http://localhost/status"]
1280 |
1281 |
1282 | # # Ping given url(s) and return statistics
1283 | # [[inputs.ping]]
1284 | # ## NOTE: this plugin forks the ping command. You may need to set capabilities
1285 | # ## via setcap cap_net_raw+p /bin/ping
1286 | # #
1287 | # ## List of urls to ping
1288 | # urls = ["www.google.com"] # required
1289 | # ## number of pings to send per collection (ping -c <COUNT>)
1290 | # # count = 1
1291 | # ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
1292 | # # ping_interval = 1.0
1293 | # ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
1294 | # # timeout = 1.0
1295 | # ## interface to send ping from (ping -I <INTERFACE>)
1296 | # # interface = ""
1297 |
1298 |
1299 | # # Read metrics from one or many postgresql servers
1300 | # [[inputs.postgresql]]
1301 | # ## specify address via a url matching:
1302 | # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
1303 | # ## ?sslmode=[disable|verify-ca|verify-full]
1304 | # ## or a simple string:
1305 | # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
1306 | # ##
1307 | # ## All connection parameters are optional.
1308 | # ##
1309 | # ## Without the dbname parameter, the driver will default to a database
1310 | # ## with the same name as the user. This dbname is just for instantiating a
1311 | # ## connection with the server and doesn't restrict the databases we are trying
1312 | # ## to grab metrics for.
1313 | # ##
1314 | # address = "host=localhost user=postgres sslmode=disable"
1315 | #
1316 | # ## A list of databases to explicitly ignore. If not specified, metrics for all
1317 | # ## databases are gathered. Do NOT use with the 'databases' option.
1318 | # # ignored_databases = ["postgres", "template0", "template1"]
1319 | #
1320 | # ## A list of databases to pull metrics about. If not specified, metrics for all
1321 | # ## databases are gathered. Do NOT use with the 'ignored_databases' option.
1322 | # # databases = ["app_production", "testing"]
1323 |
1324 |
1325 | # # Read metrics from one or many postgresql servers
1326 | # [[inputs.postgresql_extensible]]
1327 | # ## specify address via a url matching:
1328 | # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
1329 | # ## ?sslmode=[disable|verify-ca|verify-full]
1330 | # ## or a simple string:
1331 | # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
1332 | # #
1333 | # ## All connection parameters are optional.
1334 | # ## Without the dbname parameter, the driver will default to a database
1335 | # ## with the same name as the user. This dbname is just for instantiating a
1336 | # ## connection with the server and doesn't restrict the databases we are trying
1337 | # ## to grab metrics for.
1338 | # #
1339 | # address = "host=localhost user=postgres sslmode=disable"
1340 | # ## A list of databases to pull metrics about. If not specified, metrics for all
1341 | # ## databases are gathered.
1342 | # ## databases = ["app_production", "testing"]
1343 | # #
1344 | # # outputaddress = "db01"
1345 | # ## A custom name for the database that will be used as the "server" tag in the
1346 | # ## measurement output. If not specified, a default one generated from
1347 | # ## the connection address is used.
1348 | # #
1349 | # ## Define the toml config where the sql queries are stored
1350 | # ## New queries can be added. If withdbname is set to true and no databases
1351 | # ## are defined in the 'databases' field, the sql query is ended by
1352 | # ## 'is not null' in order to make the query succeed.
1353 | # ## Example :
1354 | # ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
1355 | # ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
1356 | # ## because the databases variable was set to ['postgres', 'pgbench'] and
1357 | # ## withdbname was true. Note that if withdbname is set to false you must not
1358 | # ## define the where clause (i.e. with the dbname); the tagvalue
1359 | # ## field is used to define custom tags (separated by commas)
1360 | # ## The optional "measurement" value can be used to override the default
1361 | # ## output measurement name ("postgresql").
1362 | # #
1363 | # ## Structure :
1364 | # ## [[inputs.postgresql_extensible.query]]
1365 | # ## sqlquery string
1366 | # ## version string
1367 | # ## withdbname boolean
1368 | # ## tagvalue string (comma separated)
1369 | # ## measurement string
1370 | # [[inputs.postgresql_extensible.query]]
1371 | # sqlquery="SELECT * FROM pg_stat_database"
1372 | # version=901
1373 | # withdbname=false
1374 | # tagvalue=""
1375 | # measurement=""
1376 | # [[inputs.postgresql_extensible.query]]
1377 | # sqlquery="SELECT * FROM pg_stat_bgwriter"
1378 | # version=901
1379 | # withdbname=false
1380 | # tagvalue="postgresql.stats"
1381 |
1382 |
1383 | # # Read metrics from one or many PowerDNS servers
1384 | # [[inputs.powerdns]]
1385 | # ## An array of sockets to gather stats about.
1386 | # ## Specify a path to unix socket.
1387 | # unix_sockets = ["/var/run/pdns.controlsocket"]
1388 |
1389 |
1390 | # # Monitor process cpu and memory usage
1391 | # [[inputs.procstat]]
1392 | # ## Must specify one of: pid_file, exe, or pattern
1393 | # ## PID file to monitor process
1394 | # pid_file = "/var/run/nginx.pid"
1395 | # ## executable name (ie, pgrep <exe>)
1396 | # # exe = "nginx"
1397 | # ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
1398 | # # pattern = "nginx"
1399 | # ## user as argument for pgrep (ie, pgrep -u <user>)
1400 | # # user = "nginx"
1401 | #
1402 | # ## override for process_name
1403 | # ## This is optional; default is sourced from /proc/<pid>/status
1404 | # # process_name = "bar"
1405 | # ## Field name prefix
1406 | # prefix = ""
1407 | # ## comment this out if you want raw cpu_time stats
1408 | # fielddrop = ["cpu_time_*"]
1409 | # ## This is optional; moves pid into a tag instead of a field
1410 | # pid_tag = false
1411 |
1412 |
1413 | # # Read metrics from one or many prometheus clients
1414 | # [[inputs.prometheus]]
1415 | # ## An array of urls to scrape metrics from.
1416 | # urls = ["http://localhost:9100/metrics"]
1417 | #
1418 | # ## Use bearer token for authorization
1419 | # # bearer_token = /path/to/bearer/token
1420 | #
1421 | # ## Specify timeout duration for slower prometheus clients (default is 3s)
1422 | # # response_timeout = "3s"
1423 | #
1424 | # ## Optional SSL Config
1425 | # # ssl_ca = /path/to/cafile
1426 | # # ssl_cert = /path/to/certfile
1427 | # # ssl_key = /path/to/keyfile
1428 | # ## Use SSL but skip chain & host verification
1429 | # # insecure_skip_verify = false
1430 |
1431 |
1432 | # # Reads last_run_summary.yaml file and converts to measurements
1433 | # [[inputs.puppetagent]]
1434 | # ## Location of puppet last run summary file
1435 | # location = "/var/lib/puppet/state/last_run_summary.yaml"
1436 |
1437 |
1438 | # # Reads metrics from RabbitMQ servers via the Management Plugin
1439 | # [[inputs.rabbitmq]]
1440 | # ## Management Plugin url. (default: http://localhost:15672)
1441 | # # url = "http://localhost:15672"
1442 | # ## Tag added to rabbitmq_overview series; deprecated: use tags
1443 | # # name = "rmq-server-1"
1444 | # ## Credentials
1445 | # # username = "guest"
1446 | # # password = "guest"
1447 | #
1448 | # ## Optional SSL Config
1449 | # # ssl_ca = "/etc/telegraf/ca.pem"
1450 | # # ssl_cert = "/etc/telegraf/cert.pem"
1451 | # # ssl_key = "/etc/telegraf/key.pem"
1452 | # ## Use SSL but skip chain & host verification
1453 | # # insecure_skip_verify = false
1454 | #
1455 | # ## Optional request timeouts
1456 | # ##
1457 | # ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
1458 | # ## for a server's response headers after fully writing the request.
1459 | # # header_timeout = "3s"
1460 | # ##
1461 | # ## client_timeout specifies a time limit for requests made by this client.
1462 | # ## Includes connection time, any redirects, and reading the response body.
1463 | # # client_timeout = "4s"
1464 | #
1465 | # ## A list of nodes to pull metrics about. If not specified, metrics for
1466 | # ## all nodes are gathered.
1467 | # # nodes = ["rabbit@node1", "rabbit@node2"]
1468 |
1469 |
1470 | # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
1471 | # [[inputs.raindrops]]
1472 | # ## An array of raindrops middleware URI to gather stats.
1473 | # urls = ["http://localhost:8080/_raindrops"]
1474 |
1475 |
1476 | # # Read metrics from one or many redis servers
1477 | # [[inputs.redis]]
1478 | # ## specify servers via a url matching:
1479 | # ## [protocol://][:password]@address[:port]
1480 | # ## e.g.
1481 | # ## tcp://localhost:6379
1482 | # ## tcp://:password@192.168.99.100
1483 | # ## unix:///var/run/redis.sock
1484 | # ##
1485 | # ## If no servers are specified, then localhost is used as the host.
1486 | # ## If no port is specified, 6379 is used
1487 | # servers = ["tcp://localhost:6379"]
1488 |
1489 |
1490 | # # Read metrics from one or many RethinkDB servers
1491 | # [[inputs.rethinkdb]]
1492 | # ## An array of URI to gather stats about. Specify an ip or hostname
1493 | # ## with optional port and password, ie,
1494 | # ## rethinkdb://user:auth_key@10.10.3.30:28105,
1495 | # ## rethinkdb://10.10.3.33:18832,
1496 | # ## 10.0.0.1:10000, etc.
1497 | # servers = ["127.0.0.1:28015"]
1498 | # ##
1499 | # ## If you use RethinkDB > 2.3.0 with username/password authorization, the
1500 | # ## protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
1501 | # # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
1502 | # ##
1503 | # ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
1504 | # ## has to be named "rethinkdb".
1505 | # # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
1506 |
1507 |
1508 | # # Read metrics from one or many Riak servers
1509 | # [[inputs.riak]]
1510 | # # Specify a list of one or more riak http servers
1511 | # servers = ["http://localhost:8098"]
1512 |
1513 |
1514 | # # Read API usage and limits for a Salesforce organisation
1515 | # [[inputs.salesforce]]
1516 | # ## specify your credentials
1517 | # ##
1518 | # username = "your_username"
1519 | # password = "your_password"
1520 | # ##
1521 | # ## (optional) security token
1522 | # # security_token = "your_security_token"
1523 | # ##
1524 | # ## (optional) environment type (sandbox or production)
1525 | # ## default is: production
1526 | # ##
1527 | # # environment = "production"
1528 | # ##
1529 | # ## (optional) API version (default: "39.0")
1530 | # ##
1531 | # # version = "39.0"
1532 |
1533 |
1534 | # # Monitor sensors, requires lm-sensors package
1535 | # [[inputs.sensors]]
1536 | # ## Remove numbers from field names.
1537 | # ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
1538 | # # remove_numbers = true
1539 |
1540 |
1541 | # # Retrieves SNMP values from remote agents
1542 | # [[inputs.snmp]]
1543 | # agents = [ "127.0.0.1:161" ]
1544 | # ## Timeout for each SNMP query.
1545 | # timeout = "5s"
1546 | # ## Number of retries to attempt within timeout.
1547 | # retries = 3
1548 | # ## SNMP version, values can be 1, 2, or 3
1549 | # version = 2
1550 | #
1551 | # ## SNMP community string.
1552 | # community = "public"
1553 | #
1554 | # ## The GETBULK max-repetitions parameter
1555 | # max_repetitions = 10
1556 | #
1557 | # ## SNMPv3 auth parameters
1558 | # #sec_name = "myuser"
1559 | # #auth_protocol = "md5" # Values: "MD5", "SHA", ""
1560 | # #auth_password = "pass"
1561 | # #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
1562 | # #context_name = ""
1563 | # #priv_protocol = "" # Values: "DES", "AES", ""
1564 | # #priv_password = ""
1565 | #
1566 | # ## measurement name
1567 | # name = "system"
1568 | # [[inputs.snmp.field]]
1569 | # name = "hostname"
1570 | # oid = ".1.0.0.1.1"
1571 | # [[inputs.snmp.field]]
1572 | # name = "uptime"
1573 | # oid = ".1.0.0.1.2"
1574 | # [[inputs.snmp.field]]
1575 | # name = "load"
1576 | # oid = ".1.0.0.1.3"
1577 | # [[inputs.snmp.field]]
1578 | # oid = "HOST-RESOURCES-MIB::hrMemorySize"
1579 | #
1580 | # [[inputs.snmp.table]]
1581 | # ## measurement name
1582 | # name = "remote_servers"
1583 | # inherit_tags = [ "hostname" ]
1584 | # [[inputs.snmp.table.field]]
1585 | # name = "server"
1586 | # oid = ".1.0.0.0.1.0"
1587 | # is_tag = true
1588 | # [[inputs.snmp.table.field]]
1589 | # name = "connections"
1590 | # oid = ".1.0.0.0.1.1"
1591 | # [[inputs.snmp.table.field]]
1592 | # name = "latency"
1593 | # oid = ".1.0.0.0.1.2"
1594 | #
1595 | # [[inputs.snmp.table]]
1596 | # ## auto populate table's fields using the MIB
1597 | # oid = "HOST-RESOURCES-MIB::hrNetworkTable"
1598 |
1599 |
1600 | # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
1601 | # [[inputs.snmp_legacy]]
1602 | # ## Use 'oids.txt' file to translate oids to names
1603 | # ## To generate 'oids.txt' you need to run:
1604 | # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
1605 | # ## Or if you have another MIB folder with custom MIBs
1606 | # ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
1607 | # snmptranslate_file = "/tmp/oids.txt"
1608 | # [[inputs.snmp.host]]
1609 | # address = "192.168.2.2:161"
1610 | # # SNMP community
1611 | # community = "public" # default public
1612 | # # SNMP version (1, 2 or 3)
1613 | # # Version 3 not supported yet
1614 | # version = 2 # default 2
1615 | # # SNMP response timeout
1616 | # timeout = 2.0 # default 2.0
1617 | # # SNMP request retries
1618 | # retries = 2 # default 2
1619 | # # Which get/bulk do you want to collect for this host
1620 | # collect = ["mybulk", "sysservices", "sysdescr"]
1621 | # # Simple list of OIDs to get, in addition to "collect"
1622 | # get_oids = []
1623 | #
1624 | # [[inputs.snmp.host]]
1625 | # address = "192.168.2.3:161"
1626 | # community = "public"
1627 | # version = 2
1628 | # timeout = 2.0
1629 | # retries = 2
1630 | # collect = ["mybulk"]
1631 | # get_oids = [
1632 | # "ifNumber",
1633 | # ".1.3.6.1.2.1.1.3.0",
1634 | # ]
1635 | #
1636 | # [[inputs.snmp.get]]
1637 | # name = "ifnumber"
1638 | # oid = "ifNumber"
1639 | #
1640 | # [[inputs.snmp.get]]
1641 | # name = "interface_speed"
1642 | # oid = "ifSpeed"
1643 | # instance = "0"
1644 | #
1645 | # [[inputs.snmp.get]]
1646 | # name = "sysuptime"
1647 | # oid = ".1.3.6.1.2.1.1.3.0"
1648 | # unit = "second"
1649 | #
1650 | # [[inputs.snmp.bulk]]
1651 | # name = "mybulk"
1652 | # max_repetition = 127
1653 | # oid = ".1.3.6.1.2.1.1"
1654 | #
1655 | # [[inputs.snmp.bulk]]
1656 | # name = "ifoutoctets"
1657 | # max_repetition = 127
1658 | # oid = "ifOutOctets"
1659 | #
1660 | # [[inputs.snmp.host]]
1661 | # address = "192.168.2.13:161"
1662 | # #address = "127.0.0.1:161"
1663 | # community = "public"
1664 | # version = 2
1665 | # timeout = 2.0
1666 | # retries = 2
1667 | # #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
1668 | # collect = ["sysuptime" ]
1669 | # [[inputs.snmp.host.table]]
1670 | # name = "iftable3"
1671 | # include_instances = ["enp5s0", "eth1"]
1672 | #
1673 | # # SNMP TABLEs
1674 | # # table with neither mapping nor subtables
1675 | # [[inputs.snmp.table]]
1676 | # name = "iftable1"
1677 | # oid = ".1.3.6.1.2.1.31.1.1.1"
1678 | #
1679 | # # table without mapping but with subtables
1680 | # [[inputs.snmp.table]]
1681 | # name = "iftable2"
1682 | # oid = ".1.3.6.1.2.1.31.1.1.1"
1683 | # sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
1684 | #
1685 | # # table with mapping but without subtables
1686 | # [[inputs.snmp.table]]
1687 | # name = "iftable3"
1688 | # oid = ".1.3.6.1.2.1.31.1.1.1"
1689 | # # if empty, get all instances
1690 | # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
1691 | # # if empty, get all subtables
1692 | #
1693 | # # table with both mapping and subtables
1694 | # [[inputs.snmp.table]]
1695 | # name = "iftable4"
1696 | # oid = ".1.3.6.1.2.1.31.1.1.1"
1697 | # # if empty, get all instances
1698 | # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
1699 | # # if empty, get all subtables
1700 | # # sub_tables do not have to be "real" subtables
1701 | # sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
1702 |
1703 |
1704 | # # Read metrics from Microsoft SQL Server
1705 | # [[inputs.sqlserver]]
1706 | # ## Specify instances to monitor with a list of connection strings.
1707 | # ## All connection parameters are optional.
1708 | # ## By default, the host is localhost, listening on default port, TCP 1433.
1709 | # ## for Windows, the user is the currently running AD user (SSO).
1710 | # ## See https://github.com/denisenkom/go-mssqldb for detailed connection
1711 | # ## parameters.
1712 | # # servers = [
1713 | # # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
1714 | # # ]
1715 |
1716 |
1717 | # # Sysstat metrics collector
1718 | # [[inputs.sysstat]]
1719 | # ## Path to the sadc command.
1720 | # #
1721 | # ## Common Defaults:
1722 | # ## Debian/Ubuntu: /usr/lib/sysstat/sadc
1723 | # ## Arch: /usr/lib/sa/sadc
1724 | # ## RHEL/CentOS: /usr/lib64/sa/sadc
1725 | # sadc_path = "/usr/lib/sa/sadc" # required
1726 | # #
1727 | # #
1728 | # ## Path to the sadf command, if it is not in PATH
1729 | # # sadf_path = "/usr/bin/sadf"
1730 | # #
1731 | # #
1732 | # ## Activities is a list of activities that are passed as arguments to the
1733 | # ## sadc collector utility (e.g: DISK, SNMP etc...)
1734 | # ## The more activities that are added, the more data is collected.
1735 | # # activities = ["DISK"]
1736 | # #
1737 | # #
1738 | # ## Group metrics to measurements.
1739 | # ##
1740 | # ## If group is false each metric will be prefixed with a description
1741 | # ## and represents itself a measurement.
1742 | # ##
1743 | # ## If Group is true, corresponding metrics are grouped to a single measurement.
1744 | # # group = true
1745 | # #
1746 | # #
1747 | # ## Options for the sadf command. The values on the left represent the sadf
1748 | # ## options and the values on the right their description (which are used for
1749 | # ## grouping and prefixing metrics).
1750 | # ##
1751 | # ## Run 'sar -h' or 'man sar' to find out the supported options for your
1752 | # ## sysstat version.
1753 | # [inputs.sysstat.options]
1754 | # -C = "cpu"
1755 | # -B = "paging"
1756 | # -b = "io"
1757 | # -d = "disk" # requires DISK activity
1758 | # "-n ALL" = "network"
1759 | # "-P ALL" = "per_cpu"
1760 | # -q = "queue"
1761 | # -R = "mem"
1762 | # -r = "mem_util"
1763 | # -S = "swap_util"
1764 | # -u = "cpu_util"
1765 | # -v = "inode"
1766 | # -W = "swap"
1767 | # -w = "task"
1768 | # # -H = "hugepages" # only available for newer linux distributions
1769 | # # "-I ALL" = "interrupts" # requires INT activity
1770 | # #
1771 | # #
1772 | # ## Device tags can be used to add additional tags for devices.
1773 | # ## For example the configuration below adds a tag vg with value rootvg for
1774 | # ## all metrics with sda devices.
1775 | # # [[inputs.sysstat.device_tags.sda]]
1776 | # # vg = "rootvg"
1777 |
1778 |
1779 | # # Gather metrics from the Tomcat server status page.
1780 | # [[inputs.tomcat]]
1781 | # ## URL of the Tomcat server status
1782 | # # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
1783 | #
1784 | # ## HTTP Basic Auth Credentials
1785 | # # username = "tomcat"
1786 | # # password = "s3cret"
1787 | #
1788 | # ## Request timeout
1789 | # # timeout = "5s"
1790 | #
1791 | # ## Optional SSL Config
1792 | # # ssl_ca = "/etc/telegraf/ca.pem"
1793 | # # ssl_cert = "/etc/telegraf/cert.pem"
1794 | # # ssl_key = "/etc/telegraf/key.pem"
1795 | # ## Use SSL but skip chain & host verification
1796 | # # insecure_skip_verify = false
1797 |
1798 |
1799 | # # Inserts sine and cosine waves for demonstration purposes
1800 | # [[inputs.trig]]
1801 | # ## Set the amplitude
1802 | # amplitude = 10.0
1803 |
1804 |
1805 | # # Read Twemproxy stats data
1806 | # [[inputs.twemproxy]]
1807 | # ## Twemproxy stats address and port (no scheme)
1808 | # addr = "localhost:22222"
1809 | # ## Monitor pool name
1810 | # pools = ["redis_pool", "mc_pool"]
1811 |
1812 |
1813 | # # A plugin to collect stats from Varnish HTTP Cache
1814 | # [[inputs.varnish]]
1815 | # ## If running as a restricted user you can prepend sudo for additional access:
1816 | # #use_sudo = false
1817 | #
1818 | # ## The default location of the varnishstat binary can be overridden with:
1819 | # binary = "/usr/bin/varnishstat"
1820 | #
1821 | # ## By default, telegraf gathers stats for 3 metric points.
1822 | # ## Setting stats will override the defaults shown below.
1823 | # ## Glob matching can be used, ie, stats = ["MAIN.*"]
1824 | # ## stats may also be set to ["*"], which will collect all stats
1825 | # stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
1826 |
1827 |
1828 | # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
1829 | # [[inputs.zfs]]
1830 | # ## ZFS kstat path. Ignored on FreeBSD
1831 | # ## If not specified, then default is:
1832 | # # kstatPath = "/proc/spl/kstat/zfs"
1833 | #
1834 | # ## By default, telegraf gathers all zfs stats
1835 | # ## If not specified, then default is:
1836 | # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
1837 | #
1838 | # ## By default, don't gather zpool stats
1839 | # # poolMetrics = false
1840 |
1841 |
1842 | # # Reads 'mntr' stats from one or many zookeeper servers
1843 | # [[inputs.zookeeper]]
1844 | # ## An array of addresses to gather stats about. Specify an ip or hostname
1845 | # ## with port, ie, localhost:2181, 10.0.0.1:2181, etc.
1846 | #
1847 | # ## If no servers are specified, then localhost is used as the host.
1848 | # ## If no port is specified, 2181 is used
1849 | # servers = [":2181"]
1850 |
1851 |
1852 |
1853 | ###############################################################################
1854 | # SERVICE INPUT PLUGINS #
1855 | ###############################################################################
1856 |
1857 | # # AMQP consumer plugin
1858 | # [[inputs.amqp_consumer]]
1859 | # ## AMQP url
1860 | # url = "amqp://localhost:5672/influxdb"
1861 | # ## AMQP exchange
1862 | # exchange = "telegraf"
1863 | # ## AMQP queue name
1864 | # queue = "telegraf"
1865 | # ## Binding Key
1866 | # binding_key = "#"
1867 | #
1868 | # ## Maximum number of messages the server should give to the worker.
1869 | # prefetch_count = 50
1870 | #
1871 | # ## Auth method. PLAIN and EXTERNAL are supported
1872 | # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
1873 | # ## described here: https://www.rabbitmq.com/plugins.html
1874 | # # auth_method = "PLAIN"
1875 | #
1876 | # ## Optional SSL Config
1877 | # # ssl_ca = "/etc/telegraf/ca.pem"
1878 | # # ssl_cert = "/etc/telegraf/cert.pem"
1879 | # # ssl_key = "/etc/telegraf/key.pem"
1880 | # ## Use SSL but skip chain & host verification
1881 | # # insecure_skip_verify = false
1882 | #
1883 | # ## Data format to consume.
1884 | # ## Each data format has its own unique set of configuration options, read
1885 | # ## more about them here:
1886 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1887 | # data_format = "influx"
1888 |
1889 |
1890 | # # Influx HTTP write listener
1891 | # [[inputs.http_listener]]
1892 | # ## Address and port to host HTTP listener on
1893 | # service_address = ":8186"
1894 | #
1895 | # ## maximum duration before timing out read of the request
1896 | # read_timeout = "10s"
1897 | # ## maximum duration before timing out write of the response
1898 | # write_timeout = "10s"
1899 | #
1900 | # ## Maximum allowed http request body size in bytes.
1901 | # ## 0 means to use the default of 536,870,912 bytes (512 mebibytes)
1902 | # max_body_size = 0
1903 | #
1904 | # ## Maximum line size allowed to be sent in bytes.
1905 | # ## 0 means to use the default of 65536 bytes (64 kibibytes)
1906 | # max_line_size = 0
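#
# ## (editor's note) the listener emulates the InfluxDB HTTP write API, so a
# ## client would POST line-protocol bodies to its /write endpoint, e.g. a
# ## hypothetical metric: cpu,host=server01 value=0.64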
1907 |
1908 |
1909 | # # Read metrics from Kafka topic(s)
1910 | # [[inputs.kafka_consumer]]
1911 | # ## kafka servers
1912 | # brokers = ["localhost:9092"]
1913 | # ## topic(s) to consume
1914 | # topics = ["telegraf"]
1915 | #
1916 | # ## Optional SSL Config
1917 | # # ssl_ca = "/etc/telegraf/ca.pem"
1918 | # # ssl_cert = "/etc/telegraf/cert.pem"
1919 | # # ssl_key = "/etc/telegraf/key.pem"
1920 | # ## Use SSL but skip chain & host verification
1921 | # # insecure_skip_verify = false
1922 | #
1923 | # ## Optional SASL Config
1924 | # # sasl_username = "kafka"
1925 | # # sasl_password = "secret"
1926 | #
1927 | # ## the name of the consumer group
1928 | # consumer_group = "telegraf_metrics_consumers"
1929 | # ## Offset (must be either "oldest" or "newest")
1930 | # offset = "oldest"
1931 | #
1932 | # ## Data format to consume.
1933 | # ## Each data format has its own unique set of configuration options, read
1934 | # ## more about them here:
1935 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1936 | # data_format = "influx"
1937 | #
1938 | # ## Maximum length of a message to consume, in bytes (default 0/unlimited);
1939 | # ## larger messages are dropped
1940 | # max_message_len = 65536
1941 |
1942 |
1943 | # # Read metrics from Kafka topic(s)
1944 | # [[inputs.kafka_consumer_legacy]]
1945 | # ## topic(s) to consume
1946 | # topics = ["telegraf"]
1947 | # ## an array of Zookeeper connection strings
1948 | # zookeeper_peers = ["localhost:2181"]
1949 | # ## Zookeeper Chroot
1950 | # zookeeper_chroot = ""
1951 | # ## the name of the consumer group
1952 | # consumer_group = "telegraf_metrics_consumers"
1953 | # ## Offset (must be either "oldest" or "newest")
1954 | # offset = "oldest"
1955 | #
1956 | # ## Data format to consume.
1957 | # ## Each data format has its own unique set of configuration options, read
1958 | # ## more about them here:
1959 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1960 | # data_format = "influx"
1961 | #
1962 | # ## Maximum length of a message to consume, in bytes (default 0/unlimited);
1963 | # ## larger messages are dropped
1964 | # max_message_len = 65536
1965 |
1966 |
1967 | # # Stream and parse log file(s).
1968 | # [[inputs.logparser]]
1969 | # ## Log files to parse.
1970 | # ## These accept standard unix glob matching rules, but with the addition of
1971 | # ## ** as a "super asterisk". ie:
1972 | # ## /var/log/**.log -> recursively find all .log files in /var/log
1973 | # ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
1974 | # ## /var/log/apache.log -> only tail the apache log file
1975 | # files = ["/var/log/apache/access.log"]
1976 | #
1977 | # ## Read files that currently exist from the beginning. Files that are created
1978 | # ## while telegraf is running (and that match the "files" globs) will always
1979 | # ## be read from the beginning.
1980 | # from_beginning = false
1981 | #
1982 | # ## Parse logstash-style "grok" patterns:
1983 | # ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
1984 | # [inputs.logparser.grok]
1985 | # ## This is a list of patterns to check the given log file(s) for.
1986 | # ## Note that adding patterns here increases processing time. The most
1987 | # ## efficient configuration is to have one pattern per logparser.
1988 | # ## Other common built-in patterns are:
1989 | # ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
1990 | # ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
1991 | # patterns = ["%{COMBINED_LOG_FORMAT}"]
1992 | #
1993 | # ## Name of the output measurement.
1994 | # measurement = "apache_access_log"
1995 | #
1996 | # ## Full path(s) to custom pattern files.
1997 | # custom_pattern_files = []
1998 | #
1999 | # ## Custom patterns can also be defined here. Put one pattern per line.
2000 | # custom_patterns = '''
2001 | # '''
2002 | #
2003 | # ## Timezone allows you to provide an override for timestamps that
2004 | # ## don't already include an offset
2005 | # ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
2006 | # ##
2007 | # ## Default: "" which renders UTC
2008 | # ## Options are as follows:
2009 | # ## 1. Local -- interpret based on machine localtime
2010 | # ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
2011 | # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
2012 | # timezone = "Canada/Eastern"
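#
# ## (editor's sketch) a custom pattern is one name and one regex per line
# ## between the ''' markers above; the name below is hypothetical:
# ##   QUEUE_ID [0-9A-F]{10,11}
# ## It could then be referenced from "patterns" as %{QUEUE_ID:queue_id}.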
2013 |
2014 |
2015 | # # Read metrics from MQTT topic(s)
2016 | # [[inputs.mqtt_consumer]]
2017 | # servers = ["localhost:1883"]
2018 | # ## MQTT QoS, must be 0, 1, or 2
2019 | # qos = 0
2020 | # ## Connection timeout for initial connection in seconds
2021 | # connection_timeout = 30
2022 | #
2023 | # ## Topics to subscribe to
2024 | # topics = [
2025 | # "telegraf/host01/cpu",
2026 | # "telegraf/+/mem",
2027 | # "sensors/#",
2028 | # ]
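# ## (editor's note) "+" and "#" are the standard MQTT wildcards: "+" matches
# ## exactly one topic level ("telegraf/+/mem" is every host's mem topic) and
# ## "#" matches all remaining levels ("sensors/#" is the whole sensors tree).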
2029 | #
2030 | # # if true, messages that can't be delivered while the subscriber is offline
2031 | # # will be delivered when it comes back (such as on service restart).
2032 | # # NOTE: if true, client_id MUST be set
2033 | # persistent_session = false
2034 | # # If empty, a random client ID will be generated.
2035 | # client_id = ""
2036 | #
2037 | # ## username and password to connect MQTT server.
2038 | # # username = "telegraf"
2039 | # # password = "metricsmetricsmetricsmetrics"
2040 | #
2041 | # ## Optional SSL Config
2042 | # # ssl_ca = "/etc/telegraf/ca.pem"
2043 | # # ssl_cert = "/etc/telegraf/cert.pem"
2044 | # # ssl_key = "/etc/telegraf/key.pem"
2045 | # ## Use SSL but skip chain & host verification
2046 | # # insecure_skip_verify = false
2047 | #
2048 | # ## Data format to consume.
2049 | # ## Each data format has its own unique set of configuration options, read
2050 | # ## more about them here:
2051 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2052 | # data_format = "influx"
2053 |
2054 |
2055 | # # Read metrics from NATS subject(s)
2056 | # [[inputs.nats_consumer]]
2057 | # ## urls of NATS servers
2058 | # # servers = ["nats://localhost:4222"]
2059 | # ## Use Transport Layer Security
2060 | # # secure = false
2061 | # ## subject(s) to consume
2062 | # # subjects = ["telegraf"]
2063 | # ## name a queue group
2064 | # # queue_group = "telegraf_consumers"
2065 | #
2066 | # ## Sets the limits for pending msgs and bytes for each subscription
2067 | # ## These shouldn't need to be adjusted except in very high throughput scenarios
2068 | # # pending_message_limit = 65536
2069 | # # pending_bytes_limit = 67108864
2070 | #
2071 | # ## Data format to consume.
2072 | # ## Each data format has its own unique set of configuration options, read
2073 | # ## more about them here:
2074 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2075 | # data_format = "influx"
2076 |
2077 |
2078 | # # Read NSQ topic for metrics.
2079 | # [[inputs.nsq_consumer]]
2080 | # ## A string representing the NSQD TCP endpoint
2081 | # server = "localhost:4150"
2082 | # topic = "telegraf"
2083 | # channel = "consumer"
2084 | # max_in_flight = 100
2085 | #
2086 | # ## Data format to consume.
2087 | # ## Each data format has its own unique set of configuration options, read
2088 | # ## more about them here:
2089 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2090 | # data_format = "influx"
2091 |
2092 |
2093 | # # Generic socket listener capable of handling multiple socket types.
2094 | # [[inputs.socket_listener]]
2095 | # ## URL to listen on
2096 | # # service_address = "tcp://:8094"
2097 | # # service_address = "tcp://127.0.0.1:http"
2098 | # # service_address = "tcp4://:8094"
2099 | # # service_address = "tcp6://:8094"
2100 | # # service_address = "tcp6://[2001:db8::1]:8094"
2101 | # # service_address = "udp://:8094"
2102 | # # service_address = "udp4://:8094"
2103 | # # service_address = "udp6://:8094"
2104 | # # service_address = "unix:///tmp/telegraf.sock"
2105 | # # service_address = "unixgram:///tmp/telegraf.sock"
2106 | #
2107 | # ## Maximum number of concurrent connections.
2108 | # ## Only applies to stream sockets (e.g. TCP).
2109 | # ## 0 (default) is unlimited.
2110 | # # max_connections = 1024
2111 | #
2112 | # ## Read timeout.
2113 | # ## Only applies to stream sockets (e.g. TCP).
2114 | # ## 0 (default) is unlimited.
2115 | # # read_timeout = "30s"
2116 | #
2117 | # ## Maximum socket buffer size in bytes.
2118 | # ## For stream sockets, once the buffer fills up, the sender will start backing up.
2119 | # ## For datagram sockets, once the buffer fills up, metrics will start dropping.
2120 | # ## Defaults to the OS default.
2121 | # # read_buffer_size = 65535
2122 | #
2123 | # ## Period between keep alive probes.
2124 | # ## Only applies to TCP sockets.
2125 | # ## 0 disables keep alive probes.
2126 | # ## Defaults to the OS configuration.
2127 | # # keep_alive_period = "5m"
2128 | #
2129 | # ## Data format to consume.
2130 | # ## Each data format has its own unique set of configuration options, read
2131 | # ## more about them here:
2132 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2133 | # # data_format = "influx"
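#
# ## (editor's sketch) in this graphite-oriented stack the same listener
# ## could, for example, accept graphite plaintext lines over TCP; assumed
# ## values, not part of the original config:
# # service_address = "tcp://:2003"
# # data_format = "graphite"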
2134 |
2135 |
2136 | # # Statsd UDP/TCP Server
2137 | # [[inputs.statsd]]
2138 | # ## Protocol, must be "tcp" or "udp" (default=udp)
2139 | # protocol = "udp"
2140 | #
2141 | # ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
2142 | # max_tcp_connections = 250
2143 | #
2144 | # ## Address and port to host UDP listener on
2145 | # service_address = ":8125"
2146 | #
2147 | # ## The following configuration options control when telegraf clears its cache
2148 | # ## of previous values. If set to false, then telegraf will only clear its
2149 | # ## cache when the daemon is restarted.
2150 | # ## Reset gauges every interval (default=true)
2151 | # delete_gauges = true
2152 | # ## Reset counters every interval (default=true)
2153 | # delete_counters = true
2154 | # ## Reset sets every interval (default=true)
2155 | # delete_sets = true
2156 | # ## Reset timings & histograms every interval (default=true)
2157 | # delete_timings = true
2158 | #
2159 | # ## Percentiles to calculate for timing & histogram stats
2160 | # percentiles = [90]
2161 | #
2162 | # ## separator to use between elements of a statsd metric
2163 | # metric_separator = "_"
2164 | #
2165 | # ## Parses tags in the datadog statsd format
2166 | # ## http://docs.datadoghq.com/guides/dogstatsd/
2167 | # parse_data_dog_tags = false
2168 | #
2169 | # ## Statsd data translation templates, more info can be read here:
2170 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
2171 | # # templates = [
2172 | # # "cpu.* measurement*"
2173 | # # ]
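# ## (editor's note) a worked example, not upstream text: with the template
# ## above and metric_separator = "_", an incoming statsd metric
# ## "cpu.load.shortterm:50|g" would produce the measurement
# ## "cpu_load_shortterm".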
2174 | #
2175 | # ## Number of UDP messages allowed to queue up, once filled,
2176 | # ## the statsd server will start dropping packets
2177 | # allowed_pending_messages = 10000
2178 | #
2179 | # ## Number of timing/histogram values to track per-measurement in the
2180 | # ## calculation of percentiles. Raising this limit increases the accuracy
2181 | # ## of percentiles but also increases the memory usage and cpu time.
2182 | # percentile_limit = 1000
2183 |
2184 |
2185 | # # Stream a log file, like the tail -f command
2186 | # [[inputs.tail]]
2187 | # ## files to tail.
2188 | # ## These accept standard unix glob matching rules, but with the addition of
2189 | # ## ** as a "super asterisk". ie:
2190 | # ## "/var/log/**.log" -> recursively find all .log files in /var/log
2191 | # ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
2192 | # ## "/var/log/apache.log" -> just tail the apache log file
2193 | # ##
2194 | # ## See https://github.com/gobwas/glob for more examples
2195 | # ##
2196 | # files = ["/var/mymetrics.out"]
2197 | # ## Read file from beginning.
2198 | # from_beginning = false
2199 | # ## Whether file is a named pipe
2200 | # pipe = false
2201 | #
2202 | # ## Data format to consume.
2203 | # ## Each data format has its own unique set of configuration options, read
2204 | # ## more about them here:
2205 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2206 | # data_format = "influx"
2207 |
2208 |
2209 | # # Generic TCP listener
2210 | # [[inputs.tcp_listener]]
2211 | # # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
2212 | # # socket_listener plugin
2213 | # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
2214 |
2215 |
2216 | # # Generic UDP listener
2217 | # [[inputs.udp_listener]]
2218 | # # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
2219 | # # socket_listener plugin
2220 | # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
2221 |
2222 |
2223 | # # A Webhooks Event collector
2224 | # [[inputs.webhooks]]
2225 | # ## Address and port to host Webhook listener on
2226 | # service_address = ":1619"
2227 | #
2228 | # [inputs.webhooks.filestack]
2229 | # path = "/filestack"
2230 | #
2231 | # [inputs.webhooks.github]
2232 | # path = "/github"
2233 | # # secret = ""
2234 | #
2235 | # [inputs.webhooks.mandrill]
2236 | # path = "/mandrill"
2237 | #
2238 | # [inputs.webhooks.rollbar]
2239 | # path = "/rollbar"
2240 | #
2241 | # [inputs.webhooks.papertrail]
2242 | # path = "/papertrail"
2243 |
2244 |
2245 | # # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
2246 | # [[inputs.zipkin]]
2247 | # # path = "/api/v1/spans" # URL path for span data
2248 | # # port = 9411 # Port on which Telegraf listens
2249 |
--------------------------------------------------------------------------------