├── LICENSE
├── README.md
├── docker-compose.yml
├── env.grafana
├── env.influxdb
├── run.sh
└── telegraf.conf

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2015 Nicolas Hennion
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 |

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Docker-compose files for a simple, up-to-date InfluxDB + Grafana + Telegraf stack
2 |
3 | Get the stack (only once):
4 |
5 | ```
6 | git clone https://github.com/nicolargo/docker-influxdb-grafana.git
7 | cd docker-influxdb-grafana
8 | docker pull grafana/grafana
9 | docker pull influxdb
10 | docker pull telegraf
11 | ```
12 |
13 | Run your stack:
14 |
15 | ```
16 | sudo mkdir -p /srv/docker/grafana/data
17 | docker-compose up -d
18 | sudo chown -R 472:472 /srv/docker/grafana/data
19 | ```
20 |
21 | Show me the logs:
22 |
23 | ```
24 | docker-compose logs
25 | ```
26 |
27 | Stop it:
28 |
29 | ```
30 | docker-compose stop
31 | docker-compose rm
32 | ```
33 |
34 | Update it:
35 |
36 | ```
37 | git pull
38 | docker pull grafana/grafana
39 | docker pull influxdb
40 | docker pull telegraf
41 | ```
42 |
43 | If you want to run Telegraf, edit telegraf.conf to your needs and:
44 |
45 | ```
46 | docker exec telegraf telegraf
47 | ```
48 |
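To check that everything came up, the HTTP endpoints published in docker-compose.yml below can be probed directly. A sketch, assuming the default ports and the InfluxDB 1.x image (its v1 HTTP API listens on 8086):

```
# InfluxDB liveness: /ping answers HTTP 204 when the server is ready.
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8086/ping

# Create the database Glances (or any other client) will write into,
# then list databases to confirm it exists.
curl -XPOST 'http://localhost:8086/query' --data-urlencode 'q=CREATE DATABASE glances'
curl -G 'http://localhost:8086/query?pretty=true' --data-urlencode 'q=SHOW DATABASES'

# Grafana's login page should answer on port 3000 (default credentials admin/admin).
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3000/login
```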
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | networks:
2 |   influxdb:
3 |
4 | services:
5 |   influxdb:
6 |     image: influxdb:latest
7 |     container_name: influxdb
8 |     ports:
9 |       - "8083:8083"
10 |       - "8086:8086"
11 |       - "8090:8090"
12 |       - "2003:2003"
13 |     networks:
14 |       - influxdb
15 |     env_file:
16 |       - 'env.influxdb'
17 |     volumes:
18 |       # Data persistence
19 |       # sudo mkdir -p /srv/docker/influxdb/data
20 |       - /srv/docker/influxdb/data:/var/lib/influxdb
21 |
22 |   telegraf:
23 |     image: telegraf:latest
24 |     container_name: telegraf
25 |     networks:
26 |       - influxdb
27 |     volumes:
28 |       - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro
29 |
30 |   grafana:
31 |     image: grafana/grafana:latest
32 |     container_name: grafana
33 |     ports:
34 |       - "3000:3000"
35 |     networks:
36 |       - influxdb
37 |     env_file:
38 |       - 'env.grafana'
39 |     user: "0"
40 |     links:
41 |       - influxdb
42 |     volumes:
43 |       # Data persistence
44 |       # sudo mkdir -p /srv/docker/grafana/data; chown 472:472 /srv/docker/grafana/data
45 |       - /srv/docker/grafana/data:/var/lib/grafana
46 |

--------------------------------------------------------------------------------
/env.grafana:
--------------------------------------------------------------------------------
1 | GF_INSTALL_PLUGINS=grafana-clock-panel,briangann-gauge-panel,natel-plotly-panel,grafana-simple-json-datasource

--------------------------------------------------------------------------------
/env.influxdb:
--------------------------------------------------------------------------------
1 | INFLUXDB_DATA_ENGINE=tsm1
2 | INFLUXDB_REPORTING_DISABLED=false
3 | INFLUXDB_GRAPHITE_0_ENABLED=true
4 | INFLUXDB_GRAPHITE_0_DATABASE=graphite
5 | INFLUXDB_GRAPHITE_0_BIND_ADDRESS=:2003
6 | INFLUXDB_GRAPHITE_PROTOCOL=tcp
7 | INFLUXDB_GRAPHITE_BATCH_SIZE=1000

--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo mkdir -p /srv/docker/grafana/data
4 | docker compose up -d
5 | sudo chown -R 472:472 /srv/docker/grafana/data
6 |
7 | echo "Grafana: http://127.0.0.1:3000 - admin/admin"
8 |
9 | echo
10 | echo "Current database list"
11 | curl -G "http://localhost:8086/query?pretty=true" --data-urlencode "db=glances" --data-urlencode "q=SHOW DATABASES"
12 |
13 | echo
14 | echo "Create a new database?"
15 | echo "curl -XPOST 'http://localhost:8086/query' --data-urlencode 'q=CREATE DATABASE mydb'"

--------------------------------------------------------------------------------
/telegraf.conf:
--------------------------------------------------------------------------------
1 | # Telegraf Configuration
2 | #
3 | # Telegraf is entirely plugin driven. All metrics are gathered from the
4 | # declared inputs, and sent to the declared outputs.
5 | #
6 | # Plugins must be declared in here to be active.
7 | # To deactivate a plugin, comment out the name and any variables.
8 | #
9 | # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
10 | # file would generate.
11 | #
12 | # Environment variables can be used anywhere in this config file, simply prepend
13 | # them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
14 | # for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
15 |
16 |
17 | # Global tags can be specified here in key="value" format.
18 | [global_tags]
19 | # dc = "us-east-1" # will tag all metrics with dc=us-east-1
20 | # rack = "1a"
21 | ## Environment variables can be used as tags, and throughout the config file
22 | # user = "$USER"
23 |
24 |
25 | # Configuration for telegraf agent
26 | [agent]
27 | ## Default data collection interval for all inputs
28 | interval = "10s"
29 | ## Rounds collection interval to 'interval'
30 | ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
31 | round_interval = true
32 |
33 | ## Telegraf will send metrics to outputs in batches of at
34 | ## most metric_batch_size metrics.
35 | metric_batch_size = 1000
36 | ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
37 | ## output, and will flush this buffer on a successful write. Oldest metrics
38 | ## are dropped first when this buffer fills.
39 | metric_buffer_limit = 10000
40 |
41 | ## Collection jitter is used to jitter the collection by a random amount.
42 | ## Each plugin will sleep for a random time within jitter before collecting.
43 | ## This can be used to avoid many plugins querying things like sysfs at the 44 | ## same time, which can have a measurable effect on the system. 45 | collection_jitter = "0s" 46 | 47 | ## Default flushing interval for all outputs. You shouldn't set this below 48 | ## interval. Maximum flush_interval will be flush_interval + flush_jitter 49 | flush_interval = "10s" 50 | ## Jitter the flush interval by a random amount. This is primarily to avoid 51 | ## large write spikes for users running a large number of telegraf instances. 52 | ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s 53 | flush_jitter = "0s" 54 | 55 | ## By default, precision will be set to the same timestamp order as the 56 | ## collection interval, with the maximum being 1s. 57 | ## Precision will NOT be used for service inputs, such as logparser and statsd. 58 | ## Valid values are "ns", "us" (or "µs"), "ms", "s". 59 | precision = "" 60 | ## Run telegraf in debug mode 61 | debug = false 62 | ## Run telegraf in quiet mode 63 | quiet = false 64 | ## Override default hostname, if empty use os.Hostname() 65 | hostname = "" 66 | ## If set to true, do no set the "host" tag in the telegraf agent. 67 | omit_hostname = false 68 | 69 | 70 | ############################################################################### 71 | # OUTPUT PLUGINS # 72 | ############################################################################### 73 | 74 | # Configuration for influxdb server to send metrics to 75 | [[outputs.influxdb]] 76 | ## The full HTTP or UDP endpoint URL for your InfluxDB instance. 77 | ## Multiple urls can be specified as part of the same cluster, 78 | ## this means that only ONE of the urls will be written to each interval. 79 | # urls = ["udp://localhost:8089"] # UDP endpoint example 80 | urls = ["http://influxdb:8086"] # required 81 | ## The target database for metrics (telegraf will create it if not exists). 82 | database = "telegraf" # required 83 | 84 | ## Retention policy to write to. Empty string writes to the default rp. 85 | retention_policy = "" 86 | ## Write consistency (clusters only), can be: "any", "one", "quorum", "all" 87 | write_consistency = "any" 88 | 89 | ## Write timeout (for the InfluxDB client), formatted as a string. 90 | ## If not provided, will default to 5s. 0s means no timeout (not recommended). 91 | timeout = "5s" 92 | # username = "telegraf" 93 | # password = "metricsmetricsmetricsmetrics" 94 | ## Set the user agent for HTTP POSTs (can be useful for log differentiation) 95 | # user_agent = "telegraf" 96 | ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) 97 | # udp_payload = 512 98 | 99 | ## Optional SSL Config 100 | # ssl_ca = "/etc/telegraf/ca.pem" 101 | # ssl_cert = "/etc/telegraf/cert.pem" 102 | # ssl_key = "/etc/telegraf/key.pem" 103 | ## Use SSL but skip chain & host verification 104 | # insecure_skip_verify = false 105 | 106 | 107 | # # Configuration for Amon Server to send metrics to. 108 | # [[outputs.amon]] 109 | # ## Amon Server Key 110 | # server_key = "my-server-key" # required. 111 | # 112 | # ## Amon Instance URL 113 | # amon_instance = "https://youramoninstance" # required 114 | # 115 | # ## Connection timeout. 116 | # # timeout = "5s" 117 | 118 | 119 | # # Configuration for the AMQP server to send metrics to 120 | # [[outputs.amqp]] 121 | # ## AMQP url 122 | # url = "amqp://localhost:5672/influxdb" 123 | # ## AMQP exchange 124 | # exchange = "telegraf" 125 | # ## Auth method. 
PLAIN and EXTERNAL are supported 126 | # # auth_method = "PLAIN" 127 | # ## Telegraf tag to use as a routing key 128 | # ## ie, if this tag exists, it's value will be used as the routing key 129 | # routing_tag = "host" 130 | # 131 | # ## InfluxDB retention policy 132 | # # retention_policy = "default" 133 | # ## InfluxDB database 134 | # # database = "telegraf" 135 | # ## InfluxDB precision 136 | # # precision = "s" 137 | # 138 | # ## Optional SSL Config 139 | # # ssl_ca = "/etc/telegraf/ca.pem" 140 | # # ssl_cert = "/etc/telegraf/cert.pem" 141 | # # ssl_key = "/etc/telegraf/key.pem" 142 | # ## Use SSL but skip chain & host verification 143 | # # insecure_skip_verify = false 144 | # 145 | # ## Data format to output. 146 | # ## Each data format has it's own unique set of configuration options, read 147 | # ## more about them here: 148 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md 149 | # data_format = "influx" 150 | 151 | 152 | # # Configuration for AWS CloudWatch output. 153 | # [[outputs.cloudwatch]] 154 | # ## Amazon REGION 155 | # region = 'us-east-1' 156 | # 157 | # ## Amazon Credentials 158 | # ## Credentials are loaded in the following order 159 | # ## 1) Assumed credentials via STS if role_arn is specified 160 | # ## 2) explicit credentials from 'access_key' and 'secret_key' 161 | # ## 3) shared profile from 'profile' 162 | # ## 4) environment variables 163 | # ## 5) shared credentials file 164 | # ## 6) EC2 Instance Profile 165 | # #access_key = "" 166 | # #secret_key = "" 167 | # #token = "" 168 | # #role_arn = "" 169 | # #profile = "" 170 | # #shared_credential_file = "" 171 | # 172 | # ## Namespace for the CloudWatch MetricDatums 173 | # namespace = 'InfluxData/Telegraf' 174 | 175 | 176 | # # Configuration for DataDog API to send metrics to. 177 | # [[outputs.datadog]] 178 | # ## Datadog API key 179 | # apikey = "my-secret-key" # required. 180 | # 181 | # ## Connection timeout. 182 | # # timeout = "5s" 183 | 184 | 185 | # # Send telegraf metrics to file(s) 186 | # [[outputs.file]] 187 | # ## Files to write to, "stdout" is a specially handled file. 188 | # files = ["stdout", "/tmp/metrics.out"] 189 | # 190 | # ## Data format to output. 191 | # ## Each data format has it's own unique set of configuration options, read 192 | # ## more about them here: 193 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md 194 | # data_format = "influx" 195 | 196 | 197 | # # Configuration for Graphite server to send metrics to 198 | # [[outputs.graphite]] 199 | # ## TCP endpoint for your graphite instance. 200 | # ## If multiple endpoints are configured, output will be load balanced. 201 | # ## Only one of the endpoints will be written to with each iteration. 202 | # servers = ["localhost:2003"] 203 | # ## Prefix metrics name 204 | # prefix = "" 205 | # ## Graphite output template 206 | # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md 207 | # template = "host.tags.measurement.field" 208 | # ## timeout in seconds for the write connection to graphite 209 | # timeout = 2 210 | 211 | 212 | # # Send telegraf metrics to graylog(s) 213 | # [[outputs.graylog]] 214 | # ## Udp endpoint for your graylog instance. 
215 | # servers = ["127.0.0.1:12201", "192.168.1.1:12201"] 216 | 217 | 218 | # # Configuration for sending metrics to an Instrumental project 219 | # [[outputs.instrumental]] 220 | # ## Project API Token (required) 221 | # api_token = "API Token" # required 222 | # ## Prefix the metrics with a given name 223 | # prefix = "" 224 | # ## Stats output template (Graphite formatting) 225 | # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite 226 | # template = "host.tags.measurement.field" 227 | # ## Timeout in seconds to connect 228 | # timeout = "2s" 229 | # ## Display Communcation to Instrumental 230 | # debug = false 231 | 232 | 233 | # # Configuration for the Kafka server to send metrics to 234 | # [[outputs.kafka]] 235 | # ## URLs of kafka brokers 236 | # brokers = ["localhost:9092"] 237 | # ## Kafka topic for producer messages 238 | # topic = "telegraf" 239 | # ## Telegraf tag to use as a routing key 240 | # ## ie, if this tag exists, it's value will be used as the routing key 241 | # routing_tag = "host" 242 | # 243 | # ## CompressionCodec represents the various compression codecs recognized by 244 | # ## Kafka in messages. 245 | # ## 0 : No compression 246 | # ## 1 : Gzip compression 247 | # ## 2 : Snappy compression 248 | # compression_codec = 0 249 | # 250 | # ## RequiredAcks is used in Produce Requests to tell the broker how many 251 | # ## replica acknowledgements it must see before responding 252 | # ## 0 : the producer never waits for an acknowledgement from the broker. 253 | # ## This option provides the lowest latency but the weakest durability 254 | # ## guarantees (some data will be lost when a server fails). 255 | # ## 1 : the producer gets an acknowledgement after the leader replica has 256 | # ## received the data. This option provides better durability as the 257 | # ## client waits until the server acknowledges the request as successful 258 | # ## (only messages that were written to the now-dead leader but not yet 259 | # ## replicated will be lost). 260 | # ## -1: the producer gets an acknowledgement after all in-sync replicas have 261 | # ## received the data. This option provides the best durability, we 262 | # ## guarantee that no messages will be lost as long as at least one in 263 | # ## sync replica remains. 264 | # required_acks = -1 265 | # 266 | # ## The total number of times to retry sending a message 267 | # max_retry = 3 268 | # 269 | # ## Optional SSL Config 270 | # # ssl_ca = "/etc/telegraf/ca.pem" 271 | # # ssl_cert = "/etc/telegraf/cert.pem" 272 | # # ssl_key = "/etc/telegraf/key.pem" 273 | # ## Use SSL but skip chain & host verification 274 | # # insecure_skip_verify = false 275 | # 276 | # ## Data format to output. 277 | # ## Each data format has it's own unique set of configuration options, read 278 | # ## more about them here: 279 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md 280 | # data_format = "influx" 281 | 282 | 283 | # # Configuration for the AWS Kinesis output. 284 | # [[outputs.kinesis]] 285 | # ## Amazon REGION of kinesis endpoint. 
286 | # region = "ap-southeast-2" 287 | # 288 | # ## Amazon Credentials 289 | # ## Credentials are loaded in the following order 290 | # ## 1) Assumed credentials via STS if role_arn is specified 291 | # ## 2) explicit credentials from 'access_key' and 'secret_key' 292 | # ## 3) shared profile from 'profile' 293 | # ## 4) environment variables 294 | # ## 5) shared credentials file 295 | # ## 6) EC2 Instance Profile 296 | # #access_key = "" 297 | # #secret_key = "" 298 | # #token = "" 299 | # #role_arn = "" 300 | # #profile = "" 301 | # #shared_credential_file = "" 302 | # 303 | # ## Kinesis StreamName must exist prior to starting telegraf. 304 | # streamname = "StreamName" 305 | # ## PartitionKey as used for sharding data. 306 | # partitionkey = "PartitionKey" 307 | # ## format of the Data payload in the kinesis PutRecord, supported 308 | # ## String and Custom. 309 | # format = "string" 310 | # ## debug will show upstream aws messages. 311 | # debug = false 312 | 313 | 314 | # # Configuration for Librato API to send metrics to. 315 | # [[outputs.librato]] 316 | # ## Librator API Docs 317 | # ## http://dev.librato.com/v1/metrics-authentication 318 | # ## Librato API user 319 | # api_user = "telegraf@influxdb.com" # required. 320 | # ## Librato API token 321 | # api_token = "my-secret-token" # required. 322 | # ## Debug 323 | # # debug = false 324 | # ## Connection timeout. 325 | # # timeout = "5s" 326 | # ## Output source Template (same as graphite buckets) 327 | # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite 328 | # ## This template is used in librato's source (not metric's name) 329 | # template = "host" 330 | # 331 | 332 | 333 | # # Configuration for MQTT server to send metrics to 334 | # [[outputs.mqtt]] 335 | # servers = ["localhost:1883"] # required. 336 | # 337 | # ## MQTT outputs send metrics to this topic format 338 | # ## "///" 339 | # ## ex: prefix/web01.example.com/mem 340 | # topic_prefix = "telegraf" 341 | # 342 | # ## username and password to connect MQTT server. 343 | # # username = "telegraf" 344 | # # password = "metricsmetricsmetricsmetrics" 345 | # 346 | # ## Optional SSL Config 347 | # # ssl_ca = "/etc/telegraf/ca.pem" 348 | # # ssl_cert = "/etc/telegraf/cert.pem" 349 | # # ssl_key = "/etc/telegraf/key.pem" 350 | # ## Use SSL but skip chain & host verification 351 | # # insecure_skip_verify = false 352 | # 353 | # ## Data format to output. 354 | # ## Each data format has it's own unique set of configuration options, read 355 | # ## more about them here: 356 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md 357 | # data_format = "influx" 358 | 359 | 360 | # # Send telegraf measurements to NSQD 361 | # [[outputs.nsq]] 362 | # ## Location of nsqd instance listening on TCP 363 | # server = "localhost:4150" 364 | # ## NSQ topic for producer messages 365 | # topic = "telegraf" 366 | # 367 | # ## Data format to output. 368 | # ## Each data format has it's own unique set of configuration options, read 369 | # ## more about them here: 370 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md 371 | # data_format = "influx" 372 | 373 | 374 | # # Configuration for OpenTSDB server to send metrics to 375 | # [[outputs.opentsdb]] 376 | # ## prefix for metrics keys 377 | # prefix = "my.specific.prefix." 
378 | # 379 | # ## Telnet Mode ## 380 | # ## DNS name of the OpenTSDB server in telnet mode 381 | # host = "opentsdb.example.com" 382 | # 383 | # ## Port of the OpenTSDB server in telnet mode 384 | # port = 4242 385 | # 386 | # ## Debug true - Prints OpenTSDB communication 387 | # debug = false 388 | 389 | 390 | # # Configuration for the Prometheus client to spawn 391 | # [[outputs.prometheus_client]] 392 | # ## Address to listen on 393 | # # listen = ":9126" 394 | 395 | 396 | # # Configuration for the Riemann server to send metrics to 397 | # [[outputs.riemann]] 398 | # ## URL of server 399 | # url = "localhost:5555" 400 | # ## transport protocol to use either tcp or udp 401 | # transport = "tcp" 402 | # ## separator to use between input name and field name in Riemann service name 403 | # separator = " " 404 | 405 | 406 | 407 | ############################################################################### 408 | # INPUT PLUGINS # 409 | ############################################################################### 410 | 411 | # Read metrics about cpu usage 412 | [[inputs.cpu]] 413 | ## Whether to report per-cpu stats or not 414 | percpu = true 415 | ## Whether to report total system cpu stats or not 416 | totalcpu = true 417 | ## Comment this line if you want the raw CPU time metrics 418 | fielddrop = ["time_*"] 419 | 420 | 421 | # Read metrics about disk usage by mount point 422 | [[inputs.disk]] 423 | ## By default, telegraf gather stats for all mountpoints. 424 | ## Setting mountpoints will restrict the stats to the specified mountpoints. 425 | # mount_points = ["/"] 426 | 427 | ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually 428 | ## present on /run, /var/run, /dev/shm or /dev). 429 | ignore_fs = ["tmpfs", "devtmpfs"] 430 | 431 | 432 | # Read metrics about disk IO by device 433 | [[inputs.diskio]] 434 | ## By default, telegraf will gather stats for all devices including 435 | ## disk partitions. 436 | ## Setting devices will restrict the stats to the specified devices. 437 | # devices = ["sda", "sdb"] 438 | ## Uncomment the following line if you need disk serial numbers. 439 | # skip_serial_number = false 440 | 441 | 442 | # Get kernel statistics from /proc/stat 443 | [[inputs.kernel]] 444 | # no configuration 445 | 446 | 447 | # Read metrics about memory usage 448 | [[inputs.mem]] 449 | # no configuration 450 | 451 | 452 | # Get the number of processes and group them by status 453 | [[inputs.processes]] 454 | # no configuration 455 | 456 | 457 | # Read metrics about swap memory usage 458 | [[inputs.swap]] 459 | # no configuration 460 | 461 | 462 | # Read metrics about system load & uptime 463 | [[inputs.system]] 464 | # no configuration 465 | 466 | 467 | # # Read stats from aerospike server(s) 468 | # [[inputs.aerospike]] 469 | # ## Aerospike servers to connect to (with port) 470 | # ## This plugin will query all namespaces the aerospike 471 | # ## server has configured and get stats for them. 472 | # servers = ["localhost:3000"] 473 | 474 | 475 | # # Read Apache status information (mod_status) 476 | # [[inputs.apache]] 477 | # ## An array of Apache status URI to gather stats. 478 | # ## Default is "http://localhost/server-status?auto". 
479 | # urls = ["http://localhost/server-status?auto"] 480 | 481 | 482 | # # Read metrics of bcache from stats_total and dirty_data 483 | # [[inputs.bcache]] 484 | # ## Bcache sets path 485 | # ## If not specified, then default is: 486 | # bcachePath = "/sys/fs/bcache" 487 | # 488 | # ## By default, telegraf gather stats for all bcache devices 489 | # ## Setting devices will restrict the stats to the specified 490 | # ## bcache devices. 491 | # bcacheDevs = ["bcache0"] 492 | 493 | 494 | # # Read Cassandra metrics through Jolokia 495 | # [[inputs.cassandra]] 496 | # # This is the context root used to compose the jolokia url 497 | # context = "/jolokia/read" 498 | # ## List of cassandra servers exposing jolokia read service 499 | # servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] 500 | # ## List of metrics collected on above servers 501 | # ## Each metric consists of a jmx path. 502 | # ## This will collect all heap memory usage metrics from the jvm and 503 | # ## ReadLatency metrics for all keyspaces and tables. 504 | # ## "type=Table" in the query works with Cassandra3.0. Older versions might 505 | # ## need to use "type=ColumnFamily" 506 | # metrics = [ 507 | # "/java.lang:type=Memory/HeapMemoryUsage", 508 | # "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" 509 | # ] 510 | 511 | 512 | # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. 513 | # [[inputs.ceph]] 514 | # ## All configuration values are optional, defaults are shown below 515 | # 516 | # ## location of ceph binary 517 | # ceph_binary = "/usr/bin/ceph" 518 | # 519 | # ## directory in which to look for socket files 520 | # socket_dir = "/var/run/ceph" 521 | # 522 | # ## prefix of MON and OSD socket files, used to determine socket type 523 | # mon_prefix = "ceph-mon" 524 | # osd_prefix = "ceph-osd" 525 | # 526 | # ## suffix used to identify socket files 527 | # socket_suffix = "asok" 528 | 529 | 530 | # # Read specific statistics per cgroup 531 | # [[inputs.cgroup]] 532 | # ## Directories in which to look for files, globs are supported. 533 | # # paths = [ 534 | # # "/cgroup/memory", 535 | # # "/cgroup/memory/child1", 536 | # # "/cgroup/memory/child2/*", 537 | # # ] 538 | # ## cgroup stat fields, as file names, globs are supported. 539 | # ## these file names are appended to each path from above. 540 | # # files = ["memory.*usage*", "memory.limit_in_bytes"] 541 | 542 | 543 | # # Get standard chrony metrics, requires chronyc executable. 544 | # [[inputs.chrony]] 545 | # ## If true, chronyc tries to perform a DNS lookup for the time server. 
546 | # # dns_lookup = false
547 |
548 |
549 | # # Pull Metric Statistics from Amazon CloudWatch
550 | # [[inputs.cloudwatch]]
551 | # ## Amazon Region
552 | # region = 'us-east-1'
553 | #
554 | # ## Amazon Credentials
555 | # ## Credentials are loaded in the following order
556 | # ## 1) Assumed credentials via STS if role_arn is specified
557 | # ## 2) explicit credentials from 'access_key' and 'secret_key'
558 | # ## 3) shared profile from 'profile'
559 | # ## 4) environment variables
560 | # ## 5) shared credentials file
561 | # ## 6) EC2 Instance Profile
562 | # #access_key = ""
563 | # #secret_key = ""
564 | # #token = ""
565 | # #role_arn = ""
566 | # #profile = ""
567 | # #shared_credential_file = ""
568 | #
569 | # ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
570 | # period = '1m'
571 | #
572 | # ## Collection Delay (required - must account for metrics availability via CloudWatch API)
573 | # delay = '1m'
574 | #
575 | # ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
576 | # ## gaps or overlap in pulled data
577 | # interval = '1m'
578 | #
579 | # ## Configure the TTL for the internal cache of metrics.
580 | # ## Defaults to 1 hr if not specified
581 | # #cache_ttl = '10m'
582 | #
583 | # ## Metric Statistic Namespace (required)
584 | # namespace = 'AWS/ELB'
585 | #
586 | # ## Metrics to Pull (optional)
587 | # ## Defaults to all Metrics in Namespace if nothing is provided
588 | # ## Refreshes Namespace available metrics every 1h
589 | # #[[inputs.cloudwatch.metrics]]
590 | # # names = ['Latency', 'RequestCount']
591 | # #
592 | # # ## Dimension filters for Metric (optional)
593 | # # [[inputs.cloudwatch.metrics.dimensions]]
594 | # # name = 'LoadBalancerName'
595 | # # value = 'p-example'
596 |
597 |
598 | # # Collects conntrack stats from the configured directories and files.
599 | # [[inputs.conntrack]]
600 | # ## The following defaults would work with multiple versions of conntrack.
601 | # ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
602 | # ## kernel versions, as are the directory locations.
603 | #
604 | # ## Superset of filenames to look for within the conntrack dirs.
605 | # ## Missing files will be ignored.
606 | # files = ["ip_conntrack_count","ip_conntrack_max",
607 | # "nf_conntrack_count","nf_conntrack_max"]
608 | #
609 | # ## Directories to search within for the conntrack files above.
610 | # ## Missing directories will be ignored.
611 | # dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
612 |
613 |
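These commented-out blocks are stock sample sections shipped with Telegraf; only the uncommented plugins are active. Before enabling one permanently, it can be tried in isolation: `telegraf --test` performs a single collection and prints it to stdout without writing to any output, and `--input-filter` narrows it to named plugins. A sketch, run inside the container from this stack:

```
# One-shot gather of only the cpu and mem inputs, printed to stdout.
docker exec telegraf telegraf --config /etc/telegraf/telegraf.conf \
  --input-filter cpu:mem --test
```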
614 | # # Gather health check statuses from services registered in Consul
615 | # [[inputs.consul]]
616 | # ## Most of these values default to the ones configured at the Consul agent level.
617 | # ## Optional Consul server address (default: "localhost")
618 | # # address = "localhost"
619 | # ## Optional URI scheme for the Consul server (default: "http")
620 | # # scheme = "http"
621 | # ## Optional ACL token used in every request (default: "")
622 | # # token = ""
623 | # ## Optional username used for request HTTP Basic Authentication (default: "")
624 | # # username = ""
625 | # ## Optional password used for HTTP Basic Authentication (default: "")
626 | # # password = ""
627 | # ## Optional data centre to query the health checks from (default: "")
628 | # # datacentre = ""
629 |
630 |
631 | # # Read metrics from one or many couchbase clusters
632 | # [[inputs.couchbase]]
633 | # ## specify servers via a url matching:
634 | # ## [protocol://][:password]@address[:port]
635 | # ## e.g.
636 | # ## http://couchbase-0.example.com/
637 | # ## http://admin:secret@couchbase-0.example.com:8091/
638 | # ##
639 | # ## If no servers are specified, then localhost is used as the host.
640 | # ## If no protocol is specified, HTTP is used.
641 | # ## If no port is specified, 8091 is used.
642 | # servers = ["http://localhost:8091"]
643 |
644 |
645 | # # Read CouchDB Stats from one or more servers
646 | # [[inputs.couchdb]]
647 | # ## Works with CouchDB stats endpoints out of the box
648 | # ## Multiple HOSTs from which to read CouchDB stats:
649 | # hosts = ["http://localhost:8086/_stats"]
650 |
651 |
652 | # # Read metrics from one or many disque servers
653 | # [[inputs.disque]]
654 | # ## An array of URI to gather stats about. Specify an ip or hostname
655 | # ## with optional port and password.
656 | # ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
657 | # ## If no servers are specified, then localhost is used as the host.
658 | # servers = ["localhost"]
659 |
660 |
661 | # # Query a given DNS server and gather statistics
662 | # [[inputs.dns_query]]
663 | # ## servers to query
664 | # servers = ["8.8.8.8"] # required
665 | #
666 | # ## Domains or subdomains to query. "."(root) is default
667 | # domains = ["."] # optional
668 | #
669 | # ## Query record type. Default is "A"
670 | # ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
671 | # record_type = "A" # optional
672 | #
673 | # ## DNS server port. 53 is default
674 | # port = 53 # optional
675 | #
676 | # ## Query timeout in seconds. Default is 2 seconds
677 | # timeout = 2 # optional
678 |
679 |
680 | # # Read metrics about docker containers
681 | # [[inputs.docker]]
682 | # ## Docker Endpoint
683 | # ## To use TCP, set endpoint = "tcp://[ip]:[port]"
684 | # ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
685 | # endpoint = "unix:///var/run/docker.sock"
686 | # ## Only collect metrics for these containers, collect all if empty
687 | # container_names = []
688 | # ## Timeout for docker list, info, and stats commands
689 | # timeout = "5s"
690 | #
691 | # ## Whether to report for each container per-device blkio (8:0, 8:1...) and
692 | # ## network (eth0, eth1, ...) stats or not
693 | # perdevice = true
694 | # ## Whether to report for each container total blkio and network stats or not
695 | # total = false
696 | #
697 |
698 |
699 | # # Read statistics from one or many dovecot servers
700 | # [[inputs.dovecot]]
701 | # ## specify dovecot servers via an address:port list
702 | # ## e.g.
703 | # ## localhost:24242
704 | # ##
705 | # ## If no servers are specified, then localhost is used as the host.
706 | # servers = ["localhost:24242"] 707 | # ## Type is one of "user", "domain", "ip", or "global" 708 | # type = "global" 709 | # ## Wildcard matches like "*.com". An empty string "" is same as "*" 710 | # ## If type = "ip" filters should be 711 | # filters = [""] 712 | 713 | 714 | # # Read stats from one or more Elasticsearch servers or clusters 715 | # [[inputs.elasticsearch]] 716 | # ## specify a list of one or more Elasticsearch servers 717 | # servers = ["http://localhost:9200"] 718 | # 719 | # ## set local to false when you want to read the indices stats from all nodes 720 | # ## within the cluster 721 | # local = true 722 | # 723 | # ## set cluster_health to true when you want to also obtain cluster level stats 724 | # cluster_health = false 725 | # 726 | # ## Optional SSL Config 727 | # # ssl_ca = "/etc/telegraf/ca.pem" 728 | # # ssl_cert = "/etc/telegraf/cert.pem" 729 | # # ssl_key = "/etc/telegraf/key.pem" 730 | # ## Use SSL but skip chain & host verification 731 | # # insecure_skip_verify = false 732 | 733 | 734 | # # Read metrics from one or more commands that can output to stdout 735 | # [[inputs.exec]] 736 | # ## Commands array 737 | # commands = [ 738 | # "/tmp/test.sh", 739 | # "/usr/bin/mycollector --foo=bar", 740 | # "/tmp/collect_*.sh" 741 | # ] 742 | # 743 | # ## Timeout for each command to complete. 744 | # timeout = "5s" 745 | # 746 | # ## measurement name suffix (for separating different commands) 747 | # name_suffix = "_mycollector" 748 | # 749 | # ## Data format to consume. 750 | # ## Each data format has it's own unique set of configuration options, read 751 | # ## more about them here: 752 | # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md 753 | # data_format = "influx" 754 | 755 | 756 | # # Read stats about given file(s) 757 | # [[inputs.filestat]] 758 | # ## Files to gather stats about. 759 | # ## These accept standard unix glob matching rules, but with the addition of 760 | # ## ** as a "super asterisk". ie: 761 | # ## "/var/log/**.log" -> recursively find all .log files in /var/log 762 | # ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log 763 | # ## "/var/log/apache.log" -> just tail the apache log file 764 | # ## 765 | # ## See https://github.com/gobwas/glob for more examples 766 | # ## 767 | # files = ["/var/log/**.log"] 768 | # ## If true, read the entire file and calculate an md5 checksum. 769 | # md5 = false 770 | 771 | 772 | # # Read flattened metrics from one or more GrayLog HTTP endpoints 773 | # [[inputs.graylog]] 774 | # ## API endpoint, currently supported API: 775 | # ## 776 | # ## - multiple (Ex http://:12900/system/metrics/multiple) 777 | # ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) 778 | # ## 779 | # ## For namespace endpoint, the metrics array will be ignored for that call. 780 | # ## Endpoint can contain namespace and multiple type calls. 781 | # ## 782 | # ## Please check http://[graylog-server-ip]:12900/api-browser for full list 783 | # ## of endpoints 784 | # servers = [ 785 | # "http://[graylog-server-ip]:12900/system/metrics/multiple", 786 | # ] 787 | # 788 | # ## Metrics list 789 | # ## List of metrics can be found on Graylog webservice documentation. 
790 | # ## Or by hitting the web service API at:
791 | # ## http://[graylog-host]:12900/system/metrics
792 | # metrics = [
793 | # "jvm.cl.loaded",
794 | # "jvm.memory.pools.Metaspace.committed"
795 | # ]
796 | #
797 | # ## Username and password
798 | # username = ""
799 | # password = ""
800 | #
801 | # ## Optional SSL Config
802 | # # ssl_ca = "/etc/telegraf/ca.pem"
803 | # # ssl_cert = "/etc/telegraf/cert.pem"
804 | # # ssl_key = "/etc/telegraf/key.pem"
805 | # ## Use SSL but skip chain & host verification
806 | # # insecure_skip_verify = false
807 |
808 |
809 | # # Read metrics of haproxy, via socket or csv stats page
810 | # [[inputs.haproxy]]
811 | # ## An array of addresses to gather stats about. Specify an ip or hostname
812 | # ## with optional port. ie localhost, 10.10.3.33:1936, etc.
813 | # ## Make sure you specify the complete path to the stats endpoint
814 | # ## ie 10.10.3.33:1936/haproxy?stats
815 | # #
816 | # ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
817 | # servers = ["http://myhaproxy.com:1936/haproxy?stats"]
818 | # ## Or you can also use local socket
819 | # ## servers = ["socket:/run/haproxy/admin.sock"]
820 |
821 |
822 | # # Monitor disks' temperatures using hddtemp
823 | # [[inputs.hddtemp]]
824 | # ## By default, telegraf gathers temps data from all disks detected by
825 | # ## hddtemp.
826 | # ##
827 | # ## Only collect temps from the selected disks.
828 | # ##
829 | # ## A * as the device name will return the temperature values of all disks.
830 | # ##
831 | # # address = "127.0.0.1:7634"
832 | # # devices = ["sda", "*"]
833 |
834 |
835 | # # HTTP/HTTPS request given an address, a method and a timeout
836 | # [[inputs.http_response]]
837 | # ## Server address (default http://localhost)
838 | # address = "http://github.com"
839 | # ## Set response_timeout (default 5 seconds)
840 | # response_timeout = "5s"
841 | # ## HTTP Request Method
842 | # method = "GET"
843 | # ## Whether to follow redirects from the server (defaults to false)
844 | # follow_redirects = true
845 | # ## HTTP Request Headers (all values must be strings)
846 | # # [inputs.http_response.headers]
847 | # # Host = "github.com"
848 | # ## Optional HTTP Request Body
849 | # # body = '''
850 | # # {'fake':'data'}
851 | # # '''
852 | #
853 | # ## Optional SSL Config
854 | # # ssl_ca = "/etc/telegraf/ca.pem"
855 | # # ssl_cert = "/etc/telegraf/cert.pem"
856 | # # ssl_key = "/etc/telegraf/key.pem"
857 | # ## Use SSL but skip chain & host verification
858 | # # insecure_skip_verify = false
859 |
860 |
861 | # # Read flattened metrics from one or more JSON HTTP endpoints
862 | # [[inputs.httpjson]]
863 | # ## NOTE This plugin only reads numerical measurements, strings and booleans
864 | # ## will be ignored.
865 | # 866 | # ## a name for the service being polled 867 | # name = "webserver_stats" 868 | # 869 | # ## URL of each server in the service's cluster 870 | # servers = [ 871 | # "http://localhost:9999/stats/", 872 | # "http://localhost:9998/stats/", 873 | # ] 874 | # 875 | # ## HTTP method to use: GET or POST (case-sensitive) 876 | # method = "GET" 877 | # 878 | # ## List of tag names to extract from top-level of JSON server response 879 | # # tag_keys = [ 880 | # # "my_tag_1", 881 | # # "my_tag_2" 882 | # # ] 883 | # 884 | # ## HTTP parameters (all values must be strings) 885 | # [inputs.httpjson.parameters] 886 | # event_type = "cpu_spike" 887 | # threshold = "0.75" 888 | # 889 | # ## HTTP Header parameters (all values must be strings) 890 | # # [inputs.httpjson.headers] 891 | # # X-Auth-Token = "my-xauth-token" 892 | # # apiVersion = "v1" 893 | # 894 | # ## Optional SSL Config 895 | # # ssl_ca = "/etc/telegraf/ca.pem" 896 | # # ssl_cert = "/etc/telegraf/cert.pem" 897 | # # ssl_key = "/etc/telegraf/key.pem" 898 | # ## Use SSL but skip chain & host verification 899 | # # insecure_skip_verify = false 900 | 901 | 902 | # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints 903 | # [[inputs.influxdb]] 904 | # ## Works with InfluxDB debug endpoints out of the box, 905 | # ## but other services can use this format too. 906 | # ## See the influxdb plugin's README for more details. 907 | # 908 | # ## Multiple URLs from which to read InfluxDB-formatted JSON 909 | # ## Default is "http://localhost:8086/debug/vars". 910 | # urls = [ 911 | # "http://localhost:8086/debug/vars" 912 | # ] 913 | 914 | 915 | # # Read metrics from one or many bare metal servers 916 | # [[inputs.ipmi_sensor]] 917 | # ## specify servers via a url matching: 918 | # ## [username[:password]@][protocol[(address)]] 919 | # ## e.g. 920 | # ## root:passwd@lan(127.0.0.1) 921 | # ## 922 | # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] 923 | 924 | 925 | # # Read JMX metrics through Jolokia 926 | # [[inputs.jolokia]] 927 | # ## This is the context root used to compose the jolokia url 928 | # context = "/jolokia" 929 | # 930 | # ## This specifies the mode used 931 | # # mode = "proxy" 932 | # # 933 | # ## When in proxy mode this section is used to specify further 934 | # ## proxy address configurations. 935 | # ## Remember to change host address to fit your environment. 936 | # # [inputs.jolokia.proxy] 937 | # # host = "127.0.0.1" 938 | # # port = "8080" 939 | # 940 | # 941 | # ## List of servers exposing jolokia read service 942 | # [[inputs.jolokia.servers]] 943 | # name = "as-server-01" 944 | # host = "127.0.0.1" 945 | # port = "8080" 946 | # # username = "myuser" 947 | # # password = "mypassword" 948 | # 949 | # ## List of metrics collected on above servers 950 | # ## Each metric consists in a name, a jmx path and either 951 | # ## a pass or drop slice attribute. 952 | # ## This collect all heap memory usage metrics. 953 | # [[inputs.jolokia.metrics]] 954 | # name = "heap_memory_usage" 955 | # mbean = "java.lang:type=Memory" 956 | # attribute = "HeapMemoryUsage" 957 | # 958 | # ## This collect thread counts metrics. 959 | # [[inputs.jolokia.metrics]] 960 | # name = "thread_count" 961 | # mbean = "java.lang:type=Threading" 962 | # attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" 963 | # 964 | # ## This collect number of class loaded/unloaded counts metrics. 
965 | # [[inputs.jolokia.metrics]] 966 | # name = "class_count" 967 | # mbean = "java.lang:type=ClassLoading" 968 | # attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" 969 | 970 | 971 | # # Get kernel statistics from /proc/vmstat 972 | # [[inputs.kernel_vmstat]] 973 | # # no configuration 974 | 975 | 976 | # # Read metrics from a LeoFS Server via SNMP 977 | # [[inputs.leofs]] 978 | # ## An array of URI to gather stats about LeoFS. 979 | # ## Specify an ip or hostname with port. ie 127.0.0.1:4020 980 | # servers = ["127.0.0.1:4021"] 981 | 982 | 983 | # # Read metrics from local Lustre service on OST, MDS 984 | # [[inputs.lustre2]] 985 | # ## An array of /proc globs to search for Lustre stats 986 | # ## If not specified, the default will work on Lustre 2.5.x 987 | # ## 988 | # # ost_procfiles = [ 989 | # # "/proc/fs/lustre/obdfilter/*/stats", 990 | # # "/proc/fs/lustre/osd-ldiskfs/*/stats", 991 | # # "/proc/fs/lustre/obdfilter/*/job_stats", 992 | # # ] 993 | # # mds_procfiles = [ 994 | # # "/proc/fs/lustre/mdt/*/md_stats", 995 | # # "/proc/fs/lustre/mdt/*/job_stats", 996 | # # ] 997 | 998 | 999 | # # Gathers metrics from the /3.0/reports MailChimp API 1000 | # [[inputs.mailchimp]] 1001 | # ## MailChimp API key 1002 | # ## get from https://admin.mailchimp.com/account/api/ 1003 | # api_key = "" # required 1004 | # ## Reports for campaigns sent more than days_old ago will not be collected. 1005 | # ## 0 means collect all. 1006 | # days_old = 0 1007 | # ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old 1008 | # # campaign_id = "" 1009 | 1010 | 1011 | # # Read metrics from one or many memcached servers 1012 | # [[inputs.memcached]] 1013 | # ## An array of address to gather stats about. Specify an ip on hostname 1014 | # ## with optional port. ie localhost, 10.0.0.1:11211, etc. 1015 | # servers = ["localhost:11211"] 1016 | # # unix_sockets = ["/var/run/memcached.sock"] 1017 | 1018 | 1019 | # # Telegraf plugin for gathering metrics from N Mesos masters 1020 | # [[inputs.mesos]] 1021 | # ## Timeout, in ms. 1022 | # timeout = 100 1023 | # ## A list of Mesos masters. 1024 | # masters = ["localhost:5050"] 1025 | # ## Master metrics groups to be collected, by default, all enabled. 1026 | # master_collections = [ 1027 | # "resources", 1028 | # "master", 1029 | # "system", 1030 | # "agents", 1031 | # "frameworks", 1032 | # "tasks", 1033 | # "messages", 1034 | # "evqueue", 1035 | # "registrar", 1036 | # ] 1037 | # ## A list of Mesos slaves, default is [] 1038 | # # slaves = [] 1039 | # ## Slave metrics groups to be collected, by default, all enabled. 1040 | # # slave_collections = [ 1041 | # # "resources", 1042 | # # "agent", 1043 | # # "system", 1044 | # # "executors", 1045 | # # "tasks", 1046 | # # "messages", 1047 | # # ] 1048 | # ## Include mesos tasks statistics, default is false 1049 | # # slave_tasks = true 1050 | 1051 | 1052 | # # Read metrics from one or many MongoDB servers 1053 | # [[inputs.mongodb]] 1054 | # ## An array of URI to gather stats about. Specify an ip or hostname 1055 | # ## with optional port add password. ie, 1056 | # ## mongodb://user:auth_key@10.10.3.30:27017, 1057 | # ## mongodb://10.10.3.33:18832, 1058 | # ## 10.0.0.1:10000, etc. 
1059 | # servers = ["127.0.0.1:27017"]
1060 | # gather_perdb_stats = false
1061 |
1062 |
1063 | # # Read metrics from one or many mysql servers
1064 | # [[inputs.mysql]]
1065 | # ## specify servers via a url matching:
1066 | # ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
1067 | # ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
1068 | # ## e.g.
1069 | # ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
1070 | # ## db_user@tcp(127.0.0.1:3306)/?tls=false
1071 | # #
1072 | # ## If no servers are specified, then localhost is used as the host.
1073 | # servers = ["tcp(127.0.0.1:3306)/"]
1074 | # ## the limits for metrics from perf_events_statements
1075 | # perf_events_statements_digest_text_limit = 120
1076 | # perf_events_statements_limit = 250
1077 | # perf_events_statements_time_limit = 86400
1078 | # #
1079 | # ## if the list is empty, then metrics are gathered from all database tables
1080 | # table_schema_databases = []
1081 | # #
1082 | # ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided in the above list
1083 | # gather_table_schema = false
1084 | # #
1085 | # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
1086 | # gather_process_list = true
1087 | # #
1088 | # ## gather auto_increment columns and max values from information schema
1089 | # gather_info_schema_auto_inc = true
1090 | # #
1091 | # ## gather metrics from SHOW SLAVE STATUS command output
1092 | # gather_slave_status = true
1093 | # #
1094 | # ## gather metrics from SHOW BINARY LOGS command output
1095 | # gather_binary_logs = false
1096 | # #
1097 | # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
1098 | # gather_table_io_waits = false
1099 | # #
1100 | # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
1101 | # gather_table_lock_waits = false
1102 | # #
1103 | # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
1104 | # gather_index_io_waits = false
1105 | # #
1106 | # ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
1107 | # gather_event_waits = false
1108 | # #
1109 | # ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
1110 | # gather_file_events_stats = false
1111 | # #
1112 | # ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
1113 | # gather_perf_events_statements = false
1114 | # #
1115 | # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
1116 | # interval_slow = "30m"
1117 |
1118 |
1119 | # # Read metrics about network interface usage
1120 | # [[inputs.net]]
1121 | # ## By default, telegraf gathers stats from any up interface (excluding loopback)
1122 | # ## Setting interfaces will tell it to gather these explicit interfaces,
1123 | # ## regardless of status.
1124 | # ##
1125 | # # interfaces = ["eth0"]
1126 |
1127 |
1128 | # # TCP or UDP 'ping' given url and collect response time in seconds
1129 | # [[inputs.net_response]]
1130 | # ## Protocol, must be "tcp" or "udp"
1131 | # protocol = "tcp"
1132 | # ## Server address (default localhost)
1133 | # address = "github.com:80"
1134 | # ## Set timeout
1135 | # timeout = "1s"
1136 | #
1137 | # ## Optional string sent to the server
1138 | # # send = "ssh"
1139 | # ## Optional expected string in answer
1140 | # # expect = "ssh"
1141 | # ## Set read timeout (only used if expecting a response)
1142 | # read_timeout = "1s"
1143 |
1144 |
1145 | # # Read TCP metrics such as established, time wait and sockets counts.
1146 | # [[inputs.netstat]] 1147 | # # no configuration 1148 | 1149 | 1150 | # # Read Nginx's basic status information (ngx_http_stub_status_module) 1151 | # [[inputs.nginx]] 1152 | # ## An array of Nginx stub_status URI to gather stats. 1153 | # urls = ["http://localhost/status"] 1154 | 1155 | 1156 | # # Read NSQ topic and channel statistics. 1157 | # [[inputs.nsq]] 1158 | # ## An array of NSQD HTTP API endpoints 1159 | # endpoints = ["http://localhost:4151"] 1160 | 1161 | 1162 | # # Collect kernel snmp counters and network interface statistics 1163 | # [[inputs.nstat]] 1164 | # ## file paths for proc files. If empty default paths will be used: 1165 | # ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 1166 | # ## These can also be overridden with env variables, see README. 1167 | # proc_net_netstat = "/proc/net/netstat" 1168 | # proc_net_snmp = "/proc/net/snmp" 1169 | # proc_net_snmp6 = "/proc/net/snmp6" 1170 | # ## dump metrics with 0 values too 1171 | # dump_zeros = true 1172 | 1173 | 1174 | # # Get standard NTP query metrics, requires ntpq executable. 1175 | # [[inputs.ntpq]] 1176 | # ## If false, set the -n ntpq flag. Can reduce metric gather time. 1177 | # dns_lookup = true 1178 | 1179 | 1180 | # # Read metrics of passenger using passenger-status 1181 | # [[inputs.passenger]] 1182 | # ## Path of passenger-status. 1183 | # ## 1184 | # ## Plugin gather metric via parsing XML output of passenger-status 1185 | # ## More information about the tool: 1186 | # ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html 1187 | # ## 1188 | # ## If no path is specified, then the plugin simply execute passenger-status 1189 | # ## hopefully it can be found in your PATH 1190 | # command = "passenger-status -v --show=xml" 1191 | 1192 | 1193 | # # Read metrics of phpfpm, via HTTP status page or socket 1194 | # [[inputs.phpfpm]] 1195 | # ## An array of addresses to gather stats about. Specify an ip or hostname 1196 | # ## with optional port and path 1197 | # ## 1198 | # ## Plugin can be configured in three modes (either can be used): 1199 | # ## - http: the URL must start with http:// or https://, ie: 1200 | # ## "http://localhost/status" 1201 | # ## "http://192.168.130.1/status?full" 1202 | # ## 1203 | # ## - unixsocket: path to fpm socket, ie: 1204 | # ## "/var/run/php5-fpm.sock" 1205 | # ## or using a custom fpm status path: 1206 | # ## "/var/run/php5-fpm.sock:fpm-custom-status-path" 1207 | # ## 1208 | # ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: 1209 | # ## "fcgi://10.0.0.12:9000/status" 1210 | # ## "cgi://10.0.10.12:9001/status" 1211 | # ## 1212 | # ## Example of multiple gathering from local socket and remove host 1213 | # ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] 1214 | # urls = ["http://localhost/status"] 1215 | 1216 | 1217 | # # Ping given url(s) and return statistics 1218 | # [[inputs.ping]] 1219 | # ## NOTE: this plugin forks the ping command. You may need to set capabilities 1220 | # ## via setcap cap_net_raw+p /bin/ping 1221 | # # 1222 | # ## urls to ping 1223 | # urls = ["www.google.com"] # required 1224 | # ## number of pings to send per collection (ping -c ) 1225 | # count = 1 # required 1226 | # ## interval, in s, at which to ping. 0 == default (ping -i ) 1227 | # ping_interval = 0.0 1228 | # ## per-ping timeout, in s. 
0 == no timeout (ping -W ) 1229 | # timeout = 1.0 1230 | # ## interface to send ping from (ping -I ) 1231 | # interface = "" 1232 | 1233 | 1234 | # # Read metrics from one or many postgresql servers 1235 | # [[inputs.postgresql]] 1236 | # ## specify address via a url matching: 1237 | # ## postgres://[pqgotest[:password]]@localhost[/dbname]\ 1238 | # ## ?sslmode=[disable|verify-ca|verify-full] 1239 | # ## or a simple string: 1240 | # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production 1241 | # ## 1242 | # ## All connection parameters are optional. 1243 | # ## 1244 | # ## Without the dbname parameter, the driver will default to a database 1245 | # ## with the same name as the user. This dbname is just for instantiating a 1246 | # ## connection with the server and doesn't restrict the databases we are trying 1247 | # ## to grab metrics for. 1248 | # ## 1249 | # address = "host=localhost user=postgres sslmode=disable" 1250 | # 1251 | # ## A list of databases to pull metrics about. If not specified, metrics for all 1252 | # ## databases are gathered. 1253 | # # databases = ["app_production", "testing"] 1254 | 1255 | 1256 | # # Read metrics from one or many postgresql servers 1257 | # [[inputs.postgresql_extensible]] 1258 | # ## specify address via a url matching: 1259 | # ## postgres://[pqgotest[:password]]@localhost[/dbname]\ 1260 | # ## ?sslmode=[disable|verify-ca|verify-full] 1261 | # ## or a simple string: 1262 | # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production 1263 | # # 1264 | # ## All connection parameters are optional. # 1265 | # ## Without the dbname parameter, the driver will default to a database 1266 | # ## with the same name as the user. This dbname is just for instantiating a 1267 | # ## connection with the server and doesn't restrict the databases we are trying 1268 | # ## to grab metrics for. 1269 | # # 1270 | # address = "host=localhost user=postgres sslmode=disable" 1271 | # ## A list of databases to pull metrics about. If not specified, metrics for all 1272 | # ## databases are gathered. 1273 | # ## databases = ["app_production", "testing"] 1274 | # # 1275 | # # outputaddress = "db01" 1276 | # ## A custom name for the database that will be used as the "server" tag in the 1277 | # ## measurement output. If not specified, a default one generated from 1278 | # ## the connection address is used. 1279 | # # 1280 | # ## Define the toml config where the sql queries are stored 1281 | # ## New queries can be added, if the withdbname is set to true and there is no 1282 | # ## databases defined in the 'databases field', the sql query is ended by a 1283 | # ## 'is not null' in order to make the query succeed. 1284 | # ## Example : 1285 | # ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become 1286 | # ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" 1287 | # ## because the databases variable was set to ['postgres', 'pgbench' ] and the 1288 | # ## withdbname was true. Be careful that if the withdbname is set to false you 1289 | # ## don't have to define the where clause (aka with the dbname) the tagvalue 1290 | # ## field is used to define custom tags (separated by commas) 1291 | # ## The optional "measurement" value can be used to override the default 1292 | # ## output measurement name ("postgresql"). 
1293 | # # 1294 | # ## Structure : 1295 | # ## [[inputs.postgresql_extensible.query]] 1296 | # ## sqlquery string 1297 | # ## version string 1298 | # ## withdbname boolean 1299 | # ## tagvalue string (comma separated) 1300 | # ## measurement string 1301 | # [[inputs.postgresql_extensible.query]] 1302 | # sqlquery="SELECT * FROM pg_stat_database" 1303 | # version=901 1304 | # withdbname=false 1305 | # tagvalue="" 1306 | # measurement="" 1307 | # [[inputs.postgresql_extensible.query]] 1308 | # sqlquery="SELECT * FROM pg_stat_bgwriter" 1309 | # version=901 1310 | # withdbname=false 1311 | # tagvalue="postgresql.stats" 1312 | 1313 | 1314 | # # Read metrics from one or many PowerDNS servers 1315 | # [[inputs.powerdns]] 1316 | # ## An array of sockets to gather stats about. 1317 | # ## Specify a path to unix socket. 1318 | # unix_sockets = ["/var/run/pdns.controlsocket"] 1319 | 1320 | 1321 | # # Monitor process cpu and memory usage 1322 | # [[inputs.procstat]] 1323 | # ## Must specify one of: pid_file, exe, or pattern 1324 | # ## PID file to monitor process 1325 | # pid_file = "/var/run/nginx.pid" 1326 | # ## executable name (ie, pgrep ) 1327 | # # exe = "nginx" 1328 | # ## pattern as argument for pgrep (ie, pgrep -f ) 1329 | # # pattern = "nginx" 1330 | # ## user as argument for pgrep (ie, pgrep -u ) 1331 | # # user = "nginx" 1332 | # 1333 | # ## override for process_name 1334 | # ## This is optional; default is sourced from /proc//status 1335 | # # process_name = "bar" 1336 | # ## Field name prefix 1337 | # prefix = "" 1338 | # ## comment this out if you want raw cpu_time stats 1339 | # fielddrop = ["cpu_time_*"] 1340 | 1341 | 1342 | # # Read metrics from one or many prometheus clients 1343 | # [[inputs.prometheus]] 1344 | # ## An array of urls to scrape metrics from. 1345 | # urls = ["http://localhost:9100/metrics"] 1346 | # 1347 | # ## Use bearer token for authorization 1348 | # # bearer_token = /path/to/bearer/token 1349 | # 1350 | # ## Optional SSL Config 1351 | # # ssl_ca = /path/to/cafile 1352 | # # ssl_cert = /path/to/certfile 1353 | # # ssl_key = /path/to/keyfile 1354 | # ## Use SSL but skip chain & host verification 1355 | # # insecure_skip_verify = false 1356 | 1357 | 1358 | # # Reads last_run_summary.yaml file and converts to measurments 1359 | # [[inputs.puppetagent]] 1360 | # ## Location of puppet last run summary file 1361 | # location = "/var/lib/puppet/state/last_run_summary.yaml" 1362 | 1363 | 1364 | # # Read metrics from one or many RabbitMQ servers via the management API 1365 | # [[inputs.rabbitmq]] 1366 | # # url = "http://localhost:15672" 1367 | # # name = "rmq-server-1" # optional tag 1368 | # # username = "guest" 1369 | # # password = "guest" 1370 | # 1371 | # ## Optional SSL Config 1372 | # # ssl_ca = "/etc/telegraf/ca.pem" 1373 | # # ssl_cert = "/etc/telegraf/cert.pem" 1374 | # # ssl_key = "/etc/telegraf/key.pem" 1375 | # ## Use SSL but skip chain & host verification 1376 | # # insecure_skip_verify = false 1377 | # 1378 | # ## A list of nodes to pull metrics about. If not specified, metrics for 1379 | # ## all nodes are gathered. 1380 | # # nodes = ["rabbit@node1", "rabbit@node2"] 1381 | 1382 | 1383 | # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) 1384 | # [[inputs.raindrops]] 1385 | # ## An array of raindrops middleware URI to gather stats. 

# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
# [[inputs.raindrops]]
#   ## An array of raindrops middleware URI to gather stats.
#   urls = ["http://localhost:8080/_raindrops"]


# # Read metrics from one or many redis servers
# [[inputs.redis]]
#   ## specify servers via a url matching:
#   ##   [protocol://][:password]@address[:port]
#   ##   e.g.
#   ##     tcp://localhost:6379
#   ##     tcp://:password@192.168.99.100
#   ##     unix:///var/run/redis.sock
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no port is specified, 6379 is used
#   servers = ["tcp://localhost:6379"]


# # Read metrics from one or many RethinkDB servers
# [[inputs.rethinkdb]]
#   ## An array of URI to gather stats about. Specify an ip or hostname
#   ## with optional port and password. ie,
#   ##   rethinkdb://user:auth_key@10.10.3.30:28105,
#   ##   rethinkdb://10.10.3.33:18832,
#   ##   10.0.0.1:10000, etc.
#   servers = ["127.0.0.1:28015"]


# # Read metrics from one or many Riak servers
# [[inputs.riak]]
#   # Specify a list of one or more riak http servers
#   servers = ["http://localhost:8098"]


# # Monitor sensors, requires lm-sensors package
# [[inputs.sensors]]
#   ## Remove numbers from field names.
#   ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
#   # remove_numbers = true


# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
#   agents = [ "127.0.0.1:161" ]
#   timeout = "5s"
#   version = 2
#
#   # SNMPv1 & SNMPv2 parameters
#   community = "public"
#
#   # SNMPv2 & SNMPv3 parameters
#   max_repetitions = 50
#
#   # SNMPv3 parameters
#   #sec_name = "myuser"
#   #auth_protocol = "md5"      # Values: "MD5", "SHA", ""
#   #auth_password = "password123"
#   #sec_level = "authNoPriv"   # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
#   #context_name = ""
#   #priv_protocol = ""         # Values: "DES", "AES", ""
#   #priv_password = ""
#
#   # measurement name
#   name = "system"
#   [[inputs.snmp.field]]
#     name = "hostname"
#     oid = ".1.0.0.1.1"
#   [[inputs.snmp.field]]
#     name = "uptime"
#     oid = ".1.0.0.1.2"
#   [[inputs.snmp.field]]
#     name = "load"
#     oid = ".1.0.0.1.3"
#   [[inputs.snmp.field]]
#     oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
#   [[inputs.snmp.table]]
#     # measurement name
#     name = "remote_servers"
#     inherit_tags = [ "hostname" ]
#     [[inputs.snmp.table.field]]
#       name = "server"
#       oid = ".1.0.0.0.1.0"
#       is_tag = true
#     [[inputs.snmp.table.field]]
#       name = "connections"
#       oid = ".1.0.0.0.1.1"
#     [[inputs.snmp.table.field]]
#       name = "latency"
#       oid = ".1.0.0.0.1.2"
#
#   [[inputs.snmp.table]]
#     # auto populate table's fields using the MIB
#     oid = "HOST-RESOURCES-MIB::hrNetworkTable"

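# # OIDs like the ones above can be verified from the shell before wiring
# # them in (assuming the net-snmp tools are installed and an agent answers
# # on 127.0.0.1 with community "public"):
# #   snmpget -v2c -c public 127.0.0.1 HOST-RESOURCES-MIB::hrMemorySize.0
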

# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
#   ## Use 'oids.txt' file to translate oids to names
#   ## To generate 'oids.txt' you need to run:
#   ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
#   ## Or if you have another MIB folder with custom MIBs
#   ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
#   snmptranslate_file = "/tmp/oids.txt"
#   [[inputs.snmp_legacy.host]]
#     address = "192.168.2.2:161"
#     # SNMP community
#     community = "public" # default public
#     # SNMP version (1, 2 or 3)
#     # Version 3 not supported yet
#     version = 2 # default 2
#     # SNMP response timeout
#     timeout = 2.0 # default 2.0
#     # SNMP request retries
#     retries = 2 # default 2
#     # Which get/bulk do you want to collect for this host
#     collect = ["mybulk", "sysservices", "sysdescr"]
#     # Simple list of OIDs to get, in addition to "collect"
#     get_oids = []
#
#   [[inputs.snmp_legacy.host]]
#     address = "192.168.2.3:161"
#     community = "public"
#     version = 2
#     timeout = 2.0
#     retries = 2
#     collect = ["mybulk"]
#     get_oids = [
#       "ifNumber",
#       ".1.3.6.1.2.1.1.3.0",
#     ]
#
#   [[inputs.snmp_legacy.get]]
#     name = "ifnumber"
#     oid = "ifNumber"
#
#   [[inputs.snmp_legacy.get]]
#     name = "interface_speed"
#     oid = "ifSpeed"
#     instance = "0"
#
#   [[inputs.snmp_legacy.get]]
#     name = "sysuptime"
#     oid = ".1.3.6.1.2.1.1.3.0"
#     unit = "second"
#
#   [[inputs.snmp_legacy.bulk]]
#     name = "mybulk"
#     max_repetition = 127
#     oid = ".1.3.6.1.2.1.1"
#
#   [[inputs.snmp_legacy.bulk]]
#     name = "ifoutoctets"
#     max_repetition = 127
#     oid = "ifOutOctets"
#
#   [[inputs.snmp_legacy.host]]
#     address = "192.168.2.13:161"
#     #address = "127.0.0.1:161"
#     community = "public"
#     version = 2
#     timeout = 2.0
#     retries = 2
#     #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
#     collect = ["sysuptime" ]
#     [[inputs.snmp_legacy.host.table]]
#       name = "iftable3"
#       include_instances = ["enp5s0", "eth1"]
#
#   # SNMP TABLEs
#   # table with neither mapping nor subtables
#   [[inputs.snmp_legacy.table]]
#     name = "iftable1"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#
#   # table without mapping but with subtables
#   [[inputs.snmp_legacy.table]]
#     name = "iftable2"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
#
#   # table with mapping but without subtables
#   [[inputs.snmp_legacy.table]]
#     name = "iftable3"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     # if empty, get all instances
#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
#     # if empty, get all subtables
#
#   # table with both mapping and subtables
#   [[inputs.snmp_legacy.table]]
#     name = "iftable4"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     # if empty, get all instances
#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
#     # if empty, get all subtables
#     # sub_tables need not be "real" subtables
#     sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]

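# # For reference, the legacy "ifnumber" get above maps onto the current
# # inputs.snmp plugin roughly as follows (a sketch only; symbolic MIB name
# # resolution assumes IF-MIB is installed on the host running telegraf):
# #   [[inputs.snmp]]
# #     agents = [ "192.168.2.2:161" ]
# #     [[inputs.snmp.field]]
# #       name = "ifnumber"
# #       oid = "IF-MIB::ifNumber.0"
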

# # Read metrics from Microsoft SQL Server
# [[inputs.sqlserver]]
#   ## Specify instances to monitor with a list of connection strings.
#   ## All connection parameters are optional.
#   ## By default, the host is localhost, listening on default port, TCP 1433.
#   ## On Windows, the user is the currently running AD user (SSO).
#   ## See https://github.com/denisenkom/go-mssqldb for detailed connection
#   ## parameters.
#   # servers = [
#   #  "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
#   # ]


# # Sysstat metrics collector
# [[inputs.sysstat]]
#   ## Path to the sadc command.
#   #
#   ## Common Defaults:
#   ##   Debian/Ubuntu: /usr/lib/sysstat/sadc
#   ##   Arch:          /usr/lib/sa/sadc
#   ##   RHEL/CentOS:   /usr/lib64/sa/sadc
#   sadc_path = "/usr/lib/sa/sadc" # required
#   #
#   #
#   ## Path to the sadf command, if it is not in PATH
#   # sadf_path = "/usr/bin/sadf"
#   #
#   #
#   ## Activities is a list of activities that are passed as arguments to the
#   ## sadc collector utility (e.g. DISK, SNMP, etc.)
#   ## The more activities that are added, the more data is collected.
#   # activities = ["DISK"]
#   #
#   #
#   ## Group metrics to measurements.
#   ##
#   ## If group is false, each metric will be prefixed with a description
#   ## and represents a measurement by itself.
#   ##
#   ## If group is true, corresponding metrics are grouped to a single measurement.
#   # group = true
#   #
#   #
#   ## Options for the sadf command. The values on the left represent the sadf
#   ## options and the values on the right their description (which are used for
#   ## grouping and prefixing metrics).
#   ##
#   ## Run 'sar -h' or 'man sar' to find out the supported options for your
#   ## sysstat version.
#   [inputs.sysstat.options]
#     -C = "cpu"
#     -B = "paging"
#     -b = "io"
#     -d = "disk"             # requires DISK activity
#     "-n ALL" = "network"
#     "-P ALL" = "per_cpu"
#     -q = "queue"
#     -R = "mem"
#     -r = "mem_util"
#     -S = "swap_util"
#     -u = "cpu_util"
#     -v = "inode"
#     -W = "swap"
#     -w = "task"
#   #  -H = "hugepages"       # only available for newer linux distributions
#   # "-I ALL" = "interrupts" # requires INT activity
#   #
#   #
#   ## Device tags can be used to add additional tags for devices.
#   ## For example the configuration below adds a tag vg with value rootvg for
#   ## all metrics with sda devices.
#   # [[inputs.sysstat.device_tags.sda]]
#   #   vg = "rootvg"

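# # The right sadc_path varies by distribution; a quick probe (assuming a
# # POSIX shell) prints whichever of the common locations actually exists:
# #   for p in /usr/lib/sysstat/sadc /usr/lib/sa/sadc /usr/lib64/sa/sadc; do
# #     [ -x "$p" ] && echo "$p"; done
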

# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
#   ## Set the amplitude
#   amplitude = 10.0


# # Read Twemproxy stats data
# [[inputs.twemproxy]]
#   ## Twemproxy stats address and port (no scheme)
#   addr = "localhost:22222"
#   ## Monitor pool name
#   pools = ["redis_pool", "mc_pool"]


# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
#   ## The default location of the varnishstat binary can be overridden with:
#   binary = "/usr/bin/varnishstat"
#
#   ## By default, telegraf gathers stats for 3 metric points.
#   ## Setting stats will override the defaults shown below.
#   ## Glob matching can be used, ie, stats = ["MAIN.*"]
#   ## stats may also be set to ["*"], which will collect all stats
#   stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]


# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
# [[inputs.zfs]]
#   ## ZFS kstat path. Ignored on FreeBSD
#   ## If not specified, then default is:
#   # kstatPath = "/proc/spl/kstat/zfs"
#
#   ## By default, telegraf gathers all zfs stats
#   ## If not specified, then default is:
#   # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#
#   ## By default, don't gather zpool stats
#   # poolMetrics = false


# # Reads 'mntr' stats from one or many zookeeper servers
# [[inputs.zookeeper]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
#
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no port is specified, 2181 is used
#   servers = [":2181"]



###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
#   ## topic(s) to consume
#   topics = ["telegraf"]
#   ## an array of Zookeeper connection strings
#   zookeeper_peers = ["localhost:2181"]
#   ## Zookeeper Chroot
#   zookeeper_chroot = ""
#   ## the name of the consumer group
#   consumer_group = "telegraf_metrics_consumers"
#   ## Offset (must be either "oldest" or "newest")
#   offset = "oldest"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"

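# # data_format = "influx" means each consumed message must be InfluxDB line
# # protocol; a representative payload (measurement, tags, fields, optional
# # nanosecond timestamp) looks like:
# #   cpu,host=server01 usage_idle=98.2 1455320660004257758
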

# # Stream and parse log file(s).
# [[inputs.logparser]]
#   ## Log files to parse.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   /var/log/**.log     -> recursively find all .log files in /var/log
#   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
#   ##   /var/log/apache.log -> only tail the apache log file
#   files = ["/var/log/apache/access.log"]
#   ## Read file from beginning.
#   from_beginning = false
#
#   ## Parse logstash-style "grok" patterns:
#   ##   Telegraf built-in parsing patterns: https://goo.gl/dkay10
#   [inputs.logparser.grok]
#     ## This is a list of patterns to check the given log file(s) for.
#     ## Note that adding patterns here increases processing time. The most
#     ## efficient configuration is to have one pattern per logparser.
#     ## Other common built-in patterns are:
#     ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
#     ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
#     patterns = ["%{COMBINED_LOG_FORMAT}"]
#     ## Name of the output measurement.
#     measurement = "apache_access_log"
#     ## Full path(s) to custom pattern files.
#     custom_pattern_files = []
#     ## Custom patterns can also be defined here. Put one pattern per line.
#     custom_patterns = '''
#     '''


# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
#   servers = ["localhost:1883"]
#   ## MQTT QoS, must be 0, 1, or 2
#   qos = 0
#
#   ## Topics to subscribe to
#   topics = [
#     "telegraf/host01/cpu",
#     "telegraf/+/mem",
#     "sensors/#",
#   ]
#
#   # if true, messages that can't be delivered while the subscriber is offline
#   # will be delivered when it comes back (such as on service restart).
#   # NOTE: if true, client_id MUST be set
#   persistent_session = false
#   # If empty, a random client ID will be generated.
#   client_id = ""
#
#   ## username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
#   ## urls of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Use Transport Layer Security
#   secure = false
#   ## subject(s) to consume
#   subjects = ["telegraf"]
#   ## name a queue group
#   queue_group = "telegraf_consumers"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"

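# # A hand-rolled test message for the MQTT consumer above (assuming the
# # mosquitto clients are installed and a broker listens on localhost:1883);
# # the payload is line protocol because data_format = "influx":
# #   mosquitto_pub -h localhost -t telegraf/host01/cpu -m 'cpu,host=host01 usage_idle=97.1'
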

# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
#   ## A string representing the NSQD TCP Endpoint
#   server = "localhost:4150"
#   topic = "telegraf"
#   channel = "consumer"
#   max_in_flight = 100
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Statsd Server
# [[inputs.statsd]]
#   ## Address and port to host UDP listener on
#   service_address = ":8125"
#   ## Delete gauges every interval (default=false)
#   delete_gauges = false
#   ## Delete counters every interval (default=false)
#   delete_counters = false
#   ## Delete sets every interval (default=false)
#   delete_sets = false
#   ## Delete timings & histograms every interval (default=true)
#   delete_timings = true
#   ## Percentiles to calculate for timing & histogram stats
#   percentiles = [90]
#
#   ## separator to use between elements of a statsd metric
#   metric_separator = "_"
#
#   ## Parses tags in the datadog statsd format
#   ## http://docs.datadoghq.com/guides/dogstatsd/
#   parse_data_dog_tags = false
#
#   ## Statsd data translation templates, more info can be read here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
#   # templates = [
#   #     "cpu.* measurement*"
#   # ]
#
#   ## Number of UDP messages allowed to queue up. Once filled,
#   ## the statsd server will start dropping packets
#   allowed_pending_messages = 10000
#
#   ## Number of timing/histogram values to track per-measurement in the
#   ## calculation of percentiles. Raising this limit increases the accuracy
#   ## of percentiles but also increases the memory usage and cpu time.
#   percentile_limit = 1000

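# # Once the statsd listener above is enabled, a single counter increment can
# # be sent from the shell as a smoke test (nc flags vary between netcat
# # builds; this assumes a BSD/GNU netcat with UDP support):
# #   echo -n "deploys.myservice:1|c" | nc -u -w1 127.0.0.1 8125
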

# # Stream a log file, like the tail -f command
# [[inputs.tail]]
#   ## files to tail.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/mymetrics.out"]
#   ## Read file from beginning.
#   from_beginning = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Generic TCP listener
# [[inputs.tcp_listener]]
#   ## Address and port to host TCP listener on
#   service_address = ":8094"
#
#   ## Number of TCP messages allowed to queue up. Once filled, the
#   ## TCP listener will start dropping packets.
#   allowed_pending_messages = 10000
#
#   ## Maximum number of concurrent TCP connections to allow
#   max_tcp_connections = 250
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Generic UDP listener
# [[inputs.udp_listener]]
#   ## Address and port to host UDP listener on
#   service_address = ":8092"
#
#   ## Number of UDP messages allowed to queue up. Once filled, the
#   ## UDP listener will start dropping packets.
#   allowed_pending_messages = 10000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # A Webhooks Event collector
# [[inputs.webhooks]]
#   ## Address and port to host Webhook listener on
#   service_address = ":1619"
#
#   [inputs.webhooks.github]
#     path = "/github"
#
#   [inputs.webhooks.mandrill]
#     path = "/mandrill"
#
#   [inputs.webhooks.rollbar]
#     path = "/rollbar"
--------------------------------------------------------------------------------