├── ngnix ├── .htpasswd ├── sites-enabled │ └── kibana └── nginx.conf ├── logstash ├── drivers │ └── sqljdbc41.jar ├── config │ ├── pipelines.yml │ ├── jvm.options │ ├── logstash.yml │ └── log4j2.properties └── pipeline │ └── jdbc-sql-pipeline.conf ├── elastalert ├── rules │ ├── table_count_zero.yaml │ └── memory_high_alert.yaml └── config │ ├── config.json │ └── elastalert.yaml ├── README.md ├── docker-instalation └── docker-compose-METRICS.yml ├── elasticsearch ├── jvm.options ├── elasticsearch.yml └── log4j2.properties ├── kibana └── kibana.yml └── metricbeat └── metricbeat.yml /ngnix/.htpasswd: -------------------------------------------------------------------------------- 1 | kibanaUser:$apr5$T661F4lz$PQ8rVxZY.F/dfr9rsR0Zr. -------------------------------------------------------------------------------- /logstash/drivers/sqljdbc41.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManuelMourato25/elastic-stack-architecture-example/HEAD/logstash/drivers/sqljdbc41.jar -------------------------------------------------------------------------------- /ngnix/sites-enabled/kibana: -------------------------------------------------------------------------------- 1 | server { 2 | listen 9299; 3 | server_name ; 4 | 5 | location / { 6 | proxy_pass http://:5601; 7 | auth_basic "Restricted Content"; 8 | auth_basic_user_file /etc/nginx/.htpasswd; 9 | 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /elastalert/rules/table_count_zero.yaml: -------------------------------------------------------------------------------- 1 | es_host: 2 | es_port: 9200 3 | name: Table_Empty_Rule 4 | type: frequency 5 | index: business-metrics 6 | num_events: 1 7 | timeframe: 8 | minutes: 2 9 | filter: 10 | - script: 11 | script: 12 | inline: doc['count_table1'].value > 0 13 | alert: 14 | - "slack" 15 | slack: 16 | slack_webhook_url: "" 
-------------------------------------------------------------------------------- /elastalert/rules/memory_high_alert.yaml: -------------------------------------------------------------------------------- 1 | es_host: 2 | es_port: 9200 3 | name: Memory_Too_High_Rule 4 | type: frequency 5 | index: infrastructure-metrics 6 | num_events: 1 7 | timeframe: 8 | minutes: 2 9 | filter: 10 | - range: 11 | system.memory.used: 12 | from: 15000000000 13 | alert: 14 | - "slack" 15 | slack: 16 | slack_webhook_url: "" 17 | -------------------------------------------------------------------------------- /elastalert/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "appName": "elastalert-server", 3 | "port": 3030, 4 | "elastalertPath": "/opt/elastalert", 5 | "verbose": true, 6 | "es_debug": true, 7 | "debug": false, 8 | "rulesPath": { 9 | "relative": true, 10 | "path": "/rules" 11 | }, 12 | "templatesPath": { 13 | "relative": true, 14 | "path": "/rule_templates" 15 | }, 16 | "dataPath": { 17 | "relative": true, 18 | "path": "/server_data" 19 | }, 20 | "es_host": "", 21 | "es_port": 9200, 22 | "writeback_index": "elastalert_status" 23 | } 24 | -------------------------------------------------------------------------------- /logstash/config/pipelines.yml: -------------------------------------------------------------------------------- 1 | # List of pipelines to be loaded by Logstash 2 | # 3 | # This document must be a list of dictionaries/hashes, where the keys/values are pipeline settings. 4 | # Default values for omitted settings are read from the `logstash.yml` file. 5 | # When declaring multiple pipelines, each MUST have its own `pipeline.id`. 
6 | 7 | - pipeline.id: sql-pipeline 8 | path.config: "/usr/share/logstash/pipeline/jdbc-sql-pipeline.conf" 9 | pipeline.workers: 2 10 | pipeline.batch.size: 50 11 | pipeline.batch.delay: 50 12 | pipeline.output.workers: 1 13 | 14 | - pipeline.id: jt400-pipeline 15 | path.config: "/usr/share/logstash/pipeline/jdbc-jt400-pipeline.conf" 16 | pipeline.workers: 2 17 | pipeline.batch.size: 50 18 | pipeline.batch.delay: 50 19 | pipeline.output.workers: 1 -------------------------------------------------------------------------------- /elastalert/config/elastalert.yaml: -------------------------------------------------------------------------------- 1 | # The elasticsearch hostname for metadata writeback 2 | # Note that every rule can have its own elasticsearch host 3 | es_host: 4 | 5 | # The elasticsearch port 6 | es_port: 9200 7 | 8 | # The index on es_host which is used for metadata storage 9 | # This can be a unmapped index, but it is recommended that you run 10 | # elastalert-create-index to set a mapping 11 | writeback_index: elastalert_status 12 | 13 | # This is the folder that contains the rule yaml files 14 | # Any .yaml file will be loaded as a rule 15 | rules_folder: rules 16 | 17 | # How often ElastAlert will query elasticsearch 18 | # The unit can be anything from weeks to seconds 19 | run_every: 20 | seconds: 5 21 | 22 | # ElastAlert will buffer results from the most recent 23 | # period of time, in case some log sources are not in real time 24 | buffer_time: 25 | minutes: 1 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Monitoring Applications and Services: an ELK approach 2 | 3 | The current repository contains a dockerized implementation of the following services: 4 | * Elasticsearch 5 | * Kibana 6 | * Metricbeat 7 | * Logstash 8 | * Elastalert 9 | * NGINX (optional) 10 | 11 | ## How to start the docker containers 12 | 
13 | 1) Replace all variables inside <...> with their correct values depending on your host configurations/slack_hook/etc. 14 | 2) Place all directories listed in this repository into the /opt folder (or replace /opt in docker-instalation/docker-compose-METRICS.yml ) 15 | 3) Run the following command (assuming you have docker and docker-compose installed in your machine): 16 | 17 | ``` 18 | docker-compose -f docker-compose-METRICS.yml up -d elasticsearch kibana metricbeat logstash elastalert nginx 19 | 20 | ``` 21 | 22 | For more information on the overall architecture and this specific implementation, refer to https://medium.com/@manuelmourato25/elk-stack-alerting-how-to-monitor-your-business-and-infrastructure-data-part-one-a4a1c3427745 -------------------------------------------------------------------------------- /logstash/pipeline/jdbc-sql-pipeline.conf: -------------------------------------------------------------------------------- 1 | input { 2 | jdbc { 3 | jdbc_driver_library => "/opt/logstash/drivers/sqljdbc41.jar" 4 | jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver" 5 | jdbc_connection_string => "jdbc:sqlserver://;InstanceName=;DatabaseName=;" 6 | jdbc_user => "USERNAME" 7 | jdbc_password => "PASSWORD" 8 | parameters => { "table_name" => "TEST_TABLE" } 9 | schedule => "* * * * *" 10 | statement => "SELECT count(1) as COUNT_TEST_TABLE from TEST_TABLE" 11 | } 12 | 13 | jdbc { 14 | jdbc_driver_library => "/opt/logstash/drivers/sqljdbc41.jar" 15 | jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver" 16 | jdbc_connection_string => "jdbc:sqlserver://;InstanceName=;DatabaseName=;" 17 | jdbc_user => "USERNAME" 18 | jdbc_password => "PASSWORD" 19 | parameters => { "table_name" => "TEST_TABLE2" } 20 | schedule => "* * * * *" 21 | statement => "SELECT count(1) as COUNT_TEST_TABLE2 from TEST_TABLE2" 22 | } 23 | 24 | 25 | } 26 | output { 27 | elasticsearch { 28 | hosts => "" 29 | index => "business-metrics" 30 | 
document_type => "long" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /logstash/config/jvm.options: -------------------------------------------------------------------------------- 1 | ## JVM configuration 2 | 3 | # Xms represents the initial size of total heap space 4 | # Xmx represents the maximum size of total heap space 5 | 6 | -Xms1g 7 | -Xmx2g 8 | 9 | ################################################################ 10 | ## Expert settings 11 | ################################################################ 12 | ## 13 | ## All settings below this section are considered 14 | ## expert settings. Don't tamper with them unless 15 | ## you understand what you are doing 16 | ## 17 | ################################################################ 18 | 19 | ## GC configuration 20 | -XX:+UseParNewGC 21 | -XX:+UseConcMarkSweepGC 22 | -XX:CMSInitiatingOccupancyFraction=75 23 | -XX:+UseCMSInitiatingOccupancyOnly 24 | 25 | ## Locale 26 | # Set the locale language 27 | -Duser.language=en 28 | 29 | # Set the locale country 30 | -Duser.country=US 31 | 32 | # Set the locale variant, if any 33 | #-Duser.variant= 34 | 35 | ## basic 36 | 37 | # set the I/O temp directory 38 | #-Djava.io.tmpdir=$HOME 39 | 40 | # set to headless, just in case 41 | -Djava.awt.headless=true 42 | 43 | # ensure UTF-8 encoding by default (e.g. 
filenames) 44 | -Dfile.encoding=UTF-8 45 | 46 | # use our provided JNA always versus the system one 47 | #-Djna.nosys=true 48 | 49 | # Turn on JRuby invokedynamic 50 | -Djruby.compile.invokedynamic=true 51 | # Force Compilation 52 | -Djruby.jit.threshold=0 53 | 54 | ## heap dumps 55 | 56 | # generate a heap dump when an allocation from the Java heap fails 57 | # heap dumps are created in the working directory of the JVM 58 | -XX:+HeapDumpOnOutOfMemoryError 59 | 60 | # specify an alternative path for heap dumps 61 | # ensure the directory exists and has sufficient space 62 | #-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof 63 | 64 | ## GC logging 65 | #-XX:+PrintGCDetails 66 | #-XX:+PrintGCTimeStamps 67 | #-XX:+PrintGCDateStamps 68 | #-XX:+PrintClassHistogram 69 | #-XX:+PrintTenuringDistribution 70 | #-XX:+PrintGCApplicationStoppedTime 71 | 72 | # log GC status to a file with time stamps 73 | # ensure the directory exists 74 | #-Xloggc:${LS_GC_LOG_FILE} 75 | 76 | # Entropy source for randomness 77 | -Djava.security.egd=file:/dev/urandom 78 | -------------------------------------------------------------------------------- /docker-instalation/docker-compose-METRICS.yml: -------------------------------------------------------------------------------- 1 | version: '2.1' 2 | services: 3 | elasticsearch: 4 | image: docker.elastic.co/elasticsearch/elasticsearch:6.5.1 5 | container_name: elasticsearch 6 | network_mode: host 7 | environment: 8 | - discovery.type=single-node 9 | ports: 10 | - 9200:9200 11 | - 9300:9300 12 | volumes: 13 | - /opt/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml 14 | - /opt/elasticsearch/jvm.options:/etc/elasticsearch/jvm.options 15 | - /opt/elasticsearch/log4j2.properties:/opt/elasticsearch/log4j2.properties 16 | - /opt/elasticsearch/data:/usr/share/elasticsearch/data 17 | kibana: 18 | image: docker.elastic.co/kibana/kibana:6.5.1 19 | container_name: kibana 20 | depends_on: 21 | - elasticsearch 22 | 
network_mode: host 23 | volumes: 24 | - /opt/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml 25 | metricbeat: 26 | image: docker.elastic.co/beats/metricbeat:6.5.1 27 | container_name: metricbeat 28 | depends_on: 29 | - elasticsearch 30 | network_mode: host 31 | volumes: 32 | - /opt/metricbeat/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml 33 | environment: 34 | - output.elasticsearch.hosts=[":9200"] 35 | logstash: 36 | image: docker.elastic.co/logstash/logstash:6.5.1 37 | container_name: logstash 38 | network_mode: host 39 | ports: 40 | - 9600:9600 41 | volumes: 42 | - /opt/logstash/config:/usr/share/logstash/config 43 | - /opt/logstash/pipeline:/usr/share/logstash/pipeline 44 | - /opt/logstash/data:/usr/share/logstash/data 45 | - /opt/logstash/drivers:/opt/logstash/drivers 46 | dns: 47 | - # IP necessary to connect to a database instance external to where the server in which the container is running 48 | - # IP necessary to connect to a database instance external to where the server in which the container is running 49 | elastalert: 50 | image: bitsensor/elastalert:1.0.0 51 | container_name: elastalert 52 | network_mode: host 53 | ports: 54 | - 3030:3030 55 | volumes: 56 | - /opt/elastalert/config/elastalert.yaml:/opt/elastalert/config.yaml 57 | - /opt/elastalert/config/config.json:/opt/elastalert-server/config/config.json 58 | - /opt/elastalert/rules:/opt/elastalert/rules 59 | nginx: 60 | image: nginx:1.14-alpine 61 | container_name: nginx 62 | volumes: 63 | - /opt/ngnix/nginx.conf:/etc/nginx/nginx.conf 64 | - /opt/ngnix/sites-enabled/kibana:/etc/nginx/conf.d/kibana.conf 65 | - /opt/ngnix/.htpasswd:/etc/nginx/.htpasswd 66 | network_mode: host 67 | ports: 68 | - 9299:9299 69 | -------------------------------------------------------------------------------- /ngnix/nginx.conf: -------------------------------------------------------------------------------- 1 | #user nobody; 2 | worker_processes 1; 3 | 4 | #error_log logs/error.log; 5 | #error_log 
logs/error.log notice; 6 | #error_log logs/error.log info; 7 | 8 | #pid logs/nginx.pid; 9 | 10 | 11 | events { 12 | worker_connections 1024; 13 | } 14 | 15 | 16 | http { 17 | include mime.types; 18 | default_type application/octet-stream; 19 | include /etc/nginx/conf.d/*.conf; 20 | 21 | #log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 22 | # '$status $body_bytes_sent "$http_referer" ' 23 | # '"$http_user_agent" "$http_x_forwarded_for"'; 24 | 25 | #access_log logs/access.log main; 26 | 27 | sendfile on; 28 | #tcp_nopush on; 29 | 30 | #keepalive_timeout 0; 31 | keepalive_timeout 65; 32 | 33 | #gzip on; 34 | 35 | server { 36 | listen 80; 37 | server_name localhost; 38 | 39 | #charset koi8-r; 40 | 41 | #access_log logs/host.access.log main; 42 | 43 | location / { 44 | root html; 45 | index index.html index.htm; 46 | } 47 | 48 | #error_page 404 /404.html; 49 | 50 | # redirect server error pages to the static page /50x.html 51 | # 52 | error_page 500 502 503 504 /50x.html; 53 | location = /50x.html { 54 | root html; 55 | } 56 | 57 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 58 | # 59 | #location ~ \.php$ { 60 | # proxy_pass http://127.0.0.1; 61 | #} 62 | 63 | # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 64 | # 65 | #location ~ \.php$ { 66 | # root html; 67 | # fastcgi_pass 127.0.0.1:9000; 68 | # fastcgi_index index.php; 69 | # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; 70 | # include fastcgi_params; 71 | #} 72 | 73 | # deny access to .htaccess files, if Apache's document root 74 | # concurs with nginx's one 75 | # 76 | #location ~ /\.ht { 77 | # deny all; 78 | #} 79 | } 80 | 81 | 82 | # another virtual host using mix of IP-, name-, and port-based configuration 83 | # 84 | #server { 85 | # listen 8000; 86 | # listen somename:8080; 87 | # server_name somename alias another.alias; 88 | 89 | # location / { 90 | # root html; 91 | # index index.html index.htm; 92 | # } 93 | #} 94 | 95 | 96 | # 
HTTPS server 97 | # 98 | #server { 99 | # listen 443 ssl; 100 | # server_name localhost; 101 | 102 | # ssl_certificate cert.pem; 103 | # ssl_certificate_key cert.key; 104 | 105 | # ssl_session_cache shared:SSL:1m; 106 | # ssl_session_timeout 5m; 107 | 108 | # ssl_ciphers HIGH:!aNULL:!MD5; 109 | # ssl_prefer_server_ciphers on; 110 | 111 | # location / { 112 | # root html; 113 | # index index.html index.htm; 114 | # } 115 | #} 116 | 117 | } 118 | -------------------------------------------------------------------------------- /elasticsearch/jvm.options: -------------------------------------------------------------------------------- 1 | ## JVM configuration 2 | 3 | ################################################################ 4 | ## IMPORTANT: JVM heap size 5 | ################################################################ 6 | ## 7 | ## You should always set the min and max JVM heap 8 | ## size to the same value. For example, to set 9 | ## the heap to 4 GB, set: 10 | ## 11 | ## -Xms4g 12 | ## -Xmx4g 13 | ## 14 | ## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html 15 | ## for more information 16 | ## 17 | ################################################################ 18 | 19 | # Xms represents the initial size of total heap space 20 | # Xmx represents the maximum size of total heap space 21 | 22 | ################################################################ 23 | ## Expert settings 24 | ################################################################ 25 | ## 26 | ## All settings below this section are considered 27 | ## expert settings. 
Don't tamper with them unless 28 | ## you understand what you are doing 29 | ## 30 | ################################################################ 31 | 32 | ## GC configuration 33 | -XX:+UseConcMarkSweepGC 34 | -XX:CMSInitiatingOccupancyFraction=75 35 | -XX:+UseCMSInitiatingOccupancyOnly 36 | 37 | ## optimizations 38 | 39 | # disable calls to System#gc 40 | -XX:+DisableExplicitGC 41 | 42 | # pre-touch memory pages used by the JVM during initialization 43 | -XX:+AlwaysPreTouch 44 | 45 | ## basic 46 | 47 | # force the server VM (remove on 32-bit client JVMs) 48 | -server 49 | 50 | # explicitly set the stack size (reduce to 320k on 32-bit client JVMs) 51 | -Xss1m 52 | 53 | # set to headless, just in case 54 | -Djava.awt.headless=true 55 | 56 | # ensure UTF-8 encoding by default (e.g. filenames) 57 | -Dfile.encoding=UTF-8 58 | 59 | # use our provided JNA always versus the system one 60 | -Djna.nosys=true 61 | 62 | # use old-style file permissions on JDK9 63 | -Djdk.io.permissionsUseCanonicalPath=true 64 | 65 | # flags to configure Netty 66 | -Dio.netty.noUnsafe=true 67 | -Dio.netty.noKeySetOptimization=true 68 | -Dio.netty.recycler.maxCapacityPerThread=0 69 | 70 | # log4j 2 71 | -Dlog4j.shutdownHookEnabled=false 72 | -Dlog4j2.disable.jmx=true 73 | -Dlog4j.skipJansi=true 74 | 75 | ## heap dumps 76 | 77 | # generate a heap dump when an allocation from the Java heap fails 78 | # heap dumps are created in the working directory of the JVM 79 | -XX:+HeapDumpOnOutOfMemoryError 80 | 81 | # specify an alternative path for heap dumps 82 | # ensure the directory exists and has sufficient space 83 | #-XX:HeapDumpPath=${heap.dump.path} 84 | 85 | ## GC logging 86 | 87 | #-XX:+PrintGCDetails 88 | #-XX:+PrintGCTimeStamps 89 | #-XX:+PrintGCDateStamps 90 | #-XX:+PrintClassHistogram 91 | #-XX:+PrintTenuringDistribution 92 | #-XX:+PrintGCApplicationStoppedTime 93 | 94 | # log GC status to a file with time stamps 95 | # ensure the directory exists 96 | #-Xloggc:${loggc} 97 | 98 | # 
Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON. 99 | # If documents were already indexed with unquoted fields in a previous version 100 | # of Elasticsearch, some operations may throw errors. 101 | # 102 | # WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided 103 | # only for migration purposes. 104 | #-Delasticsearch.json.allow_unquoted_field_names=true 105 | -------------------------------------------------------------------------------- /kibana/kibana.yml: -------------------------------------------------------------------------------- 1 | # Kibana is served by a back end server. This setting specifies the port to use. 2 | server.port: 5601 3 | 4 | # Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values. 5 | # The default is 'localhost', which usually means remote machines will not be able to connect. 6 | # To allow connections from remote users, set this parameter to a non-loopback address. 7 | server.host: "" 8 | 9 | # Enables you to specify a path to mount Kibana at if you are running behind a proxy. 10 | # Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath 11 | # from requests it receives, and to prevent a deprecation warning at startup. 12 | # This setting cannot end in a slash. 13 | server.basePath: "/kibana" 14 | 15 | # Specifies whether Kibana should rewrite requests that are prefixed with 16 | # `server.basePath` or require that they are rewritten by your reverse proxy. 17 | # This setting was effectively always `false` before Kibana 6.3 and will 18 | # default to `true` starting in Kibana 7.0. 19 | server.rewriteBasePath: true 20 | 21 | # The maximum payload size in bytes for incoming server requests. 22 | #server.maxPayloadBytes: 1048576 23 | 24 | # The Kibana server's name. This is used for display purposes. 
25 | server.name: "My Machine" 26 | 27 | # The URLs of the Elasticsearch instances to use for all your queries. 28 | elasticsearch.url: "http://:9200" 29 | 30 | # When this setting's value is true Kibana uses the hostname specified in the server.host 31 | # setting. When the value of this setting is false, Kibana uses the hostname of the host 32 | # that connects to this Kibana instance. 33 | elasticsearch.preserveHost: true 34 | 35 | # Kibana uses an index in Elasticsearch to store saved searches, visualizations and 36 | # dashboards. Kibana creates a new index if the index doesn't already exist. 37 | kibana.index: ".kibana" 38 | 39 | # The default application to load. 40 | kibana.defaultAppId: "home" 41 | 42 | 43 | # Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of 44 | # the elasticsearch.requestTimeout setting. 45 | elasticsearch.pingTimeout: 1500 46 | 47 | # Time in milliseconds to wait for responses from the back end or Elasticsearch. This value 48 | # must be a positive integer. 49 | elasticsearch.requestTimeout: 30000 50 | 51 | 52 | # Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable. 53 | elasticsearch.shardTimeout: 30000 54 | 55 | # Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying. 56 | elasticsearch.startupTimeout: 5000 57 | 58 | # Logs queries sent to Elasticsearch. Requires logging.verbose set to true. 59 | elasticsearch.logQueries: false 60 | 61 | # Specifies the path where Kibana creates the process ID file. 62 | #pid.file: /var/run/kibana.pid 63 | 64 | # Enables you specify a file where Kibana stores log output. 65 | logging.dest: stdout 66 | 67 | # Set the value of this setting to true to suppress all logging output. 68 | logging.silent: false 69 | 70 | # Set the value of this setting to true to suppress all logging output other than error messages. 
71 | logging.quiet: false 72 | 73 | # Set the value of this setting to true to log all events, including system usage information 74 | # and all requests. 75 | logging.verbose: false 76 | 77 | # Set the interval in milliseconds to sample system and process performance 78 | # metrics. Minimum is 100ms. Defaults to 5000. 79 | ops.interval: 5000 80 | 81 | # Specifies locale to be used for all localizable strings, dates and number formats. 82 | i18n.locale: "en" 83 | -------------------------------------------------------------------------------- /logstash/config/logstash.yml: -------------------------------------------------------------------------------- 1 | # Settings file in YAML 2 | # 3 | # ------------ Node identity ------------ 4 | # 5 | # Use a descriptive name for the node: 6 | # 7 | node.name: Logstash_Server 8 | # 9 | # If omitted the node name will default to the machine's host name 10 | # 11 | # ------------ Data path ------------------ 12 | # 13 | # Which directory should be used by logstash and its plugins 14 | # for any persistent needs. Defaults to LOGSTASH_HOME/data 15 | # 16 | path.data: /usr/share/logstash/data 17 | # 18 | # 19 | # ------------ Queuing Settings -------------- 20 | # 21 | # Internal queuing model, "memory" for legacy in-memory based queuing and 22 | # "persisted" for disk-based acked queueing. Default is memory 23 | # 24 | queue.type: persisted 25 | # 26 | # If using queue.type: persisted, the directory path where the data files will be stored. 27 | # Default is path.data/queue 28 | # 29 | path.queue: /usr/share/logstash/queue 30 | # 31 | # If using queue.type: persisted, the page data files size. The queue data consists of 32 | # append-only data files separated into pages. Default is 64mb 33 | # 34 | queue.page_capacity: 64mb 35 | # 36 | # If using queue.type: persisted, the maximum number of unread events in the queue. 
37 | # Default is 0 (unlimited) 38 | # 39 | queue.max_events: 0 40 | # 41 | # If using queue.type: persisted, the total capacity of the queue in number of bytes. 42 | # If you would like more unacked events to be buffered in Logstash, you can increase the 43 | # capacity using this setting. Please make sure your disk drive has capacity greater than 44 | # the size specified here. If both max_bytes and max_events are specified, Logstash will pick 45 | # whichever criteria is reached first 46 | # Default is 1024mb or 1gb 47 | # 48 | queue.max_bytes: 1024mb 49 | # 50 | # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint 51 | # Default is 1024, 0 for unlimited 52 | # 53 | queue.checkpoint.acks: 0 54 | # 55 | # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint 56 | # Default is 1024, 0 for unlimited 57 | # 58 | # queue.checkpoint.writes: 1024 59 | # 60 | # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page 61 | # Default is 1000, 0 for no periodic checkpoint. 62 | # 63 | # queue.checkpoint.interval: 1000 64 | # 65 | # ------------ Dead-Letter Queue Settings -------------- 66 | # Flag to turn on dead-letter queue. 67 | # 68 | # dead_letter_queue.enable: false 69 | 70 | # If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries 71 | # will be dropped if they would increase the size of the dead letter queue beyond this setting. 72 | # Default is 1024mb 73 | # dead_letter_queue.max_bytes: 1024mb 74 | 75 | # If using dead_letter_queue.enable: true, the directory path where the data files will be stored. 
76 | # Default is path.data/dead_letter_queue 77 | # 78 | # path.dead_letter_queue: 79 | # 80 | # ------------ Metrics Settings -------------- 81 | # 82 | # Bind address for the metrics REST endpoint 83 | # 84 | http.host: "127.0.0.1" 85 | # 86 | # Bind port for the metrics REST endpoint, this option also accept a range 87 | # (9600-9700) and logstash will pick up the first available ports. 88 | # 89 | http.port: 9600-9700 90 | # 91 | # ------------ Debugging Settings -------------- 92 | # 93 | # Options for log.level: 94 | # * fatal 95 | # * error 96 | # * warn 97 | # * info (default) 98 | # * debug 99 | # * trace 100 | # 101 | log.level: info 102 | path.logs: /usr/share/logstash/logs 103 | # 104 | # ------------ Other Settings -------------- 105 | # 106 | # Where to find custom plugins 107 | # path.plugins: [] 108 | # -------------------------------------------------------------------------------- /elasticsearch/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | ##################### ElasticSearch Configuration ##################### 2 | 3 | ################################### Cluster ################################### 4 | 5 | # Cluster name identifies your cluster for auto-discovery. If you're running 6 | # multiple clusters on the same network, make sure you're using unique names. 7 | # 8 | cluster.name: metricsCluster 9 | 10 | 11 | #################################### Node ##################################### 12 | 13 | # Node names are generated dynamically on startup, so you're relieved 14 | # from configuring them manually. You can tie this node to a specific name: 15 | # 16 | node.name: "singleNode1" 17 | 18 | # Every node can be configured to allow or deny being eligible as the master, 19 | # and to allow or deny to store the data. 
20 | # 21 | # Allow this node to be eligible as a master node (enabled by default): 22 | # 23 | node.master: true 24 | # 25 | # Allow this node to store data (enabled by default): 26 | # 27 | node.data: true 28 | 29 | 30 | #################################### Index #################################### 31 | 32 | # You can set a number of options (such as shard/replica options, mapping 33 | # or analyzer definitions, translog settings, ...) for indices globally, 34 | # in this file. 35 | # 36 | # Note, that it makes more sense to configure index settings specifically for 37 | # a certain index, either when creating it or by using the index templates API. 38 | # 39 | # See and 40 | # 41 | # for more information. 42 | 43 | # Set the number of shards (splits) of an index (5 by default): 44 | # 45 | #index.number_of_shards: 5 46 | 47 | # Set the number of replicas (additional copies) of an index (1 by default): 48 | # 49 | #index.number_of_replicas: 0 50 | 51 | # Note, that for development on a local machine, with small indices, it usually 52 | # makes sense to "disable" the distributed features: 53 | # 54 | index.number_of_shards: 1 55 | index.number_of_replicas: 0 56 | 57 | 58 | #################################### Paths #################################### 59 | 60 | # Path to directory containing configuration (this file and logging.yml): 61 | # 62 | # path.conf: /path/to/conf 63 | 64 | # Path to directory where to store index data allocated for this node. 65 | # 66 | # path.data: /path/to/data 67 | # 68 | # Can optionally include more than one location, causing data to be striped across 69 | # the locations (a la RAID 0) on a file level, favouring locations with most free 70 | # space on creation. 
For example: 71 | # 72 | # path.data: /path/to/data1,/path/to/data2 73 | 74 | # Path to temporary files: 75 | # 76 | # path.work: /path/to/work 77 | 78 | # Path to log files: 79 | # 80 | # path.logs: /path/to/logs 81 | 82 | # Path to where plugins are installed: 83 | # 84 | # path.plugins: /path/to/plugins 85 | 86 | 87 | ################################### Memory #################################### 88 | 89 | # ElasticSearch performs poorly when JVM starts swapping: you should ensure that 90 | # it _never_ swaps. 91 | # 92 | # Set this property to true to lock the memory: 93 | # 94 | #bootstrap.mlockall: true 95 | 96 | # Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set 97 | # to the same value, and that the machine has enough memory to allocate 98 | # for ElasticSearch, leaving enough memory for the operating system itself. 99 | # 100 | # You should also make sure that the ElasticSearch process is allowed to lock 101 | # the memory, eg. by using `ulimit -l unlimited`. 102 | 103 | 104 | ############################## Network And HTTP ############################### 105 | 106 | # ElasticSearch, by default, binds itself to the 0.0.0.0 address, and listens 107 | # on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node 108 | # communication. (the range means that if the port is busy, it will automatically 109 | # try the next port). 110 | 111 | # Set the bind address specifically (IPv4 or IPv6): 112 | # 113 | network.bind_host: 114 | 115 | # Set the address other nodes will use to communicate with this node. If not 116 | # set, it is automatically derived. It must point to an actual IP address. 
117 | # 118 | network.publish_host: 119 | 120 | # Set both 'bind_host' and 'publish_host': 121 | # 122 | network.host: http:// 123 | 124 | # Set a custom port for the node to node communication (9300 by default): 125 | # 126 | transport.tcp.port: 9300 127 | 128 | # Enable compression for all communication between nodes (disabled by default): 129 | # 130 | transport.tcp.compress: false 131 | 132 | # Set a custom port to listen for HTTP traffic: 133 | # 134 | http.port: 9200 135 | 136 | # Set a custom allowed content length: 137 | # 138 | # http.max_content_length: 100mb 139 | 140 | # Disable HTTP completely: 141 | # 142 | http.enabled: true 143 | 144 | -------------------------------------------------------------------------------- /logstash/config/log4j2.properties: -------------------------------------------------------------------------------- 1 | status = error 2 | name = LogstashPropertiesConfig 3 | 4 | appender.console.type = Console 5 | appender.console.name = plain_console 6 | appender.console.layout.type = PatternLayout 7 | appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n 8 | 9 | appender.json_console.type = Console 10 | appender.json_console.name = json_console 11 | appender.json_console.layout.type = JSONLayout 12 | appender.json_console.layout.compact = true 13 | appender.json_console.layout.eventEol = true 14 | 15 | appender.rolling.type = RollingFile 16 | appender.rolling.name = plain_rolling 17 | appender.rolling.fileName = ${sys:ls.logs}/logstash-${sys:ls.log.format}.log 18 | appender.rolling.filePattern = ${sys:ls.logs}/logstash-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz 19 | appender.rolling.policies.type = Policies 20 | appender.rolling.policies.time.type = TimeBasedTriggeringPolicy 21 | appender.rolling.policies.time.interval = 1 22 | appender.rolling.policies.time.modulate = true 23 | appender.rolling.layout.type = PatternLayout 24 | appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %-.10000m%n 25 | 
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy 26 | appender.rolling.policies.size.size = 100MB 27 | appender.rolling.strategy.type = DefaultRolloverStrategy 28 | appender.rolling.strategy.max = 30 29 | 30 | appender.json_rolling.type = RollingFile 31 | appender.json_rolling.name = json_rolling 32 | appender.json_rolling.fileName = ${sys:ls.logs}/logstash-${sys:ls.log.format}.log 33 | appender.json_rolling.filePattern = ${sys:ls.logs}/logstash-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz 34 | appender.json_rolling.policies.type = Policies 35 | appender.json_rolling.policies.time.type = TimeBasedTriggeringPolicy 36 | appender.json_rolling.policies.time.interval = 1 37 | appender.json_rolling.policies.time.modulate = true 38 | appender.json_rolling.layout.type = JSONLayout 39 | appender.json_rolling.layout.compact = true 40 | appender.json_rolling.layout.eventEol = true 41 | appender.json_rolling.policies.size.type = SizeBasedTriggeringPolicy 42 | appender.json_rolling.policies.size.size = 100MB 43 | appender.json_rolling.strategy.type = DefaultRolloverStrategy 44 | appender.json_rolling.strategy.max = 30 45 | 46 | rootLogger.level = ${sys:ls.log.level} 47 | rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console 48 | rootLogger.appenderRef.rolling.ref = ${sys:ls.log.format}_rolling 49 | 50 | # Slowlog 51 | 52 | appender.console_slowlog.type = Console 53 | appender.console_slowlog.name = plain_console_slowlog 54 | appender.console_slowlog.layout.type = PatternLayout 55 | appender.console_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n 56 | 57 | appender.json_console_slowlog.type = Console 58 | appender.json_console_slowlog.name = json_console_slowlog 59 | appender.json_console_slowlog.layout.type = JSONLayout 60 | appender.json_console_slowlog.layout.compact = true 61 | appender.json_console_slowlog.layout.eventEol = true 62 | 63 | appender.rolling_slowlog.type = RollingFile 64 | appender.rolling_slowlog.name = 
plain_rolling_slowlog 65 | appender.rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}.log 66 | appender.rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz 67 | appender.rolling_slowlog.policies.type = Policies 68 | appender.rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy 69 | appender.rolling_slowlog.policies.time.interval = 1 70 | appender.rolling_slowlog.policies.time.modulate = true 71 | appender.rolling_slowlog.layout.type = PatternLayout 72 | appender.rolling_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n 73 | appender.rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy 74 | appender.rolling_slowlog.policies.size.size = 100MB 75 | appender.rolling_slowlog.strategy.type = DefaultRolloverStrategy 76 | appender.rolling_slowlog.strategy.max = 30 77 | 78 | appender.json_rolling_slowlog.type = RollingFile 79 | appender.json_rolling_slowlog.name = json_rolling_slowlog 80 | appender.json_rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}.log 81 | appender.json_rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz 82 | appender.json_rolling_slowlog.policies.type = Policies 83 | appender.json_rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy 84 | appender.json_rolling_slowlog.policies.time.interval = 1 85 | appender.json_rolling_slowlog.policies.time.modulate = true 86 | appender.json_rolling_slowlog.layout.type = JSONLayout 87 | appender.json_rolling_slowlog.layout.compact = true 88 | appender.json_rolling_slowlog.layout.eventEol = true 89 | appender.json_rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy 90 | appender.json_rolling_slowlog.policies.size.size = 100MB 91 | appender.json_rolling_slowlog.strategy.type = DefaultRolloverStrategy 92 | appender.json_rolling_slowlog.strategy.max = 30 93 | 94 | logger.slowlog.name = slowlog 95 | 
logger.slowlog.level = trace 96 | logger.slowlog.appenderRef.console_slowlog.ref = ${sys:ls.log.format}_console_slowlog 97 | logger.slowlog.appenderRef.rolling_slowlog.ref = ${sys:ls.log.format}_rolling_slowlog 98 | logger.slowlog.additivity = false 99 | 100 | logger.licensereader.name = logstash.licensechecker.licensereader 101 | logger.licensereader.level = error 102 | -------------------------------------------------------------------------------- /elasticsearch/log4j2.properties: -------------------------------------------------------------------------------- 1 | status = error 2 | 3 | # log action execution errors for easier debugging 4 | logger.action.name = org.elasticsearch.action 5 | logger.action.level = debug 6 | 7 | appender.console.type = Console 8 | appender.console.name = console 9 | appender.console.layout.type = PatternLayout 10 | appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n 11 | 12 | appender.rolling.type = RollingFile 13 | appender.rolling.name = rolling 14 | appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log 15 | appender.rolling.layout.type = PatternLayout 16 | appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n 17 | appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz 18 | appender.rolling.policies.type = Policies 19 | appender.rolling.policies.time.type = TimeBasedTriggeringPolicy 20 | appender.rolling.policies.time.interval = 1 21 | appender.rolling.policies.time.modulate = true 22 | appender.rolling.policies.size.type = SizeBasedTriggeringPolicy 23 | appender.rolling.policies.size.size = 128MB 24 | appender.rolling.strategy.type = DefaultRolloverStrategy 25 | appender.rolling.strategy.fileIndex = nomax 26 | appender.rolling.strategy.action.type = Delete 27 | appender.rolling.strategy.action.basepath = 
${sys:es.logs.base_path} 28 | appender.rolling.strategy.action.condition.type = IfFileName 29 | appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* 30 | appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize 31 | appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB 32 | 33 | rootLogger.level = info 34 | rootLogger.appenderRef.console.ref = console 35 | rootLogger.appenderRef.rolling.ref = rolling 36 | 37 | appender.deprecation_rolling.type = RollingFile 38 | appender.deprecation_rolling.name = deprecation_rolling 39 | appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log 40 | appender.deprecation_rolling.layout.type = PatternLayout 41 | appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n 42 | appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz 43 | appender.deprecation_rolling.policies.type = Policies 44 | appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy 45 | appender.deprecation_rolling.policies.size.size = 1GB 46 | appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy 47 | appender.deprecation_rolling.strategy.max = 4 48 | 49 | logger.deprecation.name = org.elasticsearch.deprecation 50 | logger.deprecation.level = warn 51 | logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling 52 | logger.deprecation.additivity = false 53 | 54 | appender.index_search_slowlog_rolling.type = RollingFile 55 | appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling 56 | appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log 57 | appender.index_search_slowlog_rolling.layout.type = PatternLayout 58 | 
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n 59 | appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz 60 | appender.index_search_slowlog_rolling.policies.type = Policies 61 | appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy 62 | appender.index_search_slowlog_rolling.policies.size.size = 1GB 63 | appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy 64 | appender.index_search_slowlog_rolling.strategy.max = 4 65 | 66 | logger.index_search_slowlog_rolling.name = index.search.slowlog 67 | logger.index_search_slowlog_rolling.level = trace 68 | logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling 69 | logger.index_search_slowlog_rolling.additivity = false 70 | 71 | appender.index_indexing_slowlog_rolling.type = RollingFile 72 | appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling 73 | appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log 74 | appender.index_indexing_slowlog_rolling.layout.type = PatternLayout 75 | appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n 76 | appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz 77 | appender.index_indexing_slowlog_rolling.policies.type = Policies 78 | appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy 79 | appender.index_indexing_slowlog_rolling.policies.size.size = 1GB 80 | appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy 81 | appender.index_indexing_slowlog_rolling.strategy.max = 4 82 
| 83 | logger.index_indexing_slowlog.name = index.indexing.slowlog.index 84 | logger.index_indexing_slowlog.level = trace 85 | logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling 86 | logger.index_indexing_slowlog.additivity = false 87 | -------------------------------------------------------------------------------- /metricbeat/metricbeat.yml: -------------------------------------------------------------------------------- 1 | ########################## Metricbeat Configuration ########################### 2 | 3 | # This file is a full configuration example documenting all non-deprecated 4 | # options in comments. For a shorter configuration example, that contains only 5 | # the most common options, please see metricbeat.yml in the same directory. 6 | # 7 | # You can find the full configuration reference here: 8 | # https://www.elastic.co/guide/en/beats/metricbeat/index.html 9 | 10 | #============================ Config Reloading =============================== 11 | 12 | # Config reloading allows to dynamically load modules. Each file which is 13 | # monitored must contain one or multiple modules as a list. 14 | metricbeat.config.modules: 15 | 16 | # Glob pattern for configuration reloading 17 | path: ${path.config}/conf.d/*.yml 18 | 19 | # Period on which files under path should be checked for changes 20 | reload.period: 10s 21 | 22 | # Set to true to enable config reloading 23 | reload.enabled: false 24 | 25 | # Maximum amount of time to randomly delay the start of a metricset. Use 0 to 26 | # disable startup delay. 
27 | metricbeat.max_start_delay: 10s 28 | 29 | 30 | 31 | #========================== Modules configuration ============================ 32 | metricbeat.modules: 33 | 34 | #------------------------------- System Module ------------------------------- 35 | - module: system 36 | metricsets: 37 | - cpu # CPU usage 38 | - load # CPU load averages 39 | - memory # Memory usage 40 | - network # Network IO 41 | - process # Per process metrics 42 | - process_summary # Process summary 43 | - uptime # System Uptime 44 | #- core # Per CPU core usage 45 | #- diskio # Disk IO 46 | #- filesystem # File system usage for each mountpoint 47 | #- fsstat # File system summary metrics 48 | #- raid # Raid 49 | #- socket # Sockets and connection info (linux only) 50 | enabled: true 51 | period: 10s 52 | processes: ['.*'] 53 | 54 | # Configure the metric types that are included by these metricsets. 55 | cpu.metrics: ["percentages"] # The other available options are normalized_percentages and ticks. 56 | core.metrics: ["percentages"] # The other available option is ticks. 57 | 58 | 59 | #-------------------------------- Kafka Module ------------------------------- 60 | - module: kafka 61 | metricsets: ["consumergroup", "partition"] 62 | period: 10s 63 | hosts: [":9092"] 64 | enabled: true 65 | 66 | #client_id: metricbeat 67 | #retries: 3 68 | #backoff: 250ms 69 | 70 | # List of Topics to query metadata for. If empty, all topics will be queried. 71 | #topics: [] 72 | 73 | 74 | #------------------------------- Kibana Module ------------------------------- 75 | - module: kibana 76 | metricsets: ["status"] 77 | period: 10s 78 | hosts: [":5601"] 79 | basepath: "" 80 | enabled: true 81 | 82 | #================================ Outputs ====================================== 83 | 84 | # Configure what output to use when sending the data collected by the beat. 
85 | 86 | #-------------------------- Elasticsearch output ------------------------------- 87 | output.elasticsearch: 88 | # Boolean flag to enable or disable the output module. 89 | enabled: true 90 | 91 | # Array of hosts to connect to. 92 | # Scheme and port can be left out and will be set to the default (http and 9200) 93 | # In case you specify an additional path, the scheme is required: http://localhost:9200/path 94 | # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 95 | hosts: [":9200"] 96 | 97 | # Set gzip compression level. 98 | #compression_level: 0 99 | 100 | # Configure escaping html symbols in strings. 101 | #escape_html: true 102 | 103 | # Optional protocol and basic auth credentials. 104 | #protocol: "https" 105 | #username: "elastic" 106 | #password: "changeme" 107 | 108 | # Dictionary of HTTP parameters to pass within the url with index operations. 109 | #parameters: 110 | #param1: value1 111 | #param2: value2 112 | 113 | # Number of workers per Elasticsearch host. 114 | worker: 1 115 | 116 | # Optional index name. The default is "metricbeat" plus date 117 | # and generates [metricbeat-]YYYY.MM.DD keys. 118 | # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. 119 | index: "infrastructure-metrics" 120 | 121 | # Optional ingest node pipeline. By default no pipeline will be used. 122 | #pipeline: "" 123 | 124 | # Optional HTTP Path 125 | #path: "/elasticsearch" 126 | 127 | # Custom HTTP headers to add to each request 128 | #headers: 129 | # X-My-Header: Contents of the header 130 | 131 | # Proxy server url 132 | #proxy_url: http://proxy:3128 133 | 134 | # The number of times a particular Elasticsearch index operation is attempted. If 135 | # the indexing operation doesn't succeed after this many retries, the events are 136 | # dropped. The default is 3. 
137 | max_retries: 3 138 | 139 | # The maximum number of events to bulk in a single Elasticsearch bulk API index request. 140 | # The default is 50. 141 | bulk_max_size: 50 142 | 143 | # The number of seconds to wait before trying to reconnect to Elasticsearch 144 | # after a network error. After waiting backoff.init seconds, the Beat 145 | # tries to reconnect. If the attempt fails, the backoff timer is increased 146 | # exponentially up to backoff.max. After a successful connection, the backoff 147 | # timer is reset. The default is 1s. 148 | backoff.init: 1s 149 | 150 | # The maximum number of seconds to wait before attempting to connect to 151 | # Elasticsearch after a network error. The default is 60s. 152 | backoff.max: 60s 153 | 154 | # Configure http request timeout before failing a request to Elasticsearch. 155 | timeout: 90 156 | 157 | 158 | #================================= Paths ====================================== 159 | 160 | # The home path for the metricbeat installation. This is the default base path 161 | # for all other path settings and for miscellaneous files that come with the 162 | # distribution (for example, the sample dashboards). 163 | # If not set by a CLI flag or in the configuration file, the default for the 164 | # home path is the location of the binary. 165 | #path.home: 166 | 167 | # The configuration path for the metricbeat installation. This is the default 168 | # base path for configuration files, including the main YAML configuration file 169 | # and the Elasticsearch template file. If not set by a CLI flag or in the 170 | # configuration file, the default for the configuration path is the home path. 171 | #path.config: ${path.home} 172 | 173 | # The data path for the metricbeat installation. This is the default base path 174 | # for all the files in which metricbeat needs to store its data. 
If not set by a 175 | # CLI flag or in the configuration file, the default for the data path is a data 176 | # subdirectory inside the home path. 177 | #path.data: ${path.home}/data 178 | 179 | # The logs path for a metricbeat installation. This is the default location for 180 | # the Beat's log files. If not set by a CLI flag or in the configuration file, 181 | # the default for the logs path is a logs subdirectory inside the home path. 182 | #path.logs: ${path.home}/logs 183 | 184 | 185 | #================================ Logging ====================================== 186 | # There are four options for the log output: file, stderr, syslog, eventlog 187 | # The file output is the default. 188 | 189 | # Sets log level. The default log level is info. 190 | # Available log levels are: error, warning, info, debug 191 | logging.level: info 192 | 193 | # Enable debug output for selected components. To enable all selectors use ["*"] 194 | # Other available selectors are "beat", "publish", "service" 195 | # Multiple selectors can be chained. 196 | #logging.selectors: [ ] 197 | 198 | # Send all logging output to syslog. The default is false. 199 | #logging.to_syslog: false 200 | 201 | # Send all logging output to Windows Event Logs. The default is false. 202 | #logging.to_eventlog: false 203 | 204 | # If enabled, metricbeat periodically logs its internal metrics that have changed 205 | # in the last period. For each metric that changed, the delta from the value at 206 | # the beginning of the period is logged. Also, the total values for 207 | # all non-zero internal metrics are logged on shutdown. The default is true. 208 | logging.metrics.enabled: true 209 | 210 | # The period after which to log the internal metrics. The default is 30s. 211 | logging.metrics.period: 30s 212 | 213 | # Logging to rotating files. Set logging.to_files to false to disable logging to 214 | # files. 
215 | logging.to_files: true 216 | logging.files: 217 | # Configure the path where the logs are written. The default is the logs directory 218 | # under the home path (the binary location). 219 | #path: /var/log/metricbeat 220 | 221 | # The name of the files where the logs are written to. 222 | name: metricbeat 223 | 224 | # Configure log file size limit. If limit is reached, log file will be 225 | # automatically rotated 226 | #rotateeverybytes: 10485760 # = 10MB 227 | 228 | # Number of rotated log files to keep. Oldest files will be deleted first. 229 | keepfiles: 10 230 | 231 | # The permissions mask to apply when rotating log files. The default value is 0600. 232 | # Must be a valid Unix-style file permissions mask expressed in octal notation. 233 | #permissions: 0600 234 | 235 | # Enable log file rotation on time intervals in addition to size-based rotation. 236 | # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h 237 | # are boundary-aligned with minutes, hours, days, weeks, months, and years as 238 | # reported by the local system clock. All other intervals are calculated from the 239 | # unix epoch. Defaults to disabled. 240 | #interval: 0 241 | 242 | # Set to true to log messages in json format. 243 | #logging.json: false 244 | 245 | #================================ HTTP Endpoint ====================================== 246 | # Each beat can expose internal metrics through a HTTP endpoint. For security 247 | # reasons the endpoint is disabled by default. This feature is currently experimental. 248 | # Stats can be access through http://localhost:5066/stats . For pretty JSON output 249 | # append ?pretty to the URL. 250 | 251 | # Defines if the HTTP endpoint is enabled. 252 | #http.enabled: false 253 | 254 | # The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. 255 | #http.host: localhost 256 | 257 | # Port on which the HTTP endpoint will bind. Default is 5066. 
258 | #http.port: 5066 259 | --------------------------------------------------------------------------------