├── files ├── aws-ecr-credential-helper │ └── config.json ├── cli53 │ ├── config │ └── update-route53-dns ├── logstash │ ├── pipelines.yml │ ├── conf.d │ │ ├── api.conf │ │ ├── aws-api-gateway.conf │ │ └── aws-lambda.conf │ └── logstash.yml ├── influxdb │ ├── influxdb.conf │ └── types.db ├── grafana │ └── grafana.ini ├── elastic-apm-server │ └── apm-server.yml └── collectd │ └── collectd.conf ├── .gitignore ├── scripts ├── configure-timezone.sh ├── clean-apt.sh ├── configure-locale.sh ├── install-common-tools.sh ├── update-apt.sh ├── install-chrony.sh ├── install-portainer-docker.sh ├── install-aws-ecr-credential-helper.sh ├── install-docker.sh ├── install-collectd.sh ├── install-oracle-java.sh ├── install-nodejs.sh ├── install-elastic-apm-server.sh ├── configure-hostname.sh ├── install-influxdb-docker.sh ├── install-grafana-docker.sh ├── install-logstash.sh └── install-kong-docker.sh ├── .circleci └── config.yml ├── LICENSE ├── README.md ├── ubuntu-amd64-nodejs.json ├── ubuntu-amd64-elastic-apm-server.json ├── ubuntu-amd64-kong.json ├── ubuntu-amd64-grafana.json ├── ubuntu-amd64-logstash.json ├── ubuntu-amd64-influxdb.json ├── ubuntu-amd64-docker.json └── ubuntu-amd64-portainer.json /files/aws-ecr-credential-helper/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "credsStore": "ecr-login" 3 | } 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Output files 2 | dist*/ 3 | 4 | # Cache objects 5 | packer_cache/ 6 | 7 | # For built boxes 8 | *.box 9 | -------------------------------------------------------------------------------- /files/cli53/config: -------------------------------------------------------------------------------- 1 | # AWS credential used by AWS CLI and cli53 2 | AWS_ACCESS_KEY_ID= 3 | AWS_SECRET_ACCESS_KEY= 4 | 5 | # Time to Live in seconds (default: 60) 6 | TTL=300 7 | -------------------------------------------------------------------------------- /scripts/configure-timezone.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | TIMEZONE="Asia/Seoul" 6 | 7 | # Configure TimeZone 8 | ln -sf /usr/share/zoneinfo/$TIMEZONE /etc/localtime 9 | -------------------------------------------------------------------------------- /scripts/clean-apt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | apt-get autoremove -y 6 | apt-get clean -y 7 | 8 | # Add `sync` so Packer doesn't quit too early, before the large file is deleted. 
9 | sync 10 | -------------------------------------------------------------------------------- /scripts/configure-locale.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | LOCALE="ko_KR.UTF-8" 6 | 7 | # Install language pack 8 | apt-get install -y language-pack-ko 9 | 10 | # Configure locale 11 | locale-gen $LOCALE 12 | -------------------------------------------------------------------------------- /scripts/install-common-tools.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | apt-get install -y \ 6 | build-essential python-software-properties \ 7 | software-properties-common apt-transport-https \ 8 | ca-certificates 9 | 10 | apt-get install -y htop jq awscli curl wget git 11 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | jobs: 4 | build: 5 | docker: 6 | # Primary container 7 | - image: hashicorp/packer:light 8 | steps: 9 | - checkout 10 | - run: 11 | name: Validate Templates 12 | command: ls *.json | xargs -I{} packer validate {} 13 | -------------------------------------------------------------------------------- /scripts/update-apt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | # Set default answer for any questions a package might ask 6 | export DEBIAN_FRONTEND=noninteractive 7 | 8 | # Retrieve APT package sources 9 | apt-get update -qq 10 | 11 | # Install the newest versions of all installed packages 12 | apt-get upgrade -y 13 | -------------------------------------------------------------------------------- /scripts/install-chrony.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | NTP_SERVER="169.254.169.123" 6 | 7 | # Install chrony 8 | apt-get install -y chrony 9 | 10 | # Configure NTP server 11 | echo "server $NTP_SERVER prefer iburst" | tee -a /etc/chrony/chrony.conf 12 | 13 | # Verify the time synchronization 14 | chronyc tracking 15 | -------------------------------------------------------------------------------- /scripts/install-portainer-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | PORTAINER_VERSION=1.16.4 6 | 7 | # Run Portainer automatically with Docker 8 | docker run -d -p 9000:9000 \ 9 | --restart unless-stopped \ 10 | -v /var/run/docker.sock:/var/run/docker.sock \ 11 | -v $PWD/portainer-data:/data \ 12 | --name=portainer \ 13 | portainer/portainer:$PORTAINER_VERSION 14 | -------------------------------------------------------------------------------- /files/logstash/pipelines.yml: -------------------------------------------------------------------------------- 1 | # This file is where you define your pipelines. You can define multiple. 
2 | # For more information on multiple pipelines, see the documentation: 3 | # https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html 4 | 5 | - pipeline.id: api 6 | path.config: "/etc/logstash/conf.d/api.conf" 7 | - pipeline.id: aws-lambda 8 | path.config: "/etc/logstash/conf.d/aws-lambda.conf" 9 | - pipeline.id: aws-api-gateway 10 | path.config: "/etc/logstash/conf.d/aws-api-gateway.conf" 11 | -------------------------------------------------------------------------------- /scripts/install-aws-ecr-credential-helper.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | # Clone the official repository 6 | git clone https://github.com/awslabs/amazon-ecr-credential-helper /tmp/ecr-helper 7 | 8 | # Build binary 9 | cd /tmp/ecr-helper && make docker 10 | 11 | # Move the executable binary to $PATH 12 | mv ./bin/local/docker-credential-ecr-login /usr/local/bin/ 13 | 14 | # Copy configuration files 15 | mkdir -p ~/.docker 16 | cp /tmp/files/aws-ecr-credential-helper/config.json ~/.docker/config.json 17 | -------------------------------------------------------------------------------- /scripts/install-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | DOCKER_USER=ubuntu 6 | 7 | # Add Docker’s official GPG key 8 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 9 | 10 | # Set up the stable repository 11 | add-apt-repository \ 12 | "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ 13 | $(lsb_release -cs) stable" 14 | 15 | # Install Docker CE 16 | apt-get update && apt-get install -y docker-ce 17 | 18 | # Use Docker without root 19 | usermod -aG docker $DOCKER_USER 20 | -------------------------------------------------------------------------------- /scripts/install-collectd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | COLLECTD_VERSION=5.8 6 | 7 | # Add collectd’s official GPG key 8 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 3994D24FB8543576 9 | 10 | # Set up the latest repository 11 | add-apt-repository \ 12 | "deb http://pkg.ci.collectd.org/deb \ 13 | $(lsb_release -c -s) collectd-$COLLECTD_VERSION" 14 | 15 | # Install collectd 16 | apt-get update -qq && apt-get install -y collectd 17 | 18 | # Copy configuration files 19 | cp /tmp/files/collectd/collectd.conf /etc/collectd/collectd.conf 20 | -------------------------------------------------------------------------------- /scripts/install-oracle-java.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | JAVA_VERSION=8 6 | 7 | # Add Oracle JAVA PPA 8 | sudo add-apt-repository -y ppa:webupd8team/java 9 | 10 | # Accept the Oracle License 11 | echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections 12 | echo debconf shared/accepted-oracle-license-v1-1 seen true | debconf-set-selections 13 | 14 | # Set default answer for any questions a package might ask 15 | export DEBIAN_FRONTEND=noninteractive 16 | 17 | # Install Oracle JAVA 18 | apt-get update && apt-get install -y oracle-java$JAVA_VERSION-installer 19 | -------------------------------------------------------------------------------- /scripts/install-nodejs.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | NODEJS_VERSION=8 6 | 7 | # Set up the latest Node.js repository and add official GPG key 8 | curl -sL "https://deb.nodesource.com/setup_$NODEJS_VERSION.x" | bash - 9 | 10 | # Set up the latest Yarn repository and add official GPG key 11 | add-apt-repository \ 12 | "deb https://dl.yarnpkg.com/debian/ stable main" 13 | curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - 14 | 15 | # Install Node.js and Yarn 16 | apt-get update -qq && apt-get install -y nodejs yarn 17 | 18 | # Upgrade to the latest of NPM 19 | npm install -g npm 20 | 21 | # Install PM2 process manager 22 | npm install -g pm2 23 | -------------------------------------------------------------------------------- /scripts/install-elastic-apm-server.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | # Add Elastic’s official GPG key 6 | wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add - 7 | 8 | # Set up the stable repository 9 | # Do not use add-apt-repository as it will add a deb-src entry as well 10 | echo "deb https://artifacts.elastic.co/packages/6.x/apt stable main" \ 11 | | sudo tee -a /etc/apt/sources.list.d/elastic-6.x.list 12 | 13 | # Install Logstash 14 | apt-get update && apt-get install -y apm-server 15 | 16 | # Copy configuration files 17 | cp /tmp/files/elastic-apm-server/apm-server.yml /etc/apm-server/apm-server.yml 18 | 19 | # Enable as service daemon 20 | systemctl daemon-reload && systemctl enable apm-server.service 21 | -------------------------------------------------------------------------------- /scripts/configure-hostname.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | CLI53_VERSION=0.8.12 6 | CLI53_ARCH=linux-amd64 7 | 8 | # Install cli53 binary 9 | wget https://github.com/barnybug/cli53/releases/download/$CLI53_VERSION/cli53-$CLI53_ARCH -q -O /usr/local/bin/cli53 10 | chmod 755 /usr/local/bin/cli53 11 | 12 | # Setting configuration file 13 | mkdir -p /etc/route53 14 | mv /tmp/files/cli53/config /etc/route53/config 15 | chmod 700 /etc/route53 16 | chmod 600 /etc/route53/config 17 | 18 | # Upload update-route53-dns 19 | mv /tmp/files/cli53/update-route53-dns /usr/sbin/ 20 | 21 | # Enable as dhclient exit hook 22 | ln -s /usr/sbin/update-route53-dns /etc/dhcp/dhclient-exit-hooks.d/ 23 | ln -s /usr/sbin/update-route53-dns /var/lib/cloud/scripts/per-boot/ 24 | -------------------------------------------------------------------------------- /scripts/install-influxdb-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | INFLUXDB_VERSION=1.4.3 6 | 7 | # Increase OS UDP traffic limit to 25MB 8 | echo "net.core.rmem_max=26214400" | tee -a /etc/sysctl.conf 9 | echo "net.core.rmem_default=26214400" | tee -a /etc/sysctl.conf 10 | 11 | # Copy configuration files 12 | cp /tmp/files/influxdb/influxdb.conf $PWD 13 | cp /tmp/files/influxdb/types.db $PWD 14 | 15 | # Run InfluxDB automatically with Docker 16 | docker run -d -p 8086:8086 -p 25826:25826/udp \ 17 | --restart unless-stopped \ 18 | -v /var/lib/influxdb:/var/lib/influxdb \ 19 | -v $PWD/influxdb.conf:/etc/influxdb/influxdb.conf:ro \ 20 | -v $PWD/types.db:/usr/share/collectd/types.db:ro \ 21 | 
--name=influxdb \ 22 | influxdb:$INFLUXDB_VERSION-alpine 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Byungjin Park 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /scripts/install-grafana-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | GRAFANA_VERSION=4.6.3 6 | # Plugins to install as default 7 | GRAFANA_PLUGINS="raintank-worldping-app,cloudflare-app,grafana-worldmap-panel," 8 | GRAFANA_PLUGINS+="grafana-clock-panel,grafana-piechart-panel,btplc-alarm-box-panel," 9 | GRAFANA_PLUGINS+="novalabs-annotations-panel,digiapulssi-breadcrumb-panel," 10 | GRAFANA_PLUGINS+="digrich-bubblechart-panel,neocat-cal-heatmap-panel," 11 | GRAFANA_PLUGINS+="petrslavotinek-carpetplot-panel,briangann-datatable-panel," 12 | GRAFANA_PLUGINS+="jdbranham-diagram-panel,natel-discrete-panel," 13 | GRAFANA_PLUGINS+="mtanda-heatmap-epoch-panel,mtanda-histogram-panel," 14 | GRAFANA_PLUGINS+="natel-plotly-panel,vonage-status-panel" 15 | 16 | # Copy configuration files 17 | cp /tmp/files/grafana/grafana.ini $PWD 18 | 19 | # Run Grafana automatically with Docker 20 | docker run -d -p 3000:3000 \ 21 | --restart unless-stopped \ 22 | -v /var/lib/grafana:/var/lib/grafana \ 23 | -v $PWD/grafana.ini:/etc/grafana/grafana.ini \ 24 | -e "GF_INSTALL_PLUGINS=$GRAFANA_PLUGINS"\ 25 | --name=grafana \ 26 | grafana/grafana:$GRAFANA_VERSION 27 | -------------------------------------------------------------------------------- /scripts/install-logstash.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | # Add Elastic’s official GPG key 6 | wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add - 7 | 8 | # Set up the stable repository 9 | # Do not use add-apt-repository as it will add a deb-src entry as well 10 | echo "deb https://artifacts.elastic.co/packages/6.x/apt stable main" \ 11 | | sudo tee -a /etc/apt/sources.list.d/elastic-6.x.list 12 | 13 | # Install Logstash 14 | apt-get update && apt-get install -y logstash 15 | 16 | # Install Logstash X-Pack and other plugins 17 | cd /usr/share/logstash/bin && ./logstash-plugin 
install x-pack 18 | cd /usr/share/logstash/bin && ./logstash-plugin install logstash-input-cloudwatch_logs 19 | 20 | # Update all plugins 21 | cd /usr/share/logstash/bin && ./logstash-plugin update 22 | 23 | # Copy configuration files 24 | cp /tmp/files/logstash/logstash.yml /etc/logstash/logstash.yml 25 | cp /tmp/files/logstash/pipelines.yml /etc/logstash/pipelines.yml 26 | cp -a /tmp/files/logstash/conf.d/. /etc/logstash/conf.d 27 | 28 | # Enable as service daemon 29 | systemctl daemon-reload && systemctl enable logstash.service 30 | -------------------------------------------------------------------------------- /files/logstash/conf.d/api.conf: -------------------------------------------------------------------------------- 1 | # Pipeline for API server 2 | 3 | input { 4 | beats { 5 | # Filebeat 6 | port => 5044 7 | type => "api" 8 | } 9 | } 10 | 11 | 12 | filter { 13 | date { 14 | match => [ "time", "ISO8601" ] 15 | remove_field => [ "time" ] 16 | } 17 | 18 | mutate { 19 | remove_field => [ "v", "pid" ] 20 | } 21 | 22 | if [event] == "request" { 23 | geoip { 24 | source => "[req][ip]" 25 | target => "geoip" 26 | tag_on_failure => [ "_geoip_lookup_failure" ] 27 | } 28 | useragent { 29 | source => "[req][headers][user-agent]" 30 | target => "agent" 31 | } 32 | } 33 | } 34 | 35 | 36 | output { 37 | elasticsearch { 38 | hosts => [ "elasticsearch:9200" ] 39 | index => "log-%{env}-%{[@metadata][beat]}-%{+YYYY.MM.dd}" 40 | } 41 | 42 | if [env] == "production" { 43 | s3 { 44 | # access_key_id => "" 45 | # secret_access_key => "" 46 | region => "ap-northeast-2" 47 | bucket => "log.app.my-company" 48 | prefix => "api-%{env}/" 49 | canned_acl => "private" 50 | encoding => "none" 51 | restore => true 52 | rotation_strategy => "size_and_time" 53 | size_file => 5242880 54 | time_file => 60 55 | temporary_directory => "/tmp/logstash" 56 | codec => "json_lines" 57 | } 58 | } 59 | 60 | # For Debugging 61 | # stdout { 62 | # codec => rubydebug { 63 | # metadata => true 64 | # } 65 | # } 66 | } 67 | -------------------------------------------------------------------------------- /scripts/install-kong-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euf -o pipefail 4 | 5 | KONG_VERSION=0.14.1 6 | TRUSTED_IPS="10.0.0.0/16" 7 | 8 | # Create a Docker network for Kong 9 | docker network create kong-net 10 | 11 | # Run Postgres DB for Kong 12 | docker run -d --name kong-database \ 13 | --restart unless-stopped \ 14 | --network=kong-net \ 15 | -p 5432:5432 \ 16 | -e "POSTGRES_USER=kong" \ 17 | -e "POSTGRES_DB=kong" \ 18 | postgres:9.6 19 | # Run DB migration jobs 20 | docker run --rm \ 21 | --network=kong-net \ 22 | -e "KONG_DATABASE=postgres" \ 23 | -e "KONG_PG_HOST=kong-database" \ 24 | -e "KONG_CASSANDRA_CONTACT_POINTS=kong-database" \ 25 | kong:latest kong migrations up 26 | # Run Kong automatically with Docker 27 | docker run -d --name kong \ 28 | --restart unless-stopped \ 29 | --network=kong-net \ 30 | -e "KONG_DATABASE=postgres" \ 31 | -e "KONG_PG_HOST=kong-database" \ 32 | -e "KONG_CASSANDRA_CONTACT_POINTS=kong-database" \ 33 | -e "KONG_PROXY_ACCESS_LOG=/dev/stdout" \ 34 | -e "KONG_ADMIN_ACCESS_LOG=/dev/stdout" \ 35 | -e "KONG_PROXY_ERROR_LOG=/dev/stderr" \ 36 | -e "KONG_ADMIN_ERROR_LOG=/dev/stderr" \ 37 | -e "KONG_TRUSTED_IPS=${TRUSTED_IPS}" \ 38 | -e "KONG_ANONYMOUS_REPORTS=off" \ 39 | -e "KONG_HEADERS=off" \ 40 | -e "KONG_ERROR_DEFAULT_TYPE=application/json" \ 41 | -e "KONG_PROXY_LISTEN=0.0.0.0:8000, 0.0.0.0:8443 http2 
ssl" \ 42 | -e "KONG_ADMIN_LISTEN=0.0.0.0:8001, 0.0.0.0:8444 ssl" \ 43 | -p 8000:8000 \ 44 | -p 8443:8443 \ 45 | -p 8001:8001 \ 46 | -p 8444:8444 \ 47 | kong:${KONG_VERSION}-alpine 48 | -------------------------------------------------------------------------------- /files/cli53/update-route53-dns: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Make sure only root can run our script 4 | if [ "$(id -u)" != "0" ]; then 5 | echo "This script must be run as root" 1>&2 6 | exit 1 7 | fi 8 | 9 | # Load configuration 10 | . /etc/route53/config 11 | 12 | # Export access key ID and secret for cli53 13 | # export AWS_ACCESS_KEY_ID 14 | # export AWS_SECRET_ACCESS_KEY 15 | 16 | REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document|jq -r '.region') 17 | 18 | INSTANCE_ID=$(ec2metadata --instance-id) 19 | INSTANCE_NAME=$(aws ec2 describe-tags --region $REGION --output=text --filters "Name=resource-id,Values=$INSTANCE_ID" "Name=key,Values=Name" | cut -f5) 20 | PRIVATE_IP=$(ec2metadata --local-ipv4) 21 | PRIVATE_DOMAIN=$(aws ec2 describe-tags --region $REGION --output=text --filters "Name=resource-id,Values=$INSTANCE_ID" "Name=key,Values=PrivateDomain" | cut -f5) 22 | 23 | if [ -z "$INSTANCE_NAME" ]; then 24 | echo "This script needs \`Name\` tag" 1>&2 25 | exit 0 26 | fi 27 | 28 | logger "Setting hostname to $INSTANCE_NAME" 29 | 30 | # Set also the hostname to the running instance 31 | hostnamectl set-hostname $INSTANCE_NAME 32 | 33 | if [ -z "$PRIVATE_DOMAIN" ]; then 34 | echo "\`PrivateDomain\` tag is needed to set the private domain" 1>&2 35 | exit 0 36 | fi 37 | 38 | logger "Setting Route53 DNS A record $PRIVATE_IP to $HOSTNAME" 39 | 40 | # Update a new A record on Route 53 41 | /usr/local/bin/cli53 instances \ 42 | --region $REGION \ 43 | --internal \ 44 | --a-record \ 45 | --ttl ${TTL:-300} \ 46 | --match "^$INSTANCE_NAME$" \ 47 | $PRIVATE_DOMAIN 48 | -------------------------------------------------------------------------------- /files/logstash/conf.d/aws-api-gateway.conf: -------------------------------------------------------------------------------- 1 | # Pipeline for AWS API Gateway 2 | 3 | input { 4 | cloudwatch_logs { 5 | log_group => "/aws/apigateway/" 6 | log_group_prefix => true 7 | start_position => "end" 8 | interval => 10 9 | region => "ap-northeast-2" 10 | # access_key_id => "" 11 | # secret_access_key => "" 12 | type => "aws-api-gateway" 13 | } 14 | } 15 | 16 | 17 | filter { 18 | grok { 19 | match => { 20 | "[cloudwatch_logs][log_group]" => [ 21 | "^/aws/apigateway/%{GREEDYDATA:name}$" 22 | ] 23 | } 24 | break_on_match => false 25 | tag_on_failure => [] 26 | } 27 | 28 | json { 29 | source => "message" 30 | skip_on_invalid_json => true 31 | remove_field => [ "message" ] 32 | } 33 | 34 | if [apigateway][stage] =~ /prod/ { 35 | mutate { 36 | add_field => { "env" => "production" } 37 | } 38 | } else if [apigateway][stage] =~ /dev/ { 39 | mutate { 40 | add_field => { "env" => "development" } 41 | } 42 | } else { 43 | drop {} 44 | } 45 | 46 | mutate { 47 | add_field => { "event" => "access" } 48 | convert => { 49 | "[req][time]" => "integer" 50 | "[res][length]" => "integer" 51 | "[res][statusCode]" => "integer" 52 | } 53 | } 54 | 55 | geoip { 56 | source => "[req][ip]" 57 | target => "geoip" 58 | tag_on_failure => [ "_geoip_lookup_failure" ] 59 | } 60 | 61 | date { 62 | match => [ "[req][time]", "UNIX_MS" ] 63 | target => "[req][time]" 64 | } 65 | 66 | useragent { 67 | source => 
"[req][userAgent]" 68 | target => "agent" 69 | } 70 | } 71 | 72 | 73 | output { 74 | elasticsearch { 75 | hosts => [ "elasticsearch:9200" ] 76 | index => "log-%{env}-apigateway-%{+YYYY.MM.dd}" 77 | } 78 | 79 | # For Debugging 80 | # stdout { 81 | # codec => rubydebug 82 | # } 83 | } 84 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | [README header: logo and title "packer-templates" ("Packer Templates"), tagline "Packer Templates for AWS AMI and Vagrant Box", badges: CircleCI · MIT License · Open Source Love] 24 | 
25 | 26 | **Packer Templates** include [Packer](https://www.packer.io) templates and installation scripts which can be used to generate AWS AMI and [Vagrant](https://www.vagrantup.com/) Box. 27 | 28 | 29 | ## Usage 30 | 31 | To be updated. 32 | 33 | ```bash 34 | $ git clone https://github.com/posquit0/packer-templates 35 | $ cd packer-templates 36 | 37 | # Usage: packer build ${packer-template-file} 38 | $ packer build ubuntu-amd64-grafana.json 39 | ``` 40 | 41 | 42 | ## Contributing 43 | 44 | This project follows the [**Contributor Covenant**](http://contributor-covenant.org/version/1/4/) Code of Conduct. 45 | 46 | #### Bug Reports & Feature Requests 47 | 48 | Please use the [issue tracker](https://github.com/posquit0/packer-templates/issues) to report any bugs or ask feature requests. 49 | 50 | 51 | ## License 52 | 53 | Provided under the terms of the [MIT License](https://github.com/posquit0/packer-templates/blob/master/LICENSE). 54 | 55 | Copyright © 2017, [Byungjin Park](http://www.posquit0.com). 56 | -------------------------------------------------------------------------------- /ubuntu-amd64-nodejs.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "name": "ubuntu-amd64-nodejs", 4 | "version": "{{isotime \"2006-01-02-1504\"}}", 5 | "aws_region": "ap-northeast-2", 6 | "aws_access_key": "", 7 | "aws_secret_key": "", 8 | "aws_profile": "{{env `AWS_PROFILE`}}", 9 | "vpc_id": "", 10 | "subnet_id": "", 11 | "security_group_id": "" 12 | }, 13 | "builders": [{ 14 | "type": "amazon-ebs", 15 | "access_key": "{{user `aws_access_key`}}", 16 | "secret_key": "{{user `aws_secret_key`}}", 17 | "profile": "{{ user `aws_profile`}}", 18 | "region": "{{user `aws_region`}}", 19 | "instance_type": "t2.micro", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["099720109477"], 27 | "most_recent": true 28 | }, 29 | "vpc_id": "{{user `vpc_id`}}", 30 | "subnet_id": "{{user `subnet_id`}}", 31 | "security_group_id": "{{user `security_group_id`}}", 32 | "ssh_username": "ubuntu", 33 | "ami_name": "{{user `name`}}-{{user `version`}}", 34 | "ami_description": "AMI for {{user `name`}}", 35 | "tags": { 36 | "Name": "{{user `name`}}", 37 | "Version": "{{user `version`}}" 38 | } 39 | }], 40 | "provisioners": [{ 41 | "type": "file", 42 | "source": "files", 43 | "destination": "/tmp" 44 | }, { 45 | "type": "shell", 46 | "scripts": [ 47 | "scripts/update-apt.sh", 48 | "scripts/install-common-tools.sh", 49 | "scripts/install-chrony.sh", 50 | "scripts/configure-timezone.sh", 51 | "scripts/configure-locale.sh", 52 | "scripts/configure-hostname.sh", 53 | "scripts/install-collectd.sh", 54 | "scripts/install-nodejs.sh", 55 | "scripts/clean-apt.sh" 56 | ], 57 | "execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" 58 | }], 59 | "post-processors": [{ 60 | "type": "vagrant", 61 | "compression_level": 6, 62 | "output": "dist/{{user `name`}}-{{.Provider}}.box", 63 | "keep_input_artifact": true 64 | }, { 65 | "type": "checksum", 66 | "checksum_types": ["md5", "sha256"], 67 | "output": "dist/{{user `name`}}-{{.BuilderType}}.{{.ChecksumType}}" 68 | }, { 69 | "type": "manifest", 70 | "output": "dist/{{user `name`}}-manifest.json", 71 | "strip_path": false 72 | }] 73 | } 74 | -------------------------------------------------------------------------------- /ubuntu-amd64-elastic-apm-server.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "name": "ubuntu-amd64-elastic-apm-server", 4 | "version": "{{isotime \"2006-01-02-1504\"}}", 5 | "aws_region": "ap-northeast-2", 6 | "aws_access_key": "", 7 | "aws_secret_key": "", 8 | "aws_profile": "{{env `AWS_PROFILE`}}", 9 | "vpc_id": "", 10 | "subnet_id": "", 11 | "security_group_id": "" 12 | }, 13 | "builders": [{ 14 | "type": "amazon-ebs", 15 | "access_key": "{{user `aws_access_key`}}", 16 | "secret_key": "{{user `aws_secret_key`}}", 17 | "profile": "{{ user `aws_profile`}}", 18 | "region": "{{user `aws_region`}}", 19 | "instance_type": "t2.micro", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["099720109477"], 27 | "most_recent": true 28 | }, 29 | "vpc_id": "{{user `vpc_id`}}", 30 | "subnet_id": "{{user `subnet_id`}}", 31 | "security_group_id": "{{user `security_group_id`}}", 32 | "ssh_username": "ubuntu", 33 | "ami_name": "{{user `name`}}-{{user `version`}}", 34 | "ami_description": "AMI for {{user `name`}}", 35 | "tags": { 36 | "Name": "{{user `name`}}", 37 | "Version": "{{user `version`}}" 38 | } 39 | }], 40 | "provisioners": [{ 41 | "type": "file", 42 | "source": "files", 43 | "destination": "/tmp" 44 | }, { 45 | "type": "shell", 46 | "scripts": [ 47 | "scripts/update-apt.sh", 48 | "scripts/install-common-tools.sh", 49 | "scripts/install-chrony.sh", 50 | "scripts/configure-timezone.sh", 51 | "scripts/configure-locale.sh", 52 | "scripts/configure-hostname.sh", 53 | "scripts/install-collectd.sh", 54 | "scripts/install-elastic-apm-server.sh", 55 | "scripts/clean-apt.sh" 56 | ], 57 | "execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" 58 | }], 59 | "post-processors": [{ 60 | "type": "vagrant", 61 | "compression_level": 6, 62 | "output": "dist/{{user `name`}}-{{.Provider}}.box", 63 | "keep_input_artifact": true 64 | }, { 65 | "type": "checksum", 66 | "checksum_types": ["md5", "sha256"], 67 | "output": "dist/{{user `name`}}-{{.BuilderType}}.{{.ChecksumType}}" 68 | }, { 69 | "type": "manifest", 70 | "output": "dist/{{user `name`}}-manifest.json", 71 | "strip_path": false 72 | }] 73 | } 74 | -------------------------------------------------------------------------------- /ubuntu-amd64-kong.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "name": "ubuntu-amd64-kong", 4 | "version": "{{isotime \"2006-01-02-1504\"}}", 5 | "aws_region": "ap-northeast-2", 6 | "aws_access_key": "", 7 | "aws_secret_key": "", 8 | "aws_profile": "{{env `AWS_PROFILE`}}", 9 | "vpc_id": "", 10 | "subnet_id": "", 11 | "security_group_id": "" 12 | }, 13 | "builders": [{ 14 | "type": "amazon-ebs", 15 | "access_key": "{{user `aws_access_key`}}", 16 | "secret_key": "{{user `aws_secret_key`}}", 17 | "profile": "{{ user `aws_profile`}}", 18 | "region": "{{user `aws_region`}}", 19 | "instance_type": "t2.micro", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["099720109477"], 27 | "most_recent": true 28 | }, 29 | "vpc_id": "{{user `vpc_id`}}", 30 | "subnet_id": "{{user `subnet_id`}}", 31 | "security_group_id": "{{user `security_group_id`}}", 32 | "ssh_username": "ubuntu", 33 | "ami_name": "{{user `name`}}-{{user `version`}}", 34 | 
"ami_description": "AMI for {{user `name`}}", 35 | "tags": { 36 | "Name": "{{user `name`}}", 37 | "Version": "{{user `version`}}" 38 | } 39 | }], 40 | "provisioners": [{ 41 | "type": "file", 42 | "source": "files", 43 | "destination": "/tmp" 44 | }, { 45 | "type": "shell", 46 | "scripts": [ 47 | "scripts/update-apt.sh", 48 | "scripts/install-common-tools.sh", 49 | "scripts/install-chrony.sh", 50 | "scripts/configure-timezone.sh", 51 | "scripts/configure-locale.sh", 52 | "scripts/configure-hostname.sh", 53 | "scripts/install-collectd.sh", 54 | "scripts/install-docker.sh", 55 | "scripts/install-kong-docker.sh", 56 | "scripts/clean-apt.sh" 57 | ], 58 | "execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" 59 | }], 60 | "post-processors": [{ 61 | "type": "vagrant", 62 | "compression_level": 6, 63 | "output": "dist/{{user `name`}}-{{.Provider}}.box", 64 | "keep_input_artifact": true 65 | }, { 66 | "type": "checksum", 67 | "checksum_types": ["md5", "sha256"], 68 | "output": "dist/{{user `name`}}-{{.BuilderType}}.{{.ChecksumType}}" 69 | }, { 70 | "type": "manifest", 71 | "output": "dist/{{user `name`}}-manifest.json", 72 | "strip_path": false 73 | }] 74 | } 75 | -------------------------------------------------------------------------------- /ubuntu-amd64-grafana.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "name": "ubuntu-amd64-grafana", 4 | "version": "{{isotime \"2006-01-02-1504\"}}", 5 | "aws_region": "ap-northeast-2", 6 | "aws_access_key": "", 7 | "aws_secret_key": "", 8 | "aws_profile": "{{env `AWS_PROFILE`}}", 9 | "vpc_id": "", 10 | "subnet_id": "", 11 | "security_group_id": "" 12 | }, 13 | "builders": [{ 14 | "type": "amazon-ebs", 15 | "access_key": "{{user `aws_access_key`}}", 16 | "secret_key": "{{user `aws_secret_key`}}", 17 | "profile": "{{ user `aws_profile`}}", 18 | "region": "{{user `aws_region`}}", 19 | "instance_type": "t2.micro", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["099720109477"], 27 | "most_recent": true 28 | }, 29 | "vpc_id": "{{user `vpc_id`}}", 30 | "subnet_id": "{{user `subnet_id`}}", 31 | "security_group_id": "{{user `security_group_id`}}", 32 | "ssh_username": "ubuntu", 33 | "ami_name": "{{user `name`}}-{{user `version`}}", 34 | "ami_description": "AMI for {{user `name`}}", 35 | "tags": { 36 | "Name": "{{user `name`}}", 37 | "Version": "{{user `version`}}" 38 | } 39 | }], 40 | "provisioners": [{ 41 | "type": "file", 42 | "source": "files", 43 | "destination": "/tmp" 44 | }, { 45 | "type": "shell", 46 | "scripts": [ 47 | "scripts/update-apt.sh", 48 | "scripts/install-common-tools.sh", 49 | "scripts/install-chrony.sh", 50 | "scripts/configure-timezone.sh", 51 | "scripts/configure-locale.sh", 52 | "scripts/configure-hostname.sh", 53 | "scripts/install-collectd.sh", 54 | "scripts/install-docker.sh", 55 | "scripts/install-grafana-docker.sh", 56 | "scripts/clean-apt.sh" 57 | ], 58 | "execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" 59 | }], 60 | "post-processors": [{ 61 | "type": "vagrant", 62 | "compression_level": 6, 63 | "output": "dist/{{user `name`}}-{{.Provider}}.box", 64 | "keep_input_artifact": true 65 | }, { 66 | "type": "checksum", 67 | "checksum_types": ["md5", "sha256"], 68 | "output": "dist/{{user `name`}}-{{.BuilderType}}.{{.ChecksumType}}" 69 | }, { 70 | "type": "manifest", 71 | "output": "dist/{{user 
`name`}}-manifest.json", 72 | "strip_path": false 73 | }] 74 | } 75 | -------------------------------------------------------------------------------- /ubuntu-amd64-logstash.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "name": "ubuntu-amd64-logstash", 4 | "version": "{{isotime \"2006-01-02-1504\"}}", 5 | "aws_region": "ap-northeast-2", 6 | "aws_access_key": "", 7 | "aws_secret_key": "", 8 | "aws_profile": "{{env `AWS_PROFILE`}}", 9 | "vpc_id": "", 10 | "subnet_id": "", 11 | "security_group_id": "" 12 | }, 13 | "builders": [{ 14 | "type": "amazon-ebs", 15 | "access_key": "{{user `aws_access_key`}}", 16 | "secret_key": "{{user `aws_secret_key`}}", 17 | "profile": "{{ user `aws_profile`}}", 18 | "region": "{{user `aws_region`}}", 19 | "instance_type": "t2.micro", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["099720109477"], 27 | "most_recent": true 28 | }, 29 | "vpc_id": "{{user `vpc_id`}}", 30 | "subnet_id": "{{user `subnet_id`}}", 31 | "security_group_id": "{{user `security_group_id`}}", 32 | "ssh_username": "ubuntu", 33 | "ami_name": "{{user `name`}}-{{user `version`}}", 34 | "ami_description": "AMI for {{user `name`}}", 35 | "tags": { 36 | "Name": "{{user `name`}}", 37 | "Version": "{{user `version`}}" 38 | } 39 | }], 40 | "provisioners": [{ 41 | "type": "file", 42 | "source": "files", 43 | "destination": "/tmp" 44 | }, { 45 | "type": "shell", 46 | "scripts": [ 47 | "scripts/update-apt.sh", 48 | "scripts/install-common-tools.sh", 49 | "scripts/install-chrony.sh", 50 | "scripts/configure-timezone.sh", 51 | "scripts/configure-locale.sh", 52 | "scripts/configure-hostname.sh", 53 | "scripts/install-collectd.sh", 54 | "scripts/install-oracle-java.sh", 55 | "scripts/install-logstash.sh", 56 | "scripts/clean-apt.sh" 57 | ], 58 | "execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" 59 | }], 60 | "post-processors": [{ 61 | "type": "vagrant", 62 | "compression_level": 6, 63 | "output": "dist/{{user `name`}}-{{.Provider}}.box", 64 | "keep_input_artifact": true 65 | }, { 66 | "type": "checksum", 67 | "checksum_types": ["md5", "sha256"], 68 | "output": "dist/{{user `name`}}-{{.BuilderType}}.{{.ChecksumType}}" 69 | }, { 70 | "type": "manifest", 71 | "output": "dist/{{user `name`}}-manifest.json", 72 | "strip_path": false 73 | }] 74 | } 75 | -------------------------------------------------------------------------------- /ubuntu-amd64-influxdb.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "name": "ubuntu-amd64-influxdb", 4 | "version": "{{isotime \"2006-01-02-1504\"}}", 5 | "aws_region": "ap-northeast-2", 6 | "aws_access_key": "", 7 | "aws_secret_key": "", 8 | "aws_profile": "{{env `AWS_PROFILE`}}", 9 | "vpc_id": "", 10 | "subnet_id": "", 11 | "security_group_id": "" 12 | }, 13 | "builders": [{ 14 | "type": "amazon-ebs", 15 | "access_key": "{{user `aws_access_key`}}", 16 | "secret_key": "{{user `aws_secret_key`}}", 17 | "profile": "{{ user `aws_profile`}}", 18 | "region": "{{user `aws_region`}}", 19 | "instance_type": "t2.micro", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["099720109477"], 27 | "most_recent": true 28 | }, 29 | "vpc_id": "{{user 
`vpc_id`}}", 30 | "subnet_id": "{{user `subnet_id`}}", 31 | "security_group_id": "{{user `security_group_id`}}", 32 | "ssh_username": "ubuntu", 33 | "ami_name": "{{user `name`}}-{{user `version`}}", 34 | "ami_description": "AMI for {{user `name`}}", 35 | "tags": { 36 | "Name": "{{user `name`}}", 37 | "Version": "{{user `version`}}" 38 | } 39 | }], 40 | "provisioners": [{ 41 | "type": "file", 42 | "source": "files", 43 | "destination": "/tmp" 44 | }, { 45 | "type": "shell", 46 | "scripts": [ 47 | "scripts/update-apt.sh", 48 | "scripts/install-common-tools.sh", 49 | "scripts/install-chrony.sh", 50 | "scripts/configure-timezone.sh", 51 | "scripts/configure-locale.sh", 52 | "scripts/configure-hostname.sh", 53 | "scripts/install-collectd.sh", 54 | "scripts/install-docker.sh", 55 | "scripts/install-influxdb-docker.sh", 56 | "scripts/clean-apt.sh" 57 | ], 58 | "execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" 59 | }], 60 | "post-processors": [{ 61 | "type": "vagrant", 62 | "compression_level": 6, 63 | "output": "dist/{{user `name`}}-{{.Provider}}.box", 64 | "keep_input_artifact": true 65 | }, { 66 | "type": "checksum", 67 | "checksum_types": ["md5", "sha256"], 68 | "output": "dist/{{user `name`}}-{{.BuilderType}}.{{.ChecksumType}}" 69 | }, { 70 | "type": "manifest", 71 | "output": "dist/{{user `name`}}-manifest.json", 72 | "strip_path": false 73 | }] 74 | } 75 | -------------------------------------------------------------------------------- /ubuntu-amd64-docker.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "name": "ubuntu-amd64-docker", 4 | "version": "{{isotime \"2006-01-02-1504\"}}", 5 | "aws_region": "ap-northeast-2", 6 | "aws_access_key": "", 7 | "aws_secret_key": "", 8 | "aws_profile": "{{env `AWS_PROFILE`}}", 9 | "vpc_id": "", 10 | "subnet_id": "", 11 | "security_group_id": "" 12 | }, 13 | "builders": [{ 14 | "type": "amazon-ebs", 15 | "access_key": "{{user `aws_access_key`}}", 16 | "secret_key": "{{user `aws_secret_key`}}", 17 | "profile": "{{ user `aws_profile`}}", 18 | "region": "{{user `aws_region`}}", 19 | "instance_type": "t2.micro", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["099720109477"], 27 | "most_recent": true 28 | }, 29 | "vpc_id": "{{user `vpc_id`}}", 30 | "subnet_id": "{{user `subnet_id`}}", 31 | "security_group_id": "{{user `security_group_id`}}", 32 | "ssh_username": "ubuntu", 33 | "ami_name": "{{user `name`}}-{{user `version`}}", 34 | "ami_description": "AMI for {{user `name`}}", 35 | "tags": { 36 | "Name": "{{user `name`}}", 37 | "Version": "{{user `version`}}" 38 | } 39 | }], 40 | "provisioners": [{ 41 | "type": "file", 42 | "source": "files", 43 | "destination": "/tmp" 44 | }, { 45 | "type": "shell", 46 | "scripts": [ 47 | "scripts/update-apt.sh", 48 | "scripts/install-common-tools.sh", 49 | "scripts/install-chrony.sh", 50 | "scripts/configure-timezone.sh", 51 | "scripts/configure-locale.sh", 52 | "scripts/configure-hostname.sh", 53 | "scripts/install-collectd.sh", 54 | "scripts/install-docker.sh", 55 | "scripts/install-aws-ecr-credential-helper.sh", 56 | "scripts/clean-apt.sh" 57 | ], 58 | "execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" 59 | }], 60 | "post-processors": [{ 61 | "type": "vagrant", 62 | "compression_level": 6, 63 | "output": "dist/{{user `name`}}-{{.Provider}}.box", 64 | "keep_input_artifact": true 65 | 
}, { 66 | "type": "checksum", 67 | "checksum_types": ["md5", "sha256"], 68 | "output": "dist/{{user `name`}}-{{.BuilderType}}.{{.ChecksumType}}" 69 | }, { 70 | "type": "manifest", 71 | "output": "dist/{{user `name`}}-manifest.json", 72 | "strip_path": false 73 | }] 74 | } 75 | -------------------------------------------------------------------------------- /ubuntu-amd64-portainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "name": "ubuntu-amd64-portainer", 4 | "version": "{{isotime \"2006-01-02-1504\"}}", 5 | "aws_region": "ap-northeast-2", 6 | "aws_access_key": "", 7 | "aws_secret_key": "", 8 | "aws_profile": "{{env `AWS_PROFILE`}}", 9 | "vpc_id": "", 10 | "subnet_id": "", 11 | "security_group_id": "" 12 | }, 13 | "builders": [{ 14 | "type": "amazon-ebs", 15 | "access_key": "{{user `aws_access_key`}}", 16 | "secret_key": "{{user `aws_secret_key`}}", 17 | "profile": "{{ user `aws_profile`}}", 18 | "region": "{{user `aws_region`}}", 19 | "instance_type": "t2.micro", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["099720109477"], 27 | "most_recent": true 28 | }, 29 | "vpc_id": "{{user `vpc_id`}}", 30 | "subnet_id": "{{user `subnet_id`}}", 31 | "security_group_id": "{{user `security_group_id`}}", 32 | "ssh_username": "ubuntu", 33 | "ami_name": "{{user `name`}}-{{user `version`}}", 34 | "ami_description": "AMI for {{user `name`}}", 35 | "tags": { 36 | "Name": "{{user `name`}}", 37 | "Version": "{{user `version`}}" 38 | } 39 | }], 40 | "provisioners": [{ 41 | "type": "file", 42 | "source": "files", 43 | "destination": "/tmp" 44 | }, { 45 | "type": "shell", 46 | "scripts": [ 47 | "scripts/update-apt.sh", 48 | "scripts/install-common-tools.sh", 49 | "scripts/install-chrony.sh", 50 | "scripts/configure-timezone.sh", 51 | "scripts/configure-locale.sh", 52 | "scripts/configure-hostname.sh", 53 | "scripts/install-collectd.sh", 54 | "scripts/install-docker.sh", 55 | "scripts/install-aws-ecr-credential-helper.sh", 56 | "scripts/install-portainer-docker.sh", 57 | "scripts/clean-apt.sh" 58 | ], 59 | "execute_command": "sudo -S sh -c '{{ .Vars }} {{ .Path }}'" 60 | }], 61 | "post-processors": [{ 62 | "type": "vagrant", 63 | "compression_level": 6, 64 | "output": "dist/{{user `name`}}-{{.Provider}}.box", 65 | "keep_input_artifact": true 66 | }, { 67 | "type": "checksum", 68 | "checksum_types": ["md5", "sha256"], 69 | "output": "dist/{{user `name`}}-{{.BuilderType}}.{{.ChecksumType}}" 70 | }, { 71 | "type": "manifest", 72 | "output": "dist/{{user `name`}}-manifest.json", 73 | "strip_path": false 74 | }] 75 | } 76 | -------------------------------------------------------------------------------- /files/logstash/conf.d/aws-lambda.conf: -------------------------------------------------------------------------------- 1 | # Pipeline for AWS Lambda 2 | 3 | input { 4 | cloudwatch_logs { 5 | log_group => "/aws/lambda/" 6 | log_group_prefix => true 7 | start_position => "end" 8 | interval => 10 9 | region => "ap-northeast-2" 10 | # access_key_id => "" 11 | # secret_access_key => "" 12 | type => "aws-lambda" 13 | } 14 | } 15 | 16 | 17 | filter { 18 | grok { 19 | match => { 20 | "[cloudwatch_logs][log_group]" => [ 21 | "^/aws/lambda/%{GREEDYDATA:name}$", 22 | "^/aws/lambda/.*-%{GREEDYDATA:[lambda][stage]}$" 23 | ] 24 | } 25 | break_on_match => false 26 | tag_on_failure => [] 27 | } 28 | 29 | if 
[lambda][stage] =~ /prod/ { 30 | mutate { 31 | add_field => { "env" => "production" } 32 | } 33 | } else if [lambda][stage] =~ /dev/ { 34 | mutate { 35 | add_field => { "env" => "development" } 36 | } 37 | } else { 38 | drop {} 39 | } 40 | 41 | grok { 42 | match => { 43 | "message" => [ 44 | "^%{WORD:event} RequestId: %{UUID:reqId}\s*%{GREEDYDATA:message}\s*\n$", 45 | "^%{TIMESTAMP_ISO8601}\t%{UUID:reqId}\t%{GREEDYDATA:message}$" 46 | ] 47 | } 48 | overwrite => [ "message" ] 49 | keep_empty_captures => true 50 | tag_on_failure => [] 51 | } 52 | 53 | if [event] == "REPORT" { 54 | grok { 55 | match => { 56 | "message" => "^Duration: %{BASE16FLOAT:[lambda][duration]} ms\tBilled Duration: %{BASE16FLOAT:[lambda][billed_duration]} ms \tMemory Size: %{BASE10NUM:[lambda][memory_size]} MB\tMax Memory Used: %{BASE10NUM:[lambda][memory_used]} MB" 57 | } 58 | tag_on_failure => [] 59 | } 60 | 61 | mutate { 62 | convert => { 63 | "[lambda][duration]" => "float" 64 | "[lambda][billed_duration]" => "integer" 65 | "[lambda][memory_size]" => "integer" 66 | "[lambda][memory_used]" => "integer" 67 | } 68 | } 69 | } else if [event] == "START" { 70 | grok { 71 | match => { 72 | "message" => "^Version: %{GREEDYDATA:[lambda][version]}" 73 | } 74 | tag_on_failure => [] 75 | } 76 | } else if ![event] { 77 | json { 78 | source => "message" 79 | skip_on_invalid_json => true 80 | remove_field => [ "message", "time", "v", "pid" ] 81 | } 82 | } 83 | 84 | mutate { 85 | lowercase => [ "event" ] 86 | rename => { "message" => "msg" } 87 | } 88 | } 89 | 90 | 91 | output { 92 | elasticsearch { 93 | hosts => [ "elasticsearch:9200" ] 94 | index => "log-%{env}-lambda-%{+YYYY.MM.dd}" 95 | } 96 | 97 | # For Debugging 98 | # stdout { 99 | # codec => rubydebug 100 | # } 101 | } 102 | -------------------------------------------------------------------------------- /files/influxdb/influxdb.conf: -------------------------------------------------------------------------------- 1 | reporting-disabled = false 2 | bind-address = "127.0.0.1:8088" 3 | 4 | [meta] 5 | dir = "/var/lib/influxdb/meta" 6 | retention-autocreate = true 7 | logging-enabled = true 8 | 9 | [data] 10 | dir = "/var/lib/influxdb/data" 11 | index-version = "inmem" 12 | wal-dir = "/var/lib/influxdb/wal" 13 | wal-fsync-delay = "0s" 14 | query-log-enabled = true 15 | cache-max-memory-size = 1073741824 16 | cache-snapshot-memory-size = 26214400 17 | cache-snapshot-write-cold-duration = "10m0s" 18 | compact-full-write-cold-duration = "4h0m0s" 19 | max-series-per-database = 1000000 20 | max-values-per-tag = 100000 21 | max-concurrent-compactions = 0 22 | trace-logging-enabled = false 23 | 24 | [coordinator] 25 | write-timeout = "10s" 26 | max-concurrent-queries = 0 27 | query-timeout = "0s" 28 | log-queries-after = "0s" 29 | max-select-point = 0 30 | max-select-series = 0 31 | max-select-buckets = 0 32 | 33 | [retention] 34 | enabled = true 35 | check-interval = "30m0s" 36 | 37 | [shard-precreation] 38 | enabled = true 39 | check-interval = "10m0s" 40 | advance-period = "30m0s" 41 | 42 | [monitor] 43 | store-enabled = true 44 | store-database = "_internal" 45 | store-interval = "10s" 46 | 47 | [subscriber] 48 | enabled = true 49 | http-timeout = "30s" 50 | insecure-skip-verify = false 51 | ca-certs = "" 52 | write-concurrency = 40 53 | write-buffer-size = 1000 54 | 55 | [http] 56 | enabled = true 57 | bind-address = ":8086" 58 | auth-enabled = false 59 | log-enabled = true 60 | write-tracing = false 61 | pprof-enabled = true 62 | https-enabled = false 63 | https-certificate = 
"/etc/ssl/influxdb.pem" 64 | https-private-key = "" 65 | max-row-limit = 0 66 | max-connection-limit = 0 67 | shared-secret = "" 68 | realm = "InfluxDB" 69 | unix-socket-enabled = false 70 | bind-socket = "/var/run/influxdb.sock" 71 | 72 | [[graphite]] 73 | enabled = false 74 | bind-address = ":2003" 75 | database = "graphite" 76 | retention-policy = "" 77 | protocol = "tcp" 78 | batch-size = 5000 79 | batch-pending = 10 80 | batch-timeout = "1s" 81 | consistency-level = "one" 82 | separator = "." 83 | udp-read-buffer = 0 84 | 85 | [[collectd]] 86 | enabled = true 87 | bind-address = ":25826" 88 | database = "collectd" 89 | retention-policy = "" 90 | batch-size = 5000 91 | batch-pending = 10 92 | batch-timeout = "10s" 93 | read-buffer = 0 94 | typesdb = "/usr/share/collectd/types.db" 95 | security-level = "none" 96 | auth-file = "/etc/collectd/auth_file" 97 | 98 | [[opentsdb]] 99 | enabled = false 100 | bind-address = ":4242" 101 | database = "opentsdb" 102 | retention-policy = "" 103 | consistency-level = "one" 104 | tls-enabled = false 105 | certificate = "/etc/ssl/influxdb.pem" 106 | batch-size = 1000 107 | batch-pending = 5 108 | batch-timeout = "1s" 109 | log-point-errors = true 110 | 111 | [[udp]] 112 | enabled = false 113 | bind-address = ":8089" 114 | database = "udp" 115 | retention-policy = "" 116 | batch-size = 5000 117 | batch-pending = 10 118 | read-buffer = 0 119 | batch-timeout = "1s" 120 | precision = "" 121 | 122 | [continuous_queries] 123 | log-enabled = true 124 | enabled = true 125 | run-interval = "1s" 126 | 127 | -------------------------------------------------------------------------------- /files/logstash/logstash.yml: -------------------------------------------------------------------------------- 1 | # ------------ Node identity ------------ 2 | # Use a descriptive name for the node: 3 | # If omitted the node name will default to the machine's host name 4 | # node.name: test 5 | # 6 | # 7 | # ------------ Data path ------------------ 8 | # Which directory should be used by logstash and its plugins 9 | # for any persistent needs. Defaults to LOGSTASH_HOME/data 10 | # path.data: /var/lib/logstash 11 | # 12 | # 13 | # ------------ Pipeline Settings -------------- 14 | # Set the number of workers that will, in parallel, execute the filters+outputs 15 | # stage of the pipeline. 16 | # This defaults to the number of the host's CPU cores. 17 | # pipeline.workers: 2 18 | # 19 | # How many workers should be used per output plugin instance 20 | # pipeline.output.workers: 1 21 | # 22 | # How many events to retrieve from inputs before sending to filters+workers 23 | # pipeline.batch.size: 125 24 | # 25 | # How long to wait before dispatching an undersized batch to filters+workers 26 | # Value is in milliseconds. 27 | # pipeline.batch.delay: 5 28 | # 29 | # Force Logstash to exit during shutdown even if there are still inflight 30 | # events in memory. By default, logstash will refuse to quit until all 31 | # received events have been pushed to the outputs. 
32 | # WARNING: enabling this can lead to data loss during shutdown 33 | # pipeline.unsafe_shutdown: false 34 | # 35 | # 36 | # ------------ Pipeline Configuration Settings -------------- 37 | # Where to fetch the pipeline configuration for the main pipeline 38 | # path.config: /etc/logstash/conf.d/*.conf 39 | # 40 | # Pipeline configuration string for the main pipeline 41 | # config.string: 42 | # 43 | # At startup, test if the configuration is valid and exit (dry run) 44 | config.test_and_exit: false 45 | # 46 | # Periodically check if the configuration has changed and reload the pipeline 47 | # This can also be triggered manually through the SIGHUP signal 48 | config.reload.automatic: true 49 | # 50 | # How often to check if the pipeline configuration has changed (in seconds) 51 | config.reload.interval: 5s 52 | # 53 | # Show fully compiled configuration as debug log message 54 | # NOTE: --log.level must be 'debug' 55 | # config.debug: false 56 | # 57 | # When enabled, process escaped characters such as \n and \" in strings in the 58 | # pipeline configuration files. 59 | # config.support_escapes: false 60 | # 61 | # 62 | # ------------ Module Settings --------------- 63 | # Define modules here. Modules definitions must be defined as an array. 64 | # The simple way to see this is to prepend each `name` with a `-`, and keep 65 | # all associated variables under the `name` they are associated with, and 66 | # above the next, like this: 67 | # 68 | # modules: 69 | # - name: MODULE_NAME 70 | # var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE 71 | # var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE 72 | # var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE 73 | # var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE 74 | # 75 | # Module variable names must be in the format of 76 | # 77 | # var.PLUGIN_TYPE.PLUGIN_NAME.KEY 78 | # 79 | # modules: 80 | # 81 | # 82 | # ------------ Cloud Settings --------------- 83 | # Define Elastic Cloud settings here. 84 | # This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host' 85 | # cloud.id: 86 | # 87 | # Format of cloud.auth is: : 88 | # This is optional 89 | # If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password' 90 | # If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password' 91 | # cloud.auth: elastic: 92 | # 93 | # 94 | # ------------ Queuing Settings -------------- 95 | # Internal queuing model, "memory" for legacy in-memory based queuing and 96 | # "persisted" for disk-based acked queueing. Defaults is memory 97 | queue.type: memory 98 | # 99 | # If using queue.type: persisted, the directory path where the data files will be stored. 100 | # Default is path.data/queue 101 | # path.queue: 102 | # 103 | # If using queue.type: persisted, the page data files size. The queue data consists of 104 | # append-only data files separated into pages. Default is 250mb 105 | # queue.page_capacity: 250mb 106 | # 107 | # If using queue.type: persisted, the maximum number of unread events in the queue. 108 | # Default is 0 (unlimited) 109 | # queue.max_events: 0 110 | # 111 | # If using queue.type: persisted, the total capacity of the queue in number of bytes. 112 | # If you would like more unacked events to be buffered in Logstash, you can increase the 113 | # capacity using this setting. Please make sure your disk drive has capacity greater than 114 | # the size specified here. 
If both max_bytes and max_events are specified, Logstash will pick 115 | # whichever criteria is reached first 116 | # Default is 1024mb or 1gb 117 | # queue.max_bytes: 1024mb 118 | # 119 | # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint 120 | # Default is 1024, 0 for unlimited 121 | # queue.checkpoint.acks: 1024 122 | # 123 | # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint 124 | # Default is 1024, 0 for unlimited 125 | # queue.checkpoint.writes: 1024 126 | # 127 | # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page 128 | # Default is 1000, 0 for no periodic checkpoint. 129 | # queue.checkpoint.interval: 1000 130 | # 131 | # 132 | # ------------ Dead-Letter Queue Settings -------------- 133 | # Flag to turn on dead-letter queue. 134 | dead_letter_queue.enable: false 135 | 136 | # If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries 137 | # will be dropped if they would increase the size of the dead letter queue beyond this setting. 138 | # Default is 1024mb 139 | # dead_letter_queue.max_bytes: 1024mb 140 | 141 | # If using dead_letter_queue.enable: true, the directory path where the data files will be stored. 142 | # Default is path.data/dead_letter_queue 143 | # path.dead_letter_queue: 144 | # 145 | # 146 | # ------------ Metrics Settings -------------- 147 | # Bind address for the metrics REST endpoint 148 | # http.host: "127.0.0.1" 149 | # 150 | # Bind port for the metrics REST endpoint, this option also accept a range 151 | # (9600-9700) and logstash will pick up the first available ports. 152 | # http.port: 9600-9700 153 | # 154 | # 155 | # ------------ Debugging Settings -------------- 156 | # Options for log.level: 157 | # * fatal 158 | # * error 159 | # * warn 160 | # * info (default) 161 | # * debug 162 | # * trace 163 | # log.level: info 164 | path.logs: /var/log/logstash 165 | # 166 | # 167 | # ------------ Other Settings -------------- 168 | # Where to find custom plugins 169 | # path.plugins: [] 170 | 171 | ## X-Pack: Management 172 | xpack.management.enabled: false 173 | 174 | ## X-Pack: Monitoring 175 | xpack.monitoring.enabled: true 176 | xpack.monitoring.elasticsearch.url: ["http://elasticsearch.shared:9200/"] 177 | xpack.monitoring.elasticsearch.username: 178 | xpack.monitoring.elasticsearch.password: 179 | xpack.monitoring.elasticsearch.sniffing: true 180 | xpack.monitoring.collection.interval: 10s 181 | -------------------------------------------------------------------------------- /files/influxdb/types.db: -------------------------------------------------------------------------------- 1 | absolute value:ABSOLUTE:0:U 2 | apache_bytes value:DERIVE:0:U 3 | apache_connections value:GAUGE:0:65535 4 | apache_idle_workers value:GAUGE:0:65535 5 | apache_requests value:DERIVE:0:U 6 | apache_scoreboard value:GAUGE:0:65535 7 | ath_nodes value:GAUGE:0:65535 8 | ath_stat value:DERIVE:0:U 9 | backends value:GAUGE:0:65535 10 | bitrate value:GAUGE:0:4294967295 11 | blocked_clients value:GAUGE:0:U 12 | bucket value:GAUGE:0:U 13 | bytes value:GAUGE:0:U 14 | cache_eviction value:DERIVE:0:U 15 | cache_operation value:DERIVE:0:U 16 | cache_ratio value:GAUGE:0:100 17 | cache_result value:DERIVE:0:U 18 | cache_size value:GAUGE:0:1125899906842623 19 | capacity value:GAUGE:0:U 20 | ceph_bytes value:GAUGE:U:U 21 | ceph_latency value:GAUGE:U:U 22 | ceph_rate value:DERIVE:0:U 23 | 
changes_since_last_save value:GAUGE:0:U 24 | charge value:GAUGE:0:U 25 | clock_last_meas value:GAUGE:0:U 26 | clock_last_update value:GAUGE:U:U 27 | clock_mode value:GAUGE:0:U 28 | clock_reachability value:GAUGE:0:U 29 | clock_skew_ppm value:GAUGE:-2:2 30 | clock_state value:GAUGE:0:U 31 | clock_stratum value:GAUGE:0:U 32 | compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U 33 | compression_ratio value:GAUGE:0:2 34 | connections value:DERIVE:0:U 35 | conntrack value:GAUGE:0:4294967295 36 | contextswitch value:DERIVE:0:U 37 | count value:GAUGE:0:U 38 | counter value:COUNTER:U:U 39 | cpu value:DERIVE:0:U 40 | cpufreq value:GAUGE:0:U 41 | current value:GAUGE:U:U 42 | current_connections value:GAUGE:0:U 43 | current_sessions value:GAUGE:0:U 44 | delay value:GAUGE:-1000000:1000000 45 | derive value:DERIVE:0:U 46 | df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623 47 | df_complex value:GAUGE:0:U 48 | df_inodes value:GAUGE:0:U 49 | dilution_of_precision value:GAUGE:0:U 50 | disk_io_time io_time:DERIVE:0:U, weighted_io_time:DERIVE:0:U 51 | disk_latency read:GAUGE:0:U, write:GAUGE:0:U 52 | disk_merged read:DERIVE:0:U, write:DERIVE:0:U 53 | disk_octets read:DERIVE:0:U, write:DERIVE:0:U 54 | disk_ops read:DERIVE:0:U, write:DERIVE:0:U 55 | disk_ops_complex value:DERIVE:0:U 56 | disk_time read:DERIVE:0:U, write:DERIVE:0:U 57 | dns_answer value:DERIVE:0:U 58 | dns_notify value:DERIVE:0:U 59 | dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U 60 | dns_opcode value:DERIVE:0:U 61 | dns_qtype value:DERIVE:0:U 62 | dns_qtype_cached value:GAUGE:0:4294967295 63 | dns_query value:DERIVE:0:U 64 | dns_question value:DERIVE:0:U 65 | dns_rcode value:DERIVE:0:U 66 | dns_reject value:DERIVE:0:U 67 | dns_request value:DERIVE:0:U 68 | dns_resolver value:DERIVE:0:U 69 | dns_response value:DERIVE:0:U 70 | dns_transfer value:DERIVE:0:U 71 | dns_update value:DERIVE:0:U 72 | dns_zops value:DERIVE:0:U 73 | drbd_resource value:DERIVE:0:U 74 | duration seconds:GAUGE:0:U 75 | email_check value:GAUGE:0:U 76 | email_count value:GAUGE:0:U 77 | email_size value:GAUGE:0:U 78 | energy value:GAUGE:U:U 79 | energy_wh value:GAUGE:U:U 80 | entropy value:GAUGE:0:4294967295 81 | errors value:DERIVE:0:U 82 | evicted_keys value:DERIVE:0:U 83 | expired_keys value:DERIVE:0:U 84 | fanspeed value:GAUGE:0:U 85 | file_handles value:GAUGE:0:U 86 | file_size value:GAUGE:0:U 87 | files value:GAUGE:0:U 88 | filter_result value:DERIVE:0:U 89 | flow value:GAUGE:0:U 90 | fork_rate value:DERIVE:0:U 91 | frequency value:GAUGE:0:U 92 | frequency_error value:GAUGE:-2:2 93 | frequency_offset value:GAUGE:-1000000:1000000 94 | fscache_stat value:DERIVE:0:U 95 | gauge value:GAUGE:U:U 96 | hash_collisions value:DERIVE:0:U 97 | http_request_methods value:DERIVE:0:U 98 | http_requests value:DERIVE:0:U 99 | http_response_codes value:DERIVE:0:U 100 | humidity value:GAUGE:0:100 101 | if_collisions value:DERIVE:0:U 102 | if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U 103 | if_errors rx:DERIVE:0:U, tx:DERIVE:0:U 104 | if_multicast value:DERIVE:0:U 105 | if_octets rx:DERIVE:0:U, tx:DERIVE:0:U 106 | if_packets rx:DERIVE:0:U, tx:DERIVE:0:U 107 | if_rx_dropped value:DERIVE:0:U 108 | if_rx_errors value:DERIVE:0:U 109 | if_rx_octets value:DERIVE:0:U 110 | if_rx_packets value:DERIVE:0:U 111 | if_tx_dropped value:DERIVE:0:U 112 | if_tx_errors value:DERIVE:0:U 113 | if_tx_octets value:DERIVE:0:U 114 | if_tx_packets value:DERIVE:0:U 115 | invocations value:DERIVE:0:U 116 | io_octets rx:DERIVE:0:U, tx:DERIVE:0:U 117 | io_packets rx:DERIVE:0:U, tx:DERIVE:0:U 118 | 
ipc value:GAUGE:0:U 119 | ipt_bytes value:DERIVE:0:U 120 | ipt_packets value:DERIVE:0:U 121 | irq value:DERIVE:0:U 122 | latency value:GAUGE:0:U 123 | links value:GAUGE:0:U 124 | load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000 125 | memory_bandwidth value:DERIVE:0:U 126 | md_disks value:GAUGE:0:U 127 | memcached_command value:DERIVE:0:U 128 | memcached_connections value:GAUGE:0:U 129 | memcached_items value:GAUGE:0:U 130 | memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U 131 | memcached_ops value:DERIVE:0:U 132 | memory value:GAUGE:0:281474976710656 133 | memory_lua value:GAUGE:0:281474976710656 134 | memory_throttle_count value:DERIVE:0:U 135 | multimeter value:GAUGE:U:U 136 | mutex_operations value:DERIVE:0:U 137 | mysql_bpool_bytes value:GAUGE:0:U 138 | mysql_bpool_counters value:DERIVE:0:U 139 | mysql_bpool_pages value:GAUGE:0:U 140 | mysql_commands value:DERIVE:0:U 141 | mysql_handler value:DERIVE:0:U 142 | mysql_innodb_data value:DERIVE:0:U 143 | mysql_innodb_dblwr value:DERIVE:0:U 144 | mysql_innodb_log value:DERIVE:0:U 145 | mysql_innodb_pages value:DERIVE:0:U 146 | mysql_innodb_row_lock value:DERIVE:0:U 147 | mysql_innodb_rows value:DERIVE:0:U 148 | mysql_locks value:DERIVE:0:U 149 | mysql_log_position value:DERIVE:0:U 150 | mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U 151 | mysql_select value:DERIVE:0:U 152 | mysql_sort value:DERIVE:0:U 153 | mysql_sort_merge_passes value:DERIVE:0:U 154 | mysql_sort_rows value:DERIVE:0:U 155 | mysql_slow_queries value:DERIVE:0:U 156 | nfs_procedure value:DERIVE:0:U 157 | nginx_connections value:GAUGE:0:U 158 | nginx_requests value:DERIVE:0:U 159 | node_octets rx:DERIVE:0:U, tx:DERIVE:0:U 160 | node_rssi value:GAUGE:0:255 161 | node_stat value:DERIVE:0:U 162 | node_tx_rate value:GAUGE:0:127 163 | objects value:GAUGE:0:U 164 | operations value:DERIVE:0:U 165 | operations_per_second value:GAUGE:0:U 166 | packets value:DERIVE:0:U 167 | pending_operations value:GAUGE:0:U 168 | percent value:GAUGE:0:100.1 169 | percent_bytes value:GAUGE:0:100.1 170 | percent_inodes value:GAUGE:0:100.1 171 | pf_counters value:DERIVE:0:U 172 | pf_limits value:DERIVE:0:U 173 | pf_source value:DERIVE:0:U 174 | pf_state value:DERIVE:0:U 175 | pf_states value:GAUGE:0:U 176 | pg_blks value:DERIVE:0:U 177 | pg_db_size value:GAUGE:0:U 178 | pg_n_tup_c value:DERIVE:0:U 179 | pg_n_tup_g value:GAUGE:0:U 180 | pg_numbackends value:GAUGE:0:U 181 | pg_scan value:DERIVE:0:U 182 | pg_xact value:DERIVE:0:U 183 | ping value:GAUGE:0:65535 184 | ping_droprate value:GAUGE:0:100 185 | ping_stddev value:GAUGE:0:65535 186 | players value:GAUGE:0:1000000 187 | power value:GAUGE:U:U 188 | pressure value:GAUGE:0:U 189 | protocol_counter value:DERIVE:0:U 190 | ps_code value:GAUGE:0:9223372036854775807 191 | ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000 192 | ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U 193 | ps_data value:GAUGE:0:9223372036854775807 194 | ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U 195 | ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U 196 | ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U 197 | ps_rss value:GAUGE:0:9223372036854775807 198 | ps_stacksize value:GAUGE:0:9223372036854775807 199 | ps_state value:GAUGE:0:65535 200 | ps_vm value:GAUGE:0:9223372036854775807 201 | pubsub value:GAUGE:0:U 202 | queue_length value:GAUGE:0:U 203 | records value:GAUGE:0:U 204 | requests value:GAUGE:0:U 205 | response_code value:GAUGE:0:U 206 | response_time value:GAUGE:0:U 207 | root_delay value:GAUGE:U:U 208 | root_dispersion value:GAUGE:U:U 209 | route_etx 
value:GAUGE:0:U 210 | route_metric value:GAUGE:0:U 211 | routes value:GAUGE:0:U 212 | satellites value:GAUGE:0:U 213 | segments value:GAUGE:0:65535 214 | serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U 215 | signal_noise value:GAUGE:U:0 216 | signal_power value:GAUGE:U:0 217 | signal_quality value:GAUGE:0:U 218 | smart_attribute current:GAUGE:0:255, worst:GAUGE:0:255, threshold:GAUGE:0:255, pretty:GAUGE:0:U 219 | smart_badsectors value:GAUGE:0:U 220 | smart_powercycles value:GAUGE:0:U 221 | smart_poweron value:GAUGE:0:U 222 | smart_temperature value:GAUGE:-300:300 223 | snr value:GAUGE:0:U 224 | spam_check value:GAUGE:0:U 225 | spam_score value:GAUGE:U:U 226 | spl value:GAUGE:U:U 227 | swap value:GAUGE:0:1099511627776 228 | swap_io value:DERIVE:0:U 229 | tcp_connections value:GAUGE:0:4294967295 230 | temperature value:GAUGE:U:U 231 | threads value:GAUGE:0:U 232 | time_dispersion value:GAUGE:-1000000:1000000 233 | time_offset value:GAUGE:-1000000:1000000 234 | time_offset_ntp value:GAUGE:-1000000:1000000 235 | time_offset_rms value:GAUGE:-1000000:1000000 236 | time_ref value:GAUGE:0:U 237 | timeleft value:GAUGE:0:U 238 | total_bytes value:DERIVE:0:U 239 | total_connections value:DERIVE:0:U 240 | total_objects value:DERIVE:0:U 241 | total_operations value:DERIVE:0:U 242 | total_requests value:DERIVE:0:U 243 | total_sessions value:DERIVE:0:U 244 | total_threads value:DERIVE:0:U 245 | total_time_in_ms value:DERIVE:0:U 246 | total_values value:DERIVE:0:U 247 | uptime value:GAUGE:0:4294967295 248 | users value:GAUGE:0:65535 249 | vcl value:GAUGE:0:65535 250 | vcpu value:GAUGE:0:U 251 | virt_cpu_total value:DERIVE:0:U 252 | virt_vcpu value:DERIVE:0:U 253 | vmpage_action value:DERIVE:0:U 254 | vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U 255 | vmpage_io in:DERIVE:0:U, out:DERIVE:0:U 256 | vmpage_number value:GAUGE:0:4294967295 257 | volatile_changes value:GAUGE:0:U 258 | voltage value:GAUGE:U:U 259 | voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U 260 | vs_memory value:GAUGE:0:9223372036854775807 261 | vs_processes value:GAUGE:0:65535 262 | vs_threads value:GAUGE:0:65535 263 | 264 | # 265 | # Legacy types 266 | # (required for the v5 upgrade target) 267 | # 268 | arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U 269 | arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U 270 | arc_l2_size value:GAUGE:0:U 271 | arc_ratio value:GAUGE:0:U 272 | arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U 273 | mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U 274 | mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U 275 | -------------------------------------------------------------------------------- /files/grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | ##################### Grafana Configuration Example ##################### 2 | # 3 | # Everything has defaults so you only need to uncomment things you want to 4 | # change 5 | 6 | # possible values : production, development 7 | app_mode = production 8 | 9 | # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty 10 | instance_name = ${HOSTNAME} 11 | 12 | #################################### Paths #################################### 13 | [paths] 14 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that 
is used) 15 | # 16 | data = /var/lib/grafana 17 | # 18 | # Directory where grafana can store logs 19 | # 20 | logs = /var/log/grafana 21 | # 22 | # Directory where grafana will automatically scan and look for plugins 23 | # 24 | plugins = /var/lib/grafana/plugins 25 | 26 | # 27 | #################################### Server #################################### 28 | [server] 29 | # Protocol (http, https, socket) 30 | protocol = http 31 | 32 | # The ip address to bind to, empty will bind to all interfaces 33 | ;http_addr = 34 | 35 | # The http port to use 36 | http_port = 3000 37 | 38 | # The public facing domain name used to access grafana from a browser 39 | ;domain = localhost 40 | 41 | # Redirect to correct domain if host header does not match domain 42 | # Prevents DNS rebinding attacks 43 | ;enforce_domain = false 44 | 45 | # The full public facing url you use in browser, used for redirects and emails 46 | # If you use reverse proxy and sub path specify full url (with sub path) 47 | ;root_url = http://localhost:3000 48 | 49 | # Log web requests 50 | router_logging = false 51 | 52 | # the path relative working path 53 | ;static_root_path = public 54 | 55 | # enable gzip 56 | ;enable_gzip = false 57 | 58 | # https certs & key file 59 | ;cert_file = 60 | ;cert_key = 61 | 62 | # Unix socket path 63 | ;socket = 64 | 65 | #################################### Database #################################### 66 | [database] 67 | # You can configure the database connection by specifying type, host, name, user and password 68 | # as seperate properties or as on string using the url propertie. 69 | 70 | # Either "mysql", "postgres" or "sqlite3", it's your choice 71 | type = sqlite3 72 | ;host = 127.0.0.1:3306 73 | name = grafana 74 | user = root 75 | # If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;""" 76 | password = 77 | 78 | # Use either URL or the previous fields to configure the database 79 | # Example: mysql://user:secret@host:port/database 80 | ;url = 81 | 82 | # For "postgres" only, either "disable", "require" or "verify-full" 83 | ;ssl_mode = disable 84 | 85 | # For "sqlite3" only, path relative to data_path setting 86 | ;path = grafana.db 87 | 88 | # Max idle conn setting default is 2 89 | ;max_idle_conn = 2 90 | 91 | # Max conn setting default is 0 (mean not set) 92 | ;max_open_conn = 93 | 94 | 95 | #################################### Session #################################### 96 | [session] 97 | # Either "memory", "file", "redis", "mysql", "postgres", default is "file" 98 | ;provider = file 99 | 100 | # Provider config options 101 | # memory: not have any config yet 102 | # file: session dir path, is relative to grafana data_path 103 | # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana` 104 | # mysql: go-sql-driver/mysql dsn config string, e.g. 
`user:password@tcp(127.0.0.1:3306)/database_name` 105 | # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable 106 | ;provider_config = sessions 107 | 108 | # Session cookie name 109 | ;cookie_name = grafana_sess 110 | 111 | # If you use session in https only, default is false 112 | ;cookie_secure = false 113 | 114 | # Session life time, default is 86400 115 | ;session_life_time = 86400 116 | 117 | #################################### Data proxy ########################### 118 | [dataproxy] 119 | 120 | # This enables data proxy logging, default is false 121 | logging = false 122 | 123 | 124 | #################################### Analytics #################################### 125 | [analytics] 126 | # Server reporting, sends usage counters to stats.grafana.org every 24 hours. 127 | # No ip addresses are being tracked, only simple counters to track 128 | # running instances, dashboard and error counts. It is very helpful to us. 129 | # Change this option to false to disable reporting. 130 | reporting_enabled = false 131 | 132 | # Set to false to disable all checks to https://grafana.net 133 | # for new vesions (grafana itself and plugins), check is used 134 | # in some UI views to notify that grafana or plugin update exists 135 | # This option does not cause any auto updates, nor send any information 136 | # only a GET request to http://grafana.com to get latest versions 137 | check_for_updates = true 138 | 139 | # Google Analytics universal tracking code, only enabled if you specify an id here 140 | ;google_analytics_ua_id = 141 | 142 | #################################### Security #################################### 143 | [security] 144 | # default admin user, created on startup 145 | admin_user = admin 146 | 147 | # default admin password, can be changed before first start of grafana, or in profile settings 148 | admin_password = admin 149 | 150 | # used for signing 151 | ;secret_key = SW2YcwTIb9zpOOhoPsMm 152 | 153 | # Auto-login remember days 154 | login_remember_days = 7 155 | ;cookie_username = grafana_user 156 | ;cookie_remember_name = grafana_remember 157 | 158 | # disable gravatar profile images 159 | disable_gravatar = false 160 | 161 | # data source proxy whitelist (ip_or_domain:port separated by spaces) 162 | ;data_source_proxy_whitelist = 163 | 164 | [snapshots] 165 | # snapshot sharing options 166 | ;external_enabled = true 167 | ;external_snapshot_url = https://snapshots-origin.raintank.io 168 | ;external_snapshot_name = Publish to snapshot.raintank.io 169 | 170 | # remove expired snapshot 171 | ;snapshot_remove_expired = true 172 | 173 | # remove snapshots after 90 days 174 | ;snapshot_TTL_days = 90 175 | 176 | #################################### Users #################################### 177 | [users] 178 | # disable user signup / registration 179 | allow_sign_up = false 180 | 181 | # Allow non admin users to create organizations 182 | allow_org_create = false 183 | 184 | # Set to true to automatically assign new users to the default organization (id 1) 185 | ;auto_assign_org = true 186 | 187 | # Default role new users will be automatically assigned (if disabled above is set to true) 188 | ;auto_assign_org_role = Viewer 189 | 190 | # Background text for the user field on the login page 191 | login_hint = email or username 192 | 193 | # Default UI theme ("dark" or "light") 194 | default_theme = dark 195 | 196 | # External user management, these options affect the organization users view 197 | ;external_manage_link_url = 198 | 
;external_manage_link_name = 199 | ;external_manage_info = 200 | 201 | [auth] 202 | # Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false 203 | disable_login_form = false 204 | 205 | # Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false 206 | disable_signout_menu = false 207 | 208 | #################################### Anonymous Auth ########################## 209 | [auth.anonymous] 210 | # enable anonymous access 211 | enabled = false 212 | 213 | # specify organization name that should be used for unauthenticated users 214 | ;org_name = Main Org. 215 | 216 | # specify role for unauthenticated users 217 | ;org_role = Viewer 218 | 219 | #################################### Github Auth ########################## 220 | [auth.github] 221 | ;enabled = false 222 | ;allow_sign_up = true 223 | ;client_id = some_id 224 | ;client_secret = some_secret 225 | ;scopes = user:email,read:org 226 | ;auth_url = https://github.com/login/oauth/authorize 227 | ;token_url = https://github.com/login/oauth/access_token 228 | ;api_url = https://api.github.com/user 229 | ;team_ids = 230 | ;allowed_organizations = 231 | 232 | #################################### Google Auth ########################## 233 | [auth.google] 234 | ;enabled = false 235 | ;allow_sign_up = true 236 | ;client_id = some_client_id 237 | ;client_secret = some_client_secret 238 | ;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email 239 | ;auth_url = https://accounts.google.com/o/oauth2/auth 240 | ;token_url = https://accounts.google.com/o/oauth2/token 241 | ;api_url = https://www.googleapis.com/oauth2/v1/userinfo 242 | ;allowed_domains = 243 | 244 | #################################### Generic OAuth ########################## 245 | [auth.generic_oauth] 246 | ;enabled = false 247 | ;name = OAuth 248 | ;allow_sign_up = true 249 | ;client_id = some_id 250 | ;client_secret = some_secret 251 | ;scopes = user:email,read:org 252 | ;auth_url = https://foo.bar/login/oauth/authorize 253 | ;token_url = https://foo.bar/login/oauth/access_token 254 | ;api_url = https://foo.bar/user 255 | ;team_ids = 256 | ;allowed_organizations = 257 | 258 | #################################### Grafana.com Auth #################### 259 | [auth.grafana_com] 260 | ;enabled = false 261 | ;allow_sign_up = true 262 | ;client_id = some_id 263 | ;client_secret = some_secret 264 | ;scopes = user:email 265 | ;allowed_organizations = 266 | 267 | #################################### Auth Proxy ########################## 268 | [auth.proxy] 269 | ;enabled = false 270 | ;header_name = X-WEBAUTH-USER 271 | ;header_property = username 272 | ;auto_sign_up = true 273 | ;ldap_sync_ttl = 60 274 | ;whitelist = 192.168.1.1, 192.168.2.1 275 | 276 | #################################### Basic Auth ########################## 277 | [auth.basic] 278 | ;enabled = true 279 | 280 | #################################### Auth LDAP ########################## 281 | [auth.ldap] 282 | ;enabled = false 283 | ;config_file = /etc/grafana/ldap.toml 284 | ;allow_sign_up = true 285 | 286 | #################################### SMTP / Emailing ########################## 287 | [smtp] 288 | ;enabled = false 289 | ;host = localhost:25 290 | ;user = 291 | # If the password contains # or ; you have to wrap it with trippel quotes. 
Ex """#password;""" 292 | ;password = 293 | ;cert_file = 294 | ;key_file = 295 | ;skip_verify = false 296 | ;from_address = admin@grafana.localhost 297 | ;from_name = Grafana 298 | 299 | [emails] 300 | ;welcome_email_on_sign_up = false 301 | 302 | #################################### Logging ########################## 303 | [log] 304 | # Either "console", "file", "syslog". Default is console and file 305 | # Use space to separate multiple modes, e.g. "console file" 306 | ;mode = console file 307 | 308 | # Either "debug", "info", "warn", "error", "critical", default is "info" 309 | ;level = info 310 | 311 | # optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug 312 | ;filters = 313 | 314 | 315 | # For "console" mode only 316 | [log.console] 317 | ;level = 318 | 319 | # log line format, valid options are text, console and json 320 | ;format = console 321 | 322 | # For "file" mode only 323 | [log.file] 324 | ;level = 325 | 326 | # log line format, valid options are text, console and json 327 | ;format = text 328 | 329 | # This enables automated log rotate(switch of following options), default is true 330 | ;log_rotate = true 331 | 332 | # Max line number of single file, default is 1000000 333 | ;max_lines = 1000000 334 | 335 | # Max size shift of single file, default is 28 means 1 << 28, 256MB 336 | ;max_size_shift = 28 337 | 338 | # Segment log daily, default is true 339 | ;daily_rotate = true 340 | 341 | # Expired days of log file(delete after max days), default is 7 342 | ;max_days = 7 343 | 344 | [log.syslog] 345 | ;level = 346 | 347 | # log line format, valid options are text, console and json 348 | ;format = text 349 | 350 | # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. 351 | ;network = 352 | ;address = 353 | 354 | # Syslog facility. user, daemon and local0 through local7 are valid. 355 | ;facility = 356 | 357 | # Syslog tag. By default, the process' argv[0] is used. 358 | ;tag = 359 | 360 | 361 | #################################### AMQP Event Publisher ########################## 362 | [event_publisher] 363 | ;enabled = false 364 | ;rabbitmq_url = amqp://localhost/ 365 | ;exchange = grafana_events 366 | 367 | ;#################################### Dashboard JSON files ########################## 368 | [dashboards.json] 369 | ;enabled = false 370 | ;path = /var/lib/grafana/dashboards 371 | 372 | #################################### Alerting ############################ 373 | [alerting] 374 | # Disable alerting engine & UI features 375 | ;enabled = true 376 | # Makes it possible to turn off alert rule execution but alerting UI is visible 377 | ;execute_alerts = true 378 | 379 | #################################### Internal Grafana Metrics ########################## 380 | # Metrics available at HTTP API Url /api/metrics 381 | [metrics] 382 | # Disable / Enable internal metrics 383 | ;enabled = true 384 | 385 | # Publish interval 386 | ;interval_seconds = 10 387 | 388 | # Send internal metrics to Graphite 389 | [metrics.graphite] 390 | # Enable by setting the address setting (ex localhost:2003) 391 | ;address = 392 | ;prefix = prod.grafana.%(instance_name)s. 
393 | 394 | #################################### Grafana.com integration ########################## 395 | # Url used to to import dashboards directly from Grafana.com 396 | [grafana_com] 397 | url = https://grafana.com 398 | 399 | #################################### External image storage ########################## 400 | [external_image_storage] 401 | # Used for uploading images to public servers so they can be included in slack/email messages. 402 | # you can choose between (s3, webdav) 403 | ;provider = 404 | 405 | [external_image_storage.s3] 406 | ;bucket_url = 407 | ;access_key = 408 | ;secret_key = 409 | 410 | [external_image_storage.webdav] 411 | ;url = 412 | ;public_url = 413 | ;username = 414 | ;password = 415 | -------------------------------------------------------------------------------- /files/elastic-apm-server/apm-server.yml: -------------------------------------------------------------------------------- 1 | ######################## APM Server Configuration ############################# 2 | 3 | #========================= APM Server global options ============================ 4 | apm-server: 5 | # Defines the host and port the server is listening on 6 | host: ${APM_SERVER_HOST:0.0.0.0:8200} 7 | 8 | # Maximum permitted size in bytes of an unzipped request accepted by the server to be processed. 9 | # max_unzipped_size: 31457280 10 | # Maximum permitted size in bytes of a request's header accepted by the server to be processed. 11 | # max_header_size: 1048576 12 | 13 | # Maximum duration request will be queued before being read. 14 | # max_request_queue_time: 2s 15 | # Maximum permitted duration for reading an entire request. 16 | # read_timeout: 30s 17 | # Maximum permitted duration for writing a response. 18 | # write_timeout: 30s 19 | 20 | # Maximum duration in seconds before releasing resources when shutting down the server. 21 | # shutdown_timeout: 5s 22 | 23 | # Maximum number of requests permitted to be sent to the server concurrently. 24 | # concurrent_requests: 5 25 | 26 | # Authorization token to be checked. If a token is set here the agents must 27 | # send their token in the following format: Authorization: Bearer . 28 | # It is recommended to use an authorization token in combination with SSL enabled. 29 | # secret_token: 30 | # ssl.enabled: false 31 | # ssl.certificate : "path/to/cert" 32 | # ssl.key : "path/to/private_key" 33 | 34 | # Please be aware that frontend support is an experimental feature at the moment! 35 | frontend: 36 | # To enable experimental frontend support set this to true. 37 | enabled: false 38 | 39 | # Rate limit per second and IP address for requests sent to the frontend endpoint. 40 | rate_limit: 10 41 | 42 | # Comma separated list of permitted origins for frontend. User-agents will send 43 | # a origin header that will be validated against this list. 44 | # An origin is made of a protocol scheme, host and port, without the url path. 45 | # Allowed origins in this setting can have * to match anything (eg.: http://*.example.com) 46 | # If an item in the list is a single '*', everything will be allowed 47 | # allow_origins : ['*'] 48 | 49 | # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes. 50 | # If the regexp matches, the stacktrace frame is considered to be a library frame. 51 | # library_pattern: "node_modules|bower_components|~" 52 | 53 | # Regexp to be matched against a stacktrace frame's `file_name`. 54 | # If the regexp matches, the stacktrace frame is not used for calculating error groups. 
55 | # The default pattern excludes stacktrace frames that have a filename starting with '/webpack' 56 | # exclude_from_grouping: "^/webpack" 57 | 58 | # If a source map has previously been uploaded, source mapping is automatically applied 59 | # to all error and transaction documents sent to the frontend endpoint. 60 | # source_mapping: 61 | 62 | # Source maps are are fetched from Elasticsearch and then kept in an in-memory cache for a certain time. 63 | # The `cache.expiration` determines how long a source map should be cached before fetching it again from Elasticsearch. 64 | # Note that values configured without a time unit will be interpreted as seconds. 65 | # cache: 66 | # expiration: 5m 67 | 68 | # Source maps are stored in a seperate index. 69 | # If the default index pattern for source maps at 'outputs.elasticsearch.indices' 70 | # is changed, a matching index pattern needs to be specified here. 71 | # index_pattern: "apm-*-sourcemap*" 72 | 73 | # If set to true, APM Server augments data received by the agent with the original IP of the backend server, 74 | # or the IP and User Agent of the real user (frontend requests). It defaults to true. 75 | # capture_personal_data: true 76 | 77 | # golang expvar support - https://golang.org/pkg/expvar/ 78 | # expvar: 79 | # Set to true to Expose expvar 80 | # enabled: false 81 | 82 | # Url to expose expvar 83 | # url: "/debug/vars" 84 | 85 | 86 | #================================ General ====================================== 87 | # Internal queue configuration for buffering events to be published. 88 | queue: 89 | # Queue type by name (default 'mem') 90 | # The memory queue will present all available events (up to the outputs 91 | # bulk_max_size) to the output, the moment the output is ready to server 92 | # another batch of events. 93 | mem: 94 | # Max number of events the queue can buffer. 95 | events: 4096 96 | 97 | # Hints the minimum number of events stored in the queue, 98 | # before providing a batch of events to the outputs. 99 | # A value of 0 (the default) ensures events are immediately available 100 | # to be sent to the outputs. 101 | flush.min_events: 0 102 | 103 | # Maximum duration after which events are available to the outputs, 104 | # if the number of events stored in the queue is < min_flush_events. 105 | flush.timeout: 0s 106 | 107 | # Sets the maximum number of CPUs that can be executing simultaneously. The 108 | # default is the number of logical CPUs available in the system. 109 | # max_procs: 110 | 111 | 112 | #================================= Paths ====================================== 113 | # The home path for the apm-server installation. This is the default base path 114 | # for all other path settings and for miscellaneous files that come with the 115 | # distribution (for example, the sample dashboards). 116 | # If not set by a CLI flag or in the configuration file, the default for the 117 | # home path is the location of the binary. 118 | # path.home: 119 | 120 | # The configuration path for the apm-server installation. This is the default 121 | # base path for configuration files, including the main YAML configuration file 122 | # and the Elasticsearch template file. If not set by a CLI flag or in the 123 | # configuration file, the default for the configuration path is the home path. 124 | # path.config: ${path.home} 125 | 126 | # The data path for the apm-server installation. This is the default base path 127 | # for all the files in which apm-server needs to store its data. 
If not set by a 128 | # CLI flag or in the configuration file, the default for the data path is a data 129 | # subdirectory inside the home path. 130 | # path.data: ${path.home}/data 131 | 132 | # The logs path for a apm-server installation. This is the default location for 133 | # the Beat's log files. If not set by a CLI flag or in the configuration file, 134 | # the default for the logs path is a logs subdirectory inside the home path. 135 | # path.logs: ${path.home}/logs 136 | 137 | 138 | #================================ Logging ====================================== 139 | # Sets log level. The default log level is info. 140 | # Available log levels are: error, warning, info, debug 141 | logging.level: ${APM_SERVER_LOGGING_LEVEL:warning} 142 | 143 | # Enable debug output for selected components. To enable all selectors use ["*"] 144 | # Other available selectors are "beat", "publish", "service" 145 | # Multiple selectors can be chained. 146 | logging.selectors: ["*"] 147 | 148 | # Send all logging output to syslog. The default is false. 149 | logging.to_syslog: false 150 | 151 | # If enabled, apm-server periodically logs its internal metrics that have changed 152 | # in the last period. For each metric that changed, the delta from the value at 153 | # the beginning of the period is logged. Also, the total values for 154 | # all non-zero internal metrics are logged on shutdown. The default is true. 155 | logging.metrics.enabled: ${APM_SERVER_LOGGING_METRICS_ENABLED:true} 156 | 157 | # The period after which to log the internal metrics. The default is 30s. 158 | logging.metrics.period: 30s 159 | 160 | # Set to true to log messages in json format. 161 | logging.json: ${APM_SERVER_LOGGING_JSON:false} 162 | 163 | # Logging to rotating files. Set logging.to_files to false to disable logging to files. 164 | logging.to_files: ${APM_SERVER_LOGGING_ENABLED:true} 165 | 166 | logging.files: 167 | # Configure the path where the logs are written. The default is the logs directory 168 | # under the home path (the binary location). 169 | path: /var/log/apm-server 170 | 171 | # The name of the files where the logs are written to. 172 | name: apm-server 173 | 174 | # Configure log file size limit. If limit is reached, log file will be automatically rotated (10MB = 10485760) 175 | rotateeverybytes: 10485760 176 | 177 | # Number of rotated log files to keep. Oldest files will be deleted first. 178 | keepfiles: 7 179 | 180 | # The permissions mask to apply when rotating log files. The default value is 0600. 181 | # Must be a valid Unix-style file permissions mask expressed in octal notation. 182 | permissions: 0600 183 | 184 | 185 | #============================== Dashboards ===================================== 186 | # These settings control loading the sample dashboards to the Kibana index. Loading 187 | # the dashboards are disabled by default and can be enabled either by setting the 188 | # options here, or by using the `-setup` CLI flag or the `setup` command. 189 | setup.dashboards.enabled: ${APM_SERVER_DASHBOARDS_ENABLED:true} 190 | 191 | # The directory from where to read the dashboards. The default is the `kibana` 192 | # folder in the home path. 193 | setup.dashboards.directory: ${path.home}/kibana 194 | 195 | # The URL from where to download the dashboards archive. It is used instead of 196 | # the directory if it has a value. 197 | # setup.dashboards.url: 198 | 199 | # The file archive (zip file) from where to read the dashboards. 
It is used instead 200 | # of the directory when it has a value. 201 | # setup.dashboards.file: 202 | 203 | # The name of the Kibana index to use for setting the configuration. Default is ".kibana" 204 | setup.dashboards.kibana_index: .kibana 205 | 206 | # The Elasticsearch index name. This overwrites the index name defined in the 207 | # dashboards and index pattern. Example: testbeat-* 208 | # The dashboards.index needs to be changed in case the elasticsearch index pattern is modified. 209 | # setup.dashboards.index: 210 | 211 | # Always use the Kibana API for loading the dashboards instead of autodetecting 212 | # how to install the dashboards by first querying Elasticsearch. 213 | setup.dashboards.always_kibana: false 214 | 215 | 216 | #============================== Template ===================================== 217 | # Set to false to disable template loading. 218 | setup.template.enabled: ${APM_SERVER_TEMPLATE_ENABLED:true} 219 | 220 | # Template name. By default the template name is "apm-%{[beat.version]}" 221 | # The template name and pattern has to be set in case the elasticsearch index pattern is modified. 222 | setup.template.name: "apm-%{[beat.version]}" 223 | 224 | # Template pattern. By default the template pattern is "apm-%{[beat.version]}-*" to apply to the default index settings. 225 | # The first part is the version of the beat and then -* is used to match all daily indices. 226 | # The template name and pattern has to be set in case the elasticsearch index pattern is modified. 227 | setup.template.pattern: "apm-%{[beat.version]}-*" 228 | 229 | # Path to fields.yml file to generate the template 230 | setup.template.fields: "${path.config}/fields.yml" 231 | 232 | # Overwrite existing template 233 | setup.template.overwrite: false 234 | 235 | # Elasticsearch template settings 236 | setup.template.settings: 237 | # A dictionary of settings to place into the settings.index dictionary 238 | # of the Elasticsearch template. For more details, please check 239 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html 240 | # index: 241 | # number_of_shards: 1 242 | # codec: best_compression 243 | # number_of_routing_shards: 30 244 | 245 | # A dictionary of settings for the _source field. For more details, please check 246 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html 247 | # _source: 248 | # enabled: false 249 | 250 | 251 | #============================== Kibana ===================================== 252 | setup.kibana: 253 | # Scheme and port can be left out and will be set to the default (http and 5601) 254 | # In case you specify and additional path, the scheme is required: http://localhost:5601/path 255 | # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 256 | host: ${APM_SERVER_KIBANA_HOST:kibana.shared} 257 | 258 | # Optional protocol and basic auth credentials. 259 | # protocol: "https" 260 | # username: "elastic" 261 | # password: "changeme" 262 | 263 | # Optional HTTP Path 264 | # path: "" 265 | 266 | 267 | #================================ Outputs ====================================== 268 | #-------------------------- Elasticsearch output ------------------------------- 269 | output.elasticsearch: 270 | # Boolean flag to enable or disable the output module. 271 | enabled: true 272 | 273 | # Array of hosts to connect to. 
274 | # Scheme and port can be left out and will be set to the default (http and 9200) 275 | # In case you specify and additional path, the scheme is required: http://localhost:9200/path 276 | # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 277 | hosts: ["localhost:9200"] 278 | hosts: '${APM_SERVER_ELASTICSEARCH_HOSTS:elasticsearch.shared}' 279 | 280 | # Set gzip compression level. 281 | compression_level: ${APM_SERVER_ELASTICSEARCH_COMPRESSION_LEVEL:0} 282 | 283 | # Optional protocol and basic auth credentials. 284 | # protocol: "https" 285 | # username: "elastic" 286 | # password: "changeme" 287 | 288 | # Dictionary of HTTP parameters to pass within the url with index operations. 289 | # parameters: 290 | # param1: value1 291 | # param2: value2 292 | 293 | # Number of workers per Elasticsearch host. 294 | # worker: 1 295 | 296 | # Optional index name. The default is "apm" plus version plus date 297 | # and generates apm-%{[beat.version]}-YYYY.MM.DD keys. 298 | # In case you modify this pattern you must update following configuration accordingly: 299 | # index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}" 300 | 301 | # By specifying additional indices documents matching the criteria are indexed in a seperate index. 302 | # In case you are uploading source map documents (which is currently an experimental feature), 303 | # source map documents are going to their own index with the configuration below. 304 | # 305 | # Be aware that there is only one Elasticsearch template and one Kibana Index Pattern, 306 | # which needs to match against indices for ingested documents as well as for source maps. 307 | # In case you modify the index patterns you must ensure to use the same prefix for all indices, 308 | # and to accordingly set: 309 | # * `setup.template.name` 310 | # * `setup.template.pattern` 311 | # * `setup.dashboards.index` 312 | #indices: 313 | #- index: "apm-%{[beat.version]}-sourcemap" 314 | #when.contains: 315 | #processor.event: "sourcemap" 316 | 317 | # Optional ingest node pipeline. By default no pipeline will be used. 318 | # pipeline: "" 319 | 320 | # Optional HTTP Path 321 | # path: "/elasticsearch" 322 | 323 | # Custom HTTP headers to add to each request 324 | # headers: 325 | # X-My-Header: Contents of the header 326 | 327 | # Proxy server url 328 | # proxy_url: http://proxy:3128 329 | 330 | # The number of times a particular Elasticsearch index operation is attempted. If 331 | # the indexing operation doesn't succeed after this many retries, the events are 332 | # dropped. The default is 3. 333 | max_retries: ${APM_SERVER_ELASTICSEARCH_MAX_RETRIES:3} 334 | 335 | # The maximum number of events to bulk in a single Elasticsearch bulk API index request. 336 | # The default is 50. 337 | bulk_max_size: ${APM_SERVER_ELASTICSEARCH_BULK_MAX_SIZE:50} 338 | 339 | # Configure http request timeout before failing an request to Elasticsearch. 340 | timeout: ${APM_SERVER_ELASTICSEARCH_TIMEOUT:60s} 341 | 342 | # Use SSL settings for HTTPS. Default is true. 343 | # ssl.enabled: true 344 | 345 | # Configure SSL verification mode. If `none` is configured, all server hosts 346 | # and certificates will be accepted. In this mode, SSL based connections are 347 | # susceptible to man-in-the-middle attacks. Use only for testing. Default is 348 | # `full`. 349 | # ssl.verification_mode: full 350 | 351 | # List of supported/valid TLS versions. By default all TLS versions 1.0 up to 352 | # 1.2 are enabled. 
353 | # ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] 354 | 355 | # SSL configuration. By default is off. 356 | # List of root certificates for HTTPS server verifications 357 | # ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 358 | 359 | # Certificate for SSL client authentication 360 | # ssl.certificate: "/etc/pki/client/cert.pem" 361 | 362 | # Client Certificate Key 363 | # ssl.key: "/etc/pki/client/cert.key" 364 | 365 | # Optional passphrase for decrypting the Certificate Key. 366 | # ssl.key_passphrase: '' 367 | 368 | # Configure cipher suites to be used for SSL connections 369 | # ssl.cipher_suites: [] 370 | 371 | # Configure curve types for ECDHE based cipher suites 372 | # ssl.curve_types: [] 373 | 374 | # Configure what types of renegotiation are supported. Valid options are 375 | # never, once, and freely. Default is never. 376 | # ssl.renegotiation: never 377 | 378 | #----------------------------- Console output --------------------------------- 379 | output.console: 380 | # Boolean flag to enable or disable the output module. 381 | enabled: false 382 | 383 | # Pretty print json event 384 | pretty: true 385 | -------------------------------------------------------------------------------- /files/collectd/collectd.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Config file for collectd 3 | # 4 | # You should also read /usr/share/doc/collectd-core/README.Debian.plugins 5 | # before enabling any more plugins. 6 | 7 | 8 | ############################################################################## 9 | # Global # 10 | #----------------------------------------------------------------------------# 11 | # Global settings for the daemon. # 12 | ############################################################################## 13 | 14 | # Hostname "localhost" 15 | FQDNLookup true 16 | BaseDir "/var/lib/collectd" 17 | PluginDir "/usr/lib/collectd" 18 | TypesDB "/usr/share/collectd/types.db" 19 | 20 | # Load plugins automatically when block is encountered 21 | AutoLoadPlugin false 22 | # Collect internal statistics using "collectd" as the plugin name. 23 | CollectInternalStats false 24 | # Set interval to query values. This may be overwritten on a per-plugin 25 | Interval 10 26 | 27 | # MaxReadInterval 86400 28 | # Timeout 2 29 | # ReadThreads 5 30 | # WriteThreads 5 31 | 32 | # Limit the size of the write queue. (Default: no limit) 33 | # WriteQueueLimitHigh 1000000 34 | # WriteQueueLimitLow 800000 35 | 36 | 37 | ############################################################################## 38 | # Logging # 39 | #----------------------------------------------------------------------------# 40 | # Plugins which provide logging functions should be loaded first, so log # 41 | # messages generated when loading or configuring other plugins can be # 42 | # accessed. # 43 | ############################################################################## 44 | 45 | ## Plugin: logfile 46 | LoadPlugin logfile 47 | 48 | LogLevel info 49 | File "/var/log/collectd.log" 50 | Timestamp true 51 | PrintSeverity false 52 | 53 | 54 | 55 | ############################################################################## 56 | # Plugins 57 | #----------------------------------------------------------------------------# 58 | # Specify and configure each plugin to activate. 
# 59 | ############################################################################## 60 | 61 | ## Plugin: cpu 62 | LoadPlugin cpu 63 | 64 | # ReportByCpu true 65 | # ReportByState true 66 | # ValuesPercentage false 67 | 68 | 69 | ## Plugin: disk 70 | LoadPlugin disk 71 | 72 | # Disk "hda" 73 | # Disk "/sda[23]/" 74 | # IgnoreSelected false 75 | # UseBSDName false 76 | # UdevNameAttr "DEVNAME" 77 | 78 | 79 | ## Plugin: interface 80 | LoadPlugin interface 81 | 82 | # Interface "eth0" 83 | # IgnoreSelected false 84 | 85 | 86 | ## Plugin: memory 87 | LoadPlugin memory 88 | 89 | # ValuesAbsolute true 90 | # ValuesPercentage false 91 | 92 | 93 | ## Plugin: network 94 | LoadPlugin network 95 | 96 | # Client setup: 97 | Server "ff18::efc0:4a42" "25826" 98 | # 99 | 100 | # SecurityLevel Encrypt 101 | # Username "user" 102 | # Password "secret" 103 | # Interface "eth0" 104 | # ResolveInterval 14400 105 | 106 | # TimeToLive 128 107 | MaxPacketSize 1452 108 | # Report statistics about the network plugin itself 109 | ReportStats false 110 | # Execute garbage collection 111 | # CacheFlush 1800 112 | 113 | 114 | ## Plugin: processes 115 | LoadPlugin processes 116 | 117 | # Process "name" 118 | # ProcessMatch "foobar" "/usr/bin/perl foobar\\.pl.*" 119 | 120 | 121 | ## Plugin: swap 122 | LoadPlugin swap 123 | 124 | # ReportByDevice false 125 | # ReportBytes true 126 | 127 | 128 | 129 | #LoadPlugin aggregation 130 | #LoadPlugin amqp 131 | #LoadPlugin apache 132 | #LoadPlugin apcups 133 | #LoadPlugin ascent 134 | #LoadPlugin barometer 135 | LoadPlugin battery 136 | #LoadPlugin bind 137 | #LoadPlugin ceph 138 | #LoadPlugin cgroups 139 | #LoadPlugin conntrack 140 | #LoadPlugin contextswitch 141 | #LoadPlugin cpufreq 142 | #LoadPlugin csv 143 | #LoadPlugin curl 144 | #LoadPlugin curl_json 145 | #LoadPlugin curl_xml 146 | #LoadPlugin dbi 147 | LoadPlugin df 148 | #LoadPlugin dns 149 | #LoadPlugin drbd 150 | #LoadPlugin email 151 | LoadPlugin entropy 152 | #LoadPlugin ethstat 153 | #LoadPlugin exec 154 | #LoadPlugin fhcount 155 | #LoadPlugin filecount 156 | #LoadPlugin fscache 157 | #LoadPlugin gmond 158 | #LoadPlugin hddtemp 159 | #LoadPlugin ipc 160 | #LoadPlugin ipmi 161 | #LoadPlugin iptables 162 | #LoadPlugin ipvs 163 | LoadPlugin irq 164 | #LoadPlugin java 165 | LoadPlugin load 166 | #LoadPlugin lvm 167 | #LoadPlugin madwifi 168 | #LoadPlugin mbmon 169 | #LoadPlugin md 170 | #LoadPlugin memcachec 171 | #LoadPlugin memcached 172 | #LoadPlugin modbus 173 | #LoadPlugin multimeter 174 | #LoadPlugin mysql 175 | #LoadPlugin netlink 176 | #LoadPlugin nfs 177 | #LoadPlugin nginx 178 | #LoadPlugin notify_desktop 179 | #LoadPlugin notify_email 180 | #LoadPlugin ntpd 181 | #LoadPlugin numa 182 | #LoadPlugin nut 183 | #LoadPlugin olsrd 184 | #LoadPlugin openldap 185 | #LoadPlugin openvpn 186 | #LoadPlugin perl 187 | #LoadPlugin pinba 188 | #LoadPlugin ping 189 | #LoadPlugin postgresql 190 | #LoadPlugin powerdns 191 | #LoadPlugin protocols 192 | #LoadPlugin python 193 | #LoadPlugin redis 194 | #LoadPlugin rrdcached 195 | LoadPlugin rrdtool 196 | #LoadPlugin sensors 197 | #LoadPlugin serial 198 | #LoadPlugin sigrok 199 | #LoadPlugin smart 200 | #LoadPlugin snmp 201 | #LoadPlugin statsd 202 | #LoadPlugin table 203 | #LoadPlugin tail 204 | #LoadPlugin tail_csv 205 | #LoadPlugin tcpconns 206 | #LoadPlugin teamspeak2 207 | #LoadPlugin ted 208 | #LoadPlugin thermal 209 | #LoadPlugin tokyotyrant 210 | #LoadPlugin turbostat 211 | #LoadPlugin unixsock 212 | LoadPlugin uptime 213 | LoadPlugin users 214 | #LoadPlugin uuid 215 | 
#LoadPlugin varnish 216 | #LoadPlugin virt 217 | #LoadPlugin vmem 218 | #LoadPlugin vserver 219 | #LoadPlugin wireless 220 | #LoadPlugin write_graphite 221 | #LoadPlugin write_http 222 | #LoadPlugin write_kafka 223 | #LoadPlugin write_log 224 | #LoadPlugin write_redis 225 | #LoadPlugin write_riemann 226 | #LoadPlugin write_sensu 227 | #LoadPlugin write_tsdb 228 | #LoadPlugin zfs_arc 229 | #LoadPlugin zookeeper 230 | 231 | # 232 | # 233 | # #Host "unspecified" 234 | # Plugin "cpu" 235 | # PluginInstance "/[0,2,4,6,8]$/" 236 | # Type "cpu" 237 | # #TypeInstance "unspecified" 238 | # 239 | # SetPlugin "cpu" 240 | # SetPluginInstance "even-%{aggregation}" 241 | # 242 | # GroupBy "Host" 243 | # GroupBy "TypeInstance" 244 | # 245 | # CalculateNum false 246 | # CalculateSum false 247 | # CalculateAverage true 248 | # CalculateMinimum false 249 | # CalculateMaximum false 250 | # CalculateStddev false 251 | # 252 | # 253 | 254 | # 255 | # 256 | # Host "localhost" 257 | # Port "5672" 258 | # VHost "/" 259 | # User "guest" 260 | # Password "guest" 261 | # Exchange "amq.fanout" 262 | # RoutingKey "collectd" 263 | # Persistent false 264 | # StoreRates false 265 | # ConnectionRetryDelay 0 266 | # 267 | # 268 | 269 | # 270 | # 271 | # URL "http://localhost/server-status?auto" 272 | # User "www-user" 273 | # Password "secret" 274 | # VerifyPeer false 275 | # VerifyHost false 276 | # CACert "/etc/ssl/ca.crt" 277 | # Server "apache" 278 | # 279 | # 280 | # 281 | # URL "http://some.domain.tld/status?auto" 282 | # Host "some.domain.tld" 283 | # Server "lighttpd" 284 | # 285 | # 286 | 287 | # 288 | # Host "localhost" 289 | # Port "3551" 290 | # ReportSeconds true 291 | # 292 | 293 | # 294 | # URL "http://localhost/ascent/status/" 295 | # User "www-user" 296 | # Password "secret" 297 | # VerifyPeer false 298 | # VerifyHost false 299 | # CACert "/etc/ssl/ca.crt" 300 | # 301 | 302 | # 303 | # Device "/dev/i2c-0"; 304 | # Oversampling 512 305 | # PressureOffset 0.0 306 | # TemperatureOffset 0.0 307 | # Normalization 2 308 | # Altitude 238.0 309 | # TemperatureSensor "myserver/onewire-F10FCA000800/temperature" 310 | # 311 | 312 | # 313 | # ValuesPercentage false 314 | # ReportDegraded false 315 | # 316 | 317 | # 318 | # URL "http://localhost:8053/" 319 | # 320 | # ParseTime false 321 | # 322 | # OpCodes true 323 | # QTypes true 324 | # ServerStats true 325 | # ZoneMaintStats true 326 | # ResolverStats false 327 | # MemoryStats true 328 | # 329 | # 330 | # QTypes true 331 | # ResolverStats true 332 | # CacheRRSets true 333 | # 334 | # Zone "127.in-addr.arpa/IN" 335 | # 336 | # 337 | 338 | # 339 | # LongRunAvgLatency false 340 | # ConvertSpecialMetricTypes true 341 | # 342 | # SocketPath "/var/run/ceph/ceph-osd.0.asok" 343 | # 344 | # 345 | # SocketPath "/var/run/ceph/ceph-osd.1.asok" 346 | # 347 | # 348 | # SocketPath "/var/run/ceph/ceph-mon.ceph1.asok" 349 | # 350 | # 351 | # SocketPath "/var/run/ceph/ceph-mds.ceph1.asok" 352 | # 353 | # 354 | 355 | # 356 | # CGroup "libvirt" 357 | # IgnoreSelected false 358 | # 359 | 360 | # 361 | # DataDir "/var/lib/collectd/csv" 362 | # StoreRates false 363 | # 364 | 365 | # 366 | # 367 | # URL "http://finance.google.com/finance?q=NYSE%3AAMD" 368 | # User "foo" 369 | # Password "bar" 370 | # Digest false 371 | # VerifyPeer true 372 | # VerifyHost true 373 | # CACert "/path/to/ca.crt" 374 | # Header "X-Custom-Header: foobar" 375 | # Post "foo=bar" 376 | # 377 | # MeasureResponseTime false 378 | # MeasureResponseCode false 379 | # 380 | # Regex "]*> *([0-9]*\\.[0-9]+) *" 381 | # 
DSType "GaugeAverage" 382 | # Type "stock_value" 383 | # Instance "AMD" 384 | # 385 | # 386 | # 387 | 388 | # 389 | ## See: http://wiki.apache.org/couchdb/Runtime_Statistics 390 | # 391 | # Instance "httpd" 392 | # 393 | # Type "http_requests" 394 | # 395 | # 396 | # 397 | # Type "http_request_methods" 398 | # 399 | # 400 | # 401 | # Type "http_response_codes" 402 | # 403 | # 404 | ## Database status metrics: 405 | # 406 | # Instance "dbs" 407 | # 408 | # Type "gauge" 409 | # 410 | # 411 | # Type "counter" 412 | # 413 | # 414 | # Type "bytes" 415 | # 416 | # 417 | # 418 | 419 | # 420 | # 421 | # Host "my_host" 422 | # Instance "some_instance" 423 | # User "collectd" 424 | # Password "thaiNg0I" 425 | # Digest false 426 | # VerifyPeer true 427 | # VerifyHost true 428 | # CACert "/path/to/ca.crt" 429 | # Header "X-Custom-Header: foobar" 430 | # Post "foo=bar" 431 | # 432 | # 433 | # Type "magic_level" 434 | # InstancePrefix "prefix-" 435 | # InstanceFrom "td[1]" 436 | # ValuesFrom "td[2]/span[@class=\"level\"]" 437 | # 438 | # 439 | # 440 | 441 | # 442 | # 443 | # Statement "SELECT 'customers' AS c_key, COUNT(*) AS c_value \ 444 | # FROM customers_tbl" 445 | # MinVersion 40102 446 | # MaxVersion 50042 447 | # 448 | # Type "gauge" 449 | # InstancePrefix "customer" 450 | # InstancesFrom "c_key" 451 | # ValuesFrom "c_value" 452 | # 453 | # 454 | # 455 | # 456 | # Driver "mysql" 457 | # DriverOption "host" "localhost" 458 | # DriverOption "username" "collectd" 459 | # DriverOption "password" "secret" 460 | # DriverOption "dbname" "custdb0" 461 | # SelectDB "custdb0" 462 | # Query "num_of_customers" 463 | # Query "..." 464 | # Host "..." 465 | # 466 | # 467 | 468 | 469 | # Device "/dev/sda1" 470 | # Device "192.168.0.2:/mnt/nfs" 471 | # MountPoint "/home" 472 | # FSType "ext3" 473 | 474 | # ignore rootfs; else, the root file-system would appear twice, causing 475 | # one of the updates to fail and spam the log 476 | FSType rootfs 477 | # ignore the usual virtual / temporary file-systems 478 | FSType sysfs 479 | FSType proc 480 | FSType devtmpfs 481 | FSType devpts 482 | FSType tmpfs 483 | FSType fusectl 484 | FSType cgroup 485 | IgnoreSelected true 486 | 487 | # ReportByDevice false 488 | # ReportInodes false 489 | 490 | # ValuesAbsolute true 491 | # ValuesPercentage false 492 | 493 | 494 | # 495 | # Interface "eth0" 496 | # IgnoreSource "192.168.0.1" 497 | # SelectNumericQueryTypes false 498 | # 499 | 500 | # 501 | # SocketFile "/var/run/collectd-email" 502 | # SocketGroup "collectd" 503 | # SocketPerms "0770" 504 | # MaxConns 5 505 | # 506 | 507 | # 508 | # Interface "eth0" 509 | # Map "rx_csum_offload_errors" "if_rx_errors" "checksum_offload" 510 | # Map "multicast" "if_multicast" 511 | # MappedOnly false 512 | # 513 | 514 | # 515 | # Exec user "/path/to/exec" 516 | # Exec "user:group" "/path/to/exec" 517 | # NotificationExec user "/path/to/exec" 518 | # 519 | 520 | # 521 | # ValuesAbsolute true 522 | # ValuesPercentage false 523 | # 524 | 525 | # 526 | # 527 | # Instance "foodir" 528 | # Name "*.conf" 529 | # MTime "-5m" 530 | # Size "+10k" 531 | # Recursive true 532 | # IncludeHidden false 533 | # 534 | # 535 | 536 | # 537 | # MCReceiveFrom "239.2.11.71" "8649" 538 | # 539 | # 540 | # Type "swap" 541 | # TypeInstance "total" 542 | # DataSource "value" 543 | # 544 | # 545 | # 546 | # Type "swap" 547 | # TypeInstance "free" 548 | # DataSource "value" 549 | # 550 | # 551 | 552 | # 553 | # Host "127.0.0.1" 554 | # Port 7634 555 | # 556 | 557 | # 558 | # Sensor "some_sensor" 559 | # Sensor 
"another_one" 560 | # IgnoreSelected false 561 | # NotifySensorAdd false 562 | # NotifySensorRemove true 563 | # NotifySensorNotPresent false 564 | # 565 | 566 | # 567 | # Chain "table" "chain" 568 | # Chain6 "table" "chain" 569 | # 570 | 571 | # 572 | # Irq 7 573 | # Irq 8 574 | # Irq 9 575 | # IgnoreSelected true 576 | # 577 | 578 | # 579 | # JVMArg "-verbose:jni" 580 | # JVMArg "-Djava.class.path=/usr/share/collectd/java/collectd-api.jar" 581 | # 582 | # LoadPlugin "org.collectd.java.GenericJMX" 583 | # 584 | # # See /usr/share/doc/collectd/examples/GenericJMX.conf 585 | # # for an example config. 586 | # 587 | # 588 | 589 | # 590 | # ReportRelative true 591 | # 592 | 593 | # 594 | # Interface "wlan0" 595 | # IgnoreSelected false 596 | # Source "SysFS" 597 | # WatchSet "None" 598 | # WatchAdd "node_octets" 599 | # WatchAdd "node_rssi" 600 | # WatchAdd "is_rx_acl" 601 | # WatchAdd "is_scan_active" 602 | # 603 | 604 | # 605 | # Host "127.0.0.1" 606 | # Port 411 607 | # 608 | 609 | # 610 | # Device "/dev/md0" 611 | # IgnoreSelected false 612 | # 613 | 614 | # 615 | # 616 | # Server "localhost" 617 | # Key "page_key" 618 | # 619 | # Regex "(\\d+) bytes sent" 620 | # ExcludeRegex "" 621 | # DSType CounterAdd 622 | # Type "ipt_octets" 623 | # Instance "type_instance" 624 | # 625 | # 626 | # 627 | 628 | # 629 | # 630 | # Socket "/var/run/memcached.sock" 631 | # or: 632 | # Host "127.0.0.1" 633 | # Port "11211" 634 | # 635 | # 636 | 637 | # 638 | # 639 | # RegisterBase 1234 640 | # RegisterCmd ReadHolding 641 | # RegisterType float 642 | # Type gauge 643 | # Instance "..." 644 | # 645 | # 646 | # 647 | # Address "addr" 648 | # Port "1234" 649 | # Interval 60 650 | # 651 | # 652 | # Instance "foobar" # optional 653 | # Collect "data_name" 654 | # 655 | # 656 | # 657 | 658 | # 659 | # 660 | # Host "database.serv.er" 661 | # Port "3306" 662 | # User "db_user" 663 | # Password "secret" 664 | # Database "db_name" 665 | # MasterStats true 666 | # ConnectTimeout 10 667 | # InnodbStats true 668 | # 669 | # 670 | # 671 | # Alias "squeeze" 672 | # Host "localhost" 673 | # Socket "/var/run/mysql/mysqld.sock" 674 | # SlaveStats true 675 | # SlaveNotifications true 676 | # 677 | # 678 | 679 | # 680 | # Interface "All" 681 | # VerboseInterface "All" 682 | # QDisc "eth0" "pfifo_fast-1:0" 683 | # Class "ppp0" "htb-1:10" 684 | # Filter "ppp0" "u32-1:0" 685 | # IgnoreSelected false 686 | # 687 | 688 | # 689 | # URL "http://localhost/status?auto" 690 | # User "www-user" 691 | # Password "secret" 692 | # VerifyPeer false 693 | # VerifyHost false 694 | # CACert "/etc/ssl/ca.crt" 695 | # 696 | 697 | # 698 | # OkayTimeout 1000 699 | # WarningTimeout 5000 700 | # FailureTimeout 0 701 | # 702 | 703 | # 704 | # SMTPServer "localhost" 705 | # SMTPPort 25 706 | # SMTPUser "my-username" 707 | # SMTPPassword "my-password" 708 | # From "collectd@main0server.com" 709 | # # on . 710 | # # Beware! Do not use not more than two placeholders (%)! 711 | # Subject "[collectd] %s on %s!" 
712 | # Recipient "email1@domain1.net" 713 | # Recipient "email2@domain2.com" 714 | # 715 | 716 | # 717 | # Host "localhost" 718 | # Port 123 719 | # ReverseLookups false 720 | # IncludeUnitID true 721 | # 722 | 723 | # 724 | # UPS "upsname@hostname:port" 725 | # 726 | 727 | # 728 | # Host "127.0.0.1" 729 | # Port "2006" 730 | # CollectLinks "Summary" 731 | # CollectRoutes "Summary" 732 | # CollectTopology "Summary" 733 | # 734 | 735 | # 736 | # 737 | # URL "ldap://localhost:389" 738 | # StartTLS false 739 | # VerifyHost true 740 | # CACert "/path/to/ca.crt" 741 | # Timeout -1 742 | # Version 3 743 | # 744 | # 745 | 746 | # 747 | # StatusFile "/etc/openvpn/openvpn-status.log" 748 | # ImprovedNamingSchema false 749 | # CollectCompression true 750 | # CollectIndividualUsers true 751 | # CollectUserCount false 752 | # 753 | 754 | # 755 | # IncludeDir "/my/include/path" 756 | # BaseName "Collectd::Plugins" 757 | # EnableDebugger "" 758 | # LoadPlugin Monitorus 759 | # LoadPlugin OpenVZ 760 | # 761 | # 762 | # Foo "Bar" 763 | # Qux "Baz" 764 | # 765 | # 766 | 767 | # 768 | # Address "::0" 769 | # Port "30002" 770 | # 771 | # Host "host name" 772 | # Server "server name" 773 | # Script "script name" 774 | # 775 | # 776 | 777 | # 778 | # Host "host.foo.bar" 779 | # Host "host.baz.qux" 780 | # Interval 1.0 781 | # Timeout 0.9 782 | # TTL 255 783 | # SourceAddress "1.2.3.4" 784 | # Device "eth0" 785 | # MaxMissed -1 786 | # 787 | 788 | # 789 | # 790 | # Statement "SELECT magic FROM wizard WHERE host = $1;" 791 | # Param hostname 792 | # 793 | # 794 | # Type gauge 795 | # InstancePrefix "magic" 796 | # ValuesFrom "magic" 797 | # 798 | # 799 | # 800 | # 801 | # Statement "SELECT COUNT(type) AS count, type \ 802 | # FROM (SELECT CASE \ 803 | # WHEN resolved = 'epoch' THEN 'open' \ 804 | # ELSE 'resolved' END AS type \ 805 | # FROM tickets) type \ 806 | # GROUP BY type;" 807 | # 808 | # 809 | # Type counter 810 | # InstancePrefix "rt36_tickets" 811 | # InstancesFrom "type" 812 | # ValuesFrom "count" 813 | # 814 | # 815 | # 816 | # 817 | # # See /usr/share/doc/collectd-core/examples/postgresql/collectd_insert.sql for details 818 | # Statement "SELECT collectd_insert($1, $2, $3, $4, $5, $6, $7, $8, $9);" 819 | # StoreRates true 820 | # 821 | # 822 | # 823 | # Host "hostname" 824 | # Port 5432 825 | # User "username" 826 | # Password "secret" 827 | # 828 | # SSLMode "prefer" 829 | # KRBSrvName "kerberos_service_name" 830 | # 831 | # Query magic 832 | # 833 | # 834 | # 835 | # Interval 60 836 | # Service "service_name" 837 | # 838 | # Query backend # predefined 839 | # Query rt36_tickets 840 | # 841 | # 842 | # 843 | # Service "collectd_store" 844 | # Writer sqlstore 845 | # # see collectd.conf(5) for details 846 | # CommitInterval 30 847 | # 848 | # 849 | 850 | # 851 | # 852 | # Collect "latency" 853 | # Collect "udp-answers" "udp-queries" 854 | # Socket "/var/run/pdns.controlsocket" 855 | # 856 | # 857 | # Collect "questions" 858 | # Collect "cache-hits" "cache-misses" 859 | # Socket "/var/run/pdns_recursor.controlsocket" 860 | # 861 | # LocalSocket "/opt/collectd/var/run/collectd-powerdns" 862 | # 863 | 864 | # 865 | # Value "/^Tcp:/" 866 | # IgnoreSelected false 867 | # 868 | 869 | # 870 | # ModulePath "/path/to/your/python/modules" 871 | # LogTraces true 872 | # Interactive true 873 | # Import "spam" 874 | # 875 | # 876 | # spam "wonderful" "lovely" 877 | # 878 | # 879 | 880 | # 881 | # 882 | # Host "redis.example.com" 883 | # Port "6379" 884 | # Timeout 2000 885 | # 886 | # 887 | 888 | # 889 | # 
DaemonAddress "unix:/var/run/rrdcached.sock" 890 | # DataDir "/var/lib/rrdcached/db/collectd" 891 | # CreateFiles true 892 | # CreateFilesAsync false 893 | # CollectStatistics true 894 | # 895 | # The following settings are rather advanced 896 | # and should usually not be touched: 897 | # StepSize 10 898 | # HeartBeat 20 899 | # RRARows 1200 900 | # RRATimespan 158112000 901 | # XFF 0.1 902 | # 903 | 904 | 905 | DataDir "/var/lib/collectd/rrd" 906 | # CacheTimeout 120 907 | # CacheFlush 900 908 | # WritesPerSecond 30 909 | # CreateFilesAsync false 910 | # RandomTimeout 0 911 | # 912 | # The following settings are rather advanced 913 | # and should usually not be touched: 914 | # StepSize 10 915 | # HeartBeat 20 916 | # RRARows 1200 917 | # RRATimespan 158112000 918 | # XFF 0.1 919 | 920 | 921 | # 922 | # SensorConfigFile "/etc/sensors3.conf" 923 | # Sensor "it8712-isa-0290/temperature-temp1" 924 | # Sensor "it8712-isa-0290/fanspeed-fan3" 925 | # Sensor "it8712-isa-0290/voltage-in8" 926 | # IgnoreSelected false 927 | # 928 | 929 | # 930 | # LogLevel 3 931 | # 932 | # Driver "fluke-dmm" 933 | # MinimumInterval 10 934 | # Conn "/dev/ttyUSB2" 935 | # 936 | # 937 | # Driver "cem-dt-885x" 938 | # Conn "/dev/ttyUSB1" 939 | # 940 | # 941 | 942 | # 943 | # Disk "/^[hs]d[a-f][0-9]?$/" 944 | # IgnoreSelected false 945 | # 946 | 947 | # See /usr/share/doc/collectd/examples/snmp-data.conf.gz for a 948 | # comprehensive sample configuration. 949 | # 950 | # 951 | # Type "voltage" 952 | # Table false 953 | # Instance "input_line1" 954 | # Scale 0.1 955 | # Values "SNMPv2-SMI::enterprises.6050.5.4.1.1.2.1" 956 | # 957 | # 958 | # Type "users" 959 | # Table false 960 | # Instance "" 961 | # Shift -1 962 | # Values "HOST-RESOURCES-MIB::hrSystemNumUsers.0" 963 | # 964 | # 965 | # Type "if_octets" 966 | # Table true 967 | # InstancePrefix "traffic" 968 | # Instance "IF-MIB::ifDescr" 969 | # Values "IF-MIB::ifInOctets" "IF-MIB::ifOutOctets" 970 | # 971 | # 972 | # 973 | # Address "192.168.0.2" 974 | # Version 1 975 | # Community "community_string" 976 | # Collect "std_traffic" 977 | # Inverval 120 978 | # 979 | # 980 | # Address "192.168.0.42" 981 | # Version 2 982 | # Community "another_string" 983 | # Collect "std_traffic" "hr_users" 984 | # 985 | # 986 | # Address "192.168.0.3" 987 | # Version 1 988 | # Community "more_communities" 989 | # Collect "powerplus_voltge_input" 990 | # Interval 300 991 | # 992 | # 993 | 994 | # 995 | # Host "::" 996 | # Port "8125" 997 | # DeleteCounters false 998 | # DeleteTimers false 999 | # DeleteGauges false 1000 | # DeleteSets false 1001 | # TimerPercentile 90.0 1002 | # TimerPercentile 95.0 1003 | # TimerPercentile 99.0 1004 | # TimerLower false 1005 | # TimerUpper false 1006 | # TimerSum false 1007 | # TimerCount false 1008 | # 1009 | 1010 | # 1011 | # 1012 | # Instance "slabinfo" 1013 | # Separator " " 1014 | # 1015 | # Type gauge 1016 | # InstancePrefix "active_objs" 1017 | # InstancesFrom 0 1018 | # ValuesFrom 1 1019 | # 1020 | # 1021 | # Type gauge 1022 | # InstancePrefix "objperslab" 1023 | # InstancesFrom 0 1024 | # ValuesFrom 4 1025 | # 1026 | #
1027 | #
1028 | 1029 | # 1030 | # 1031 | # Instance "exim" 1032 | # Interval 60 1033 | # 1034 | # Regex "S=([1-9][0-9]*)" 1035 | # DSType "CounterAdd" 1036 | # Type "ipt_bytes" 1037 | # Instance "total" 1038 | # 1039 | # 1040 | # Regex "\\" 1041 | # ExcludeRegex "\\.*mail_spool defer" 1042 | # DSType "CounterInc" 1043 | # Type "counter" 1044 | # Instance "local_user" 1045 | # 1046 | # 1047 | # 1048 | 1049 | # 1050 | # 1051 | # Type "percent" 1052 | # Instance "dropped" 1053 | # ValueFrom 1 1054 | # 1055 | # 1056 | # Type "bytes" 1057 | # Instance "wire-realtime" 1058 | # ValueFrom 2 1059 | # 1060 | # 1061 | # Type "alerts_per_second" 1062 | # ValueFrom 3 1063 | # 1064 | # 1065 | # Type "kpackets_wire_per_sec.realtime" 1066 | # ValueFrom 4 1067 | # 1068 | # 1069 | # Instance "snort-eth0" 1070 | # Interval 600 1071 | # Collect "dropped" "mbps" "alerts" "kpps" 1072 | # TimeFrom 0 1073 | # 1074 | # 1075 | 1076 | # 1077 | # ListeningPorts false 1078 | # AllPortsSummary false 1079 | # LocalPort "25" 1080 | # RemotePort "25" 1081 | # 1082 | 1083 | # 1084 | # Host "127.0.0.1" 1085 | # Port "51234" 1086 | # Server "8767" 1087 | # 1088 | 1089 | # 1090 | # Device "/dev/ttyUSB0" 1091 | # Retries 0 1092 | # 1093 | 1094 | # 1095 | # ForceUseProcfs false 1096 | # Device "THRM" 1097 | # IgnoreSelected false 1098 | # 1099 | 1100 | # 1101 | # Host "localhost" 1102 | # Port "1978" 1103 | # 1104 | 1105 | # 1106 | ## None of the following option should be set manually 1107 | ## This plugin automatically detect most optimal options 1108 | ## Only set values here if: 1109 | ## - The module ask you to 1110 | ## - You want to disable the collection of some data 1111 | ## - Your (intel) CPU is not supported (yet) by the module 1112 | ## - The module generate a lot of errors 'MSR offset 0x... 
read failed' 1113 | ## In the last two cases, please open a bug request 1114 | # 1115 | # TCCActivationTemp "100" 1116 | # CoreCstates "392" 1117 | # PackageCstates "396" 1118 | # SystemManagementInterrupt true 1119 | # DigitalTemperatureSensor true 1120 | # PackageThermalManagement true 1121 | # RunningAveragePowerLimit "7" 1122 | # 1123 | 1124 | # 1125 | # SocketFile "/var/run/collectd-unixsock" 1126 | # SocketGroup "collectd" 1127 | # SocketPerms "0660" 1128 | # DeleteSocket false 1129 | # 1130 | 1131 | # 1132 | # UUIDFile "/etc/uuid" 1133 | # 1134 | 1135 | # 1136 | # 1137 | # CollectBackend true 1138 | # CollectBan false # Varnish 3 and above 1139 | # CollectCache true 1140 | # CollectConnections true 1141 | # CollectDirectorDNS false # Varnish 3 only 1142 | # CollectESI false 1143 | # CollectFetch false 1144 | # CollectHCB false 1145 | # CollectObjects false 1146 | # CollectPurge false # Varnish 2 only 1147 | # CollectSession false 1148 | # CollectSHM true 1149 | # CollectSMA false # Varnish 2 only 1150 | # CollectSMS false 1151 | # CollectSM false # Varnish 2 only 1152 | # CollectStruct false 1153 | # CollectTotals false 1154 | # CollectUptime false # Varnish 3 and above 1155 | # CollectdVCL false 1156 | # CollectVSM false # Varnish 4 only 1157 | # CollectWorkers false 1158 | # 1159 | # 1160 | # 1161 | # CollectCache true 1162 | # 1163 | # 1164 | 1165 | # 1166 | # Connection "xen:///" 1167 | # RefreshInterval 60 1168 | # Domain "name" 1169 | # BlockDevice "name:device" 1170 | # InterfaceDevice "name:device" 1171 | # IgnoreSelected false 1172 | # HostnameFormat name 1173 | # InterfaceFormat name 1174 | # PluginInstanceFormat name 1175 | # 1176 | 1177 | # 1178 | # Verbose false 1179 | # 1180 | 1181 | # 1182 | # 1183 | # Host "localhost" 1184 | # Port "2003" 1185 | # Protocol "tcp" 1186 | # LogSendErrors true 1187 | # Prefix "collectd" 1188 | # Postfix "collectd" 1189 | # StoreRates true 1190 | # AlwaysAppendDS false 1191 | # EscapeCharacter "_" 1192 | # 1193 | # 1194 | 1195 | # 1196 | # 1197 | # URL "http://example.com/collectd-post" 1198 | # User "collectd" 1199 | # Password "secret" 1200 | # VerifyPeer true 1201 | # VerifyHost true 1202 | # CACert "/etc/ssl/ca.crt" 1203 | # CAPath "/etc/ssl/certs/" 1204 | # ClientKey "/etc/ssl/client.pem" 1205 | # ClientCert "/etc/ssl/client.crt" 1206 | # ClientKeyPass "secret" 1207 | # SSLVersion "TLSv1" 1208 | # Format "Command" 1209 | # StoreRates false 1210 | # BufferSize 4096 1211 | # LowSpeedLimit 0 1212 | # Timeout 0 1213 | # 1214 | # 1215 | 1216 | # 1217 | # Property "metadata.broker.list" "localhost:9092" 1218 | # 1219 | # Format JSON 1220 | # 1221 | # 1222 | 1223 | # 1224 | # 1225 | # Host "localhost" 1226 | # Port 5555 1227 | # Protocol TCP 1228 | # Batch true 1229 | # BatchMaxSize 8192 1230 | # StoreRates true 1231 | # AlwaysAppendDS false 1232 | # TTLFactor 2.0 1233 | # Notifications true 1234 | # CheckThresholds false 1235 | # EventServicePrefix "" 1236 | # 1237 | # Tag "foobar" 1238 | # Attribute "foo" "bar" 1239 | # 1240 | 1241 | # 1242 | # 1243 | # Host "localhost" 1244 | # Port 3030 1245 | # StoreRates true 1246 | # AlwaysAppendDS false 1247 | # Notifications true 1248 | # Metrics true 1249 | # EventServicePrefix "" 1250 | # MetricHandler "influx" 1251 | # MetricHandler "default" 1252 | # NotificationHandler "flapjack" 1253 | # NotificationHandler "howling_monkey" 1254 | # 1255 | # Tag "foobar" 1256 | # Attribute "foo" "bar" 1257 | # 1258 | 1259 | # 1260 | # 1261 | # Host "localhost" 1262 | # Port "4242" 1263 | # HostTags 
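# A minimal, illustrative sketch (not part of the stock file): this
# repository also builds an InfluxDB image that ships a collectd types.db,
# so metrics could additionally be forwarded to such a listener via the
# network plugin. The host and port below are assumptions for illustration
# only, and a matching "LoadPlugin network" line would also be required.
#
# <Plugin network>
#	Server "influxdb.example.internal" "25826"
# </Plugin>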
"status=production" 1264 | # StoreRates false 1265 | # AlwaysAppendDS false 1266 | # 1267 | # 1268 | 1269 | # 1270 | # Host "localhost" 1271 | # Port "2181" 1272 | # 1273 | 1274 | 1275 | Filter "*.conf" 1276 | 1277 | --------------------------------------------------------------------------------