├── .gitignore ├── roles ├── kibana │ ├── handlers │ │ └── main.yml │ ├── files │ │ └── install-kibana.sh │ ├── vars │ │ └── main.yml │ ├── templates │ │ └── kibana.nginx.conf │ └── tasks │ │ └── main.yml ├── esnode │ ├── handlers │ │ └── main.yml │ ├── vars │ │ └── main.yml │ ├── templates │ │ ├── packetbeat.template.json │ │ └── elasticsearch.yml │ └── tasks │ │ └── main.yml ├── logstash │ ├── handlers │ │ └── main.yml │ ├── templates │ │ ├── logstash_fileinput.conf │ │ ├── logstash_output.conf │ │ └── logstash.repo │ ├── tasks │ │ ├── redhat.yml │ │ ├── debian.yml │ │ └── main.yml │ └── vars │ │ └── main.yml ├── packetbeat │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── redhat.yml │ │ └── debian.yml │ ├── vars │ │ └── main.yml │ └── templates │ │ └── packetbeat.conf └── aggregator │ ├── handlers │ └── main.yml │ ├── vars │ └── main.yml │ ├── templates │ ├── logstash_packetbeat.conf │ └── redis.conf │ └── tasks │ └── main.yml ├── images ├── digitalocean-debian.png ├── pb_system_allinone.png └── pb_system_multiple.png ├── hosts-vagrant-allinone ├── hosts-vagrant ├── site.yml ├── Vagrantfile ├── README.md └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | *.sw? 
2 | .vagrant/* 3 | -------------------------------------------------------------------------------- /roles/kibana/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: nginx reload 2 | service: name=nginx state=restarted 3 | -------------------------------------------------------------------------------- /roles/esnode/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Restart ES 2 | service: name=elasticsearch state=restarted 3 | -------------------------------------------------------------------------------- /roles/logstash/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Restart Logstash 2 | service: name=logstash state=restarted 3 | -------------------------------------------------------------------------------- /roles/packetbeat/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Restart Packetbeat 2 | service: name=packetbeat state=restarted 3 | -------------------------------------------------------------------------------- /images/digitalocean-debian.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsg/packetbeat-deploy/HEAD/images/digitalocean-debian.png -------------------------------------------------------------------------------- /images/pb_system_allinone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsg/packetbeat-deploy/HEAD/images/pb_system_allinone.png -------------------------------------------------------------------------------- /images/pb_system_multiple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsg/packetbeat-deploy/HEAD/images/pb_system_multiple.png 
-------------------------------------------------------------------------------- /hosts-vagrant-allinone: -------------------------------------------------------------------------------- 1 | [aggregator] 2 | aggregator:2303 main_iface=eth1 3 | 4 | [app-servers] 5 | app-deb-1:2304 6 | app-centos-1:2305 7 | -------------------------------------------------------------------------------- /roles/aggregator/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Restart Redis 2 | service: name=redis-server state=restarted 3 | 4 | - name: Restart Logstash 5 | service: name=logstash state=restarted 6 | -------------------------------------------------------------------------------- /hosts-vagrant: -------------------------------------------------------------------------------- 1 | [esnodes] 2 | esnode1:2301 3 | esnode2:2302 4 | 5 | [esnodes:vars] 6 | main_iface=eth1 7 | 8 | [aggregator] 9 | aggregator:2303 main_iface=eth1 10 | 11 | [app-servers] 12 | app-deb-1:2304 13 | app-centos-1:2305 14 | 15 | -------------------------------------------------------------------------------- /roles/kibana/files/install-kibana.sh: -------------------------------------------------------------------------------- 1 | ARCHIVE=$1 2 | DEST=$2 3 | USER=$3 4 | 5 | NAME=`basename $ARCHIVE .tar.gz` 6 | 7 | cd /tmp 8 | tar -xzf $ARCHIVE || exit 1 9 | mv $NAME $DEST || exit 1 10 | chown -R $USER.users $DEST || exit 1 11 | -------------------------------------------------------------------------------- /roles/logstash/templates/logstash_fileinput.conf: -------------------------------------------------------------------------------- 1 | input { 2 | file { 3 | type => {{ item.key }} 4 | path => [{% for path in item.value.path %}"{{ path }}"{% if not loop.last %}, {% endif %}{% endfor %}] 5 | } 6 | } 7 | 8 | -------------------------------------------------------------------------------- /roles/logstash/templates/logstash_output.conf: 
-------------------------------------------------------------------------------- 1 | output { 2 | redis { 3 | codec => "json" 4 | host => "{{ aggregator_bind_ip }}" 5 | port => {{ aggregator_redis_port }} 6 | data_type => list 7 | key => "logstash" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /roles/logstash/tasks/redhat.yml: -------------------------------------------------------------------------------- 1 | 2 | - name: Add Logstash repos 3 | template: src=logstash.repo dest=/etc/yum.repos.d/logstash-{{ logstash.version }}.repo 4 | 5 | - name: Install Logstash 6 | yum: name={{ item }} state=present 7 | with_items: 8 | - logstash 9 | - logstash-contrib 10 | -------------------------------------------------------------------------------- /roles/logstash/templates/logstash.repo: -------------------------------------------------------------------------------- 1 | [logstash-{{ logstash.version }}] 2 | name=logstash repository for {{ logstash.version }}.x packages 3 | baseurl=http://packages.elasticsearch.org/logstash/{{ logstash.version }}/centos 4 | gpgcheck=1 5 | gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch 6 | enabled=1 7 | -------------------------------------------------------------------------------- /roles/packetbeat/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: debian.yml 2 | when: ansible_os_family == 'Debian' 3 | 4 | - include: redhat.yml 5 | when: ansible_os_family == 'RedHat' 6 | 7 | - name: Configure Packetbeat 8 | template: src=packetbeat.conf dest=/etc/packetbeat/packetbeat.conf 9 | notify: Restart Packetbeat 10 | 11 | - name: Start Packetbeat 12 | service: name=packetbeat state=started enabled=yes 13 | -------------------------------------------------------------------------------- /site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install the Packetbeat 
central node 3 | hosts: aggregator 4 | sudo: yes 5 | roles: 6 | - esnode 7 | - aggregator 8 | - kibana 9 | 10 | - name: Install extra Elasticsearch nodes 11 | hosts: esnodes 12 | sudo: yes 13 | roles: 14 | - esnode 15 | 16 | - name: Install the Packetbeat and Logstash agents 17 | hosts: app-servers 18 | sudo: yes 19 | roles: 20 | - packetbeat 21 | - logstash 22 | -------------------------------------------------------------------------------- /roles/logstash/tasks/debian.yml: -------------------------------------------------------------------------------- 1 | - name: Add Logstash repository 2 | apt_repository: repo="deb http://packages.elasticsearch.org/logstash/{{ logstash.version }}/debian stable main" state=present 3 | 4 | - name: Add Logstash repository key 5 | apt_key: url="http://packages.elasticsearch.org/GPG-KEY-elasticsearch" state=present 6 | 7 | - name: Install Logstash 8 | apt: pkg={{ item }} update_cache=yes state=latest 9 | with_items: 10 | - logstash 11 | - logstash-contrib 12 | -------------------------------------------------------------------------------- /roles/kibana/vars/main.yml: -------------------------------------------------------------------------------- 1 | kibana: 2 | url: https://github.com/packetbeat/kibana/releases/download/v3.1.2-pb 3 | archive: kibana-3.1.2-packetbeat.tar.gz 4 | 5 | config: 6 | server_name: "localhost" 7 | nginx_disable_default: true 8 | elasticsearch_url: '"http://"+window.location.hostname+"/ES"' 9 | 10 | # Find aggregator's IP address from the inventory vars 11 | aggregator_bind_iface: "{{ hostvars[groups['aggregator'][0]].get('main_iface', 'eth0') }}" 12 | aggregator_bind_ip: "{{ hostvars[groups['aggregator'][0]]['ansible_' + aggregator_bind_iface ].ipv4.address }}" 13 | -------------------------------------------------------------------------------- /roles/aggregator/vars/main.yml: -------------------------------------------------------------------------------- 1 | aggregator: 2 | logstash_version: "1.4" 3 | 4 
| config: 5 | 6 | days_of_history: 3 7 | 8 | redis_port: 6380 9 | 10 | redis_packetbeat_key: "packetbeat" 11 | 12 | logstash_heap_size: 64 13 | redis_maxmemory: 64 14 | 15 | # Auto-detect IP address to bind on based on the configured network interface 16 | aggregator_bind_iface: "{{ hostvars[inventory_hostname].get('main_iface', 'eth0') }}" 17 | aggregator_bind_ip: "{{ hostvars[inventory_hostname]['ansible_' + aggregator_bind_iface ].ipv4.address }}" 18 | 19 | curator_bin: "python /usr/local/lib/python2.7/dist-packages/curator/curator.py" 20 | -------------------------------------------------------------------------------- /roles/packetbeat/tasks/redhat.yml: -------------------------------------------------------------------------------- 1 | - name: Install Packetbeat dependencies 2 | yum: name={{ item }} state=present 3 | with_items: 4 | - http://dl.fedoraproject.org/pub/epel/6/{{ ansible_architecture }}/epel-release-6-8.noarch.rpm 5 | - libpcap 6 | - daemonize 7 | tags: deps 8 | 9 | - name: Check if Packetbeat is already at the right version 10 | shell: rpm -qi packetbeat | grep Version | awk '{print $3}' 11 | register: installed_version 12 | always_run: True 13 | ignore_errors: True 14 | 15 | - name: Install Packetbeat agent 16 | yum: name=https://github.com/packetbeat/packetbeat/releases/download/v{{ packetbeat.version }}/packetbeat-{{ packetbeat.version }}-1.el6.{{ packetbeat.rpm.arch }}.rpm state=present 17 | when: installed_version.stdout != '{{ packetbeat.version }}' 18 | -------------------------------------------------------------------------------- /roles/logstash/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: debian.yml 2 | when: ansible_os_family == 'Debian' 3 | 4 | - include: redhat.yml 5 | when: ansible_os_family == 'RedHat' 6 | 7 | - name: Logstash set heap size 8 | action: > 9 | lineinfile dest=/etc/init.d/logstash state=present 10 | regexp="^LS_HEAP_SIZE=" 11 |
line='LS_HEAP_SIZE="{{logstash.config.heap_size|int}}m"' 12 | notify: Restart Logstash 13 | 14 | - name: Install logstash fileinput configuration files 15 | template: src=logstash_fileinput.conf dest=/etc/logstash/conf.d/00logstash_{{ item.key }}.conf 16 | with_dict: logstash.config.file_inputs 17 | notify: Restart Logstash 18 | 19 | - name: Install logstash output configuration file 20 | template: src=logstash_output.conf dest=/etc/logstash/conf.d/99logstash_output.conf 21 | notify: Restart Logstash 22 | 23 | - name: Start Logstash 24 | service: name=logstash state=started enabled=yes 25 | -------------------------------------------------------------------------------- /roles/logstash/vars/main.yml: -------------------------------------------------------------------------------- 1 | logstash: 2 | version: 1.4 3 | 4 | config: 5 | heap_size: 64 6 | 7 | file_inputs: 8 | syslog: 9 | enabled: true 10 | path: 11 | - "/var/log/syslog" 12 | - "/var/log/messages" 13 | nginx_access: 14 | enabled: true 15 | path: 16 | - "/var/log/nginx/access.log" 17 | nginx_error: 18 | enabled: true 19 | path: 20 | - "/var/log/nginx/error.log" 21 | 22 | 23 | # Find aggregator's IP address from the inventory vars 24 | aggregator_bind_iface: "{{ hostvars[groups['aggregator'][0]].get('main_iface', 'eth0') }}" 25 | aggregator_bind_ip: "{{ hostvars[groups['aggregator'][0]]['ansible_' + aggregator_bind_iface ].ipv4.address }}" 26 | aggregator_redis_port: 6380 27 | 28 | -------------------------------------------------------------------------------- /roles/packetbeat/tasks/debian.yml: -------------------------------------------------------------------------------- 1 | - name: Install Packetbeat dependencies 2 | apt: pkg={{ item }} state=present update_cache=yes 3 | with_items: 4 | - libpcap0.8 5 | tags: deps 6 | 7 | - name: Check if Packetbeat is already at the right version 8 | shell: dpkg -s packetbeat | grep Version | awk '{print $2}' 9 | register: installed_version 10 | always_run: True 11 | 
ignore_errors: True 12 | 13 | - name: Download Packetbeat agent 14 | get_url: > 15 | url=https://github.com/packetbeat/packetbeat/releases/download/v{{ packetbeat.version }}/packetbeat_{{ packetbeat.version }}-1_{{ packetbeat.deb.arch }}.deb 16 | dest=/tmp/packetbeat_{{ packetbeat.version }}-1_{{ packetbeat.deb.arch }}.deb 17 | when: installed_version.stdout != '{{ packetbeat.version }}' 18 | 19 | - name: Install Packetbeat agent 20 | apt: deb=/tmp/packetbeat_{{ packetbeat.version }}-1_{{ packetbeat.deb.arch }}.deb 21 | when: installed_version.stdout != '{{ packetbeat.version }}' 22 | -------------------------------------------------------------------------------- /roles/kibana/templates/kibana.nginx.conf: -------------------------------------------------------------------------------- 1 | upstream elasticsearch { 2 | server {{ aggregator_bind_ip }}:9200 fail_timeout=0; 3 | } 4 | 5 | server { 6 | 7 | listen 80; 8 | server_name {{ kibana.config.server_name }}; 9 | 10 | access_log /var/log/nginx/kibana.access.log; 11 | error_log /var/log/nginx/kibana.error.log; 12 | 13 | # Kibana 14 | location / { 15 | root /var/www/kibana; 16 | index index.html; 17 | allow all; 18 | } 19 | 20 | # Elasticsearch 21 | location /ES/ { 22 | # Deny access to Cluster API 23 | location ~ /_cluster { 24 | return 403; 25 | break; 26 | } 27 | 28 | rewrite ^/ES/(.*) /$1 break; 29 | 30 | proxy_set_header X-Real-IP $remote_addr; 31 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 32 | proxy_set_header Host $http_host; 33 | proxy_redirect off; 34 | 35 | proxy_pass http://elasticsearch; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /roles/esnode/vars/main.yml: -------------------------------------------------------------------------------- 1 | elasticsearch: 2 | version: '1.4' 3 | 4 | # configuration options 5 | config: 6 | 7 | cluster_name: "packetbeat" 8 | disable_scripting: true 9 | limit_cache_size: true 10 | 11 | # By default we
let ES allocate half of the available 12 | # memory on each host. 13 | heap_size: "{{ ansible_memtotal_mb / 2 }}" 14 | 15 | # mlockall will disable swapping for the ES process 16 | mlockall: true 17 | 18 | # index settings 19 | refresh_interval: "5s" 20 | number_of_shards: 10 21 | number_of_replicas: "{{ 1 if 'esnodes' in groups else 0 }}" 22 | 23 | dashboards: 24 | url: "https://github.com/packetbeat/dashboards/archive" 25 | archive: "v0.5.0K3.tar.gz" 26 | dir_name: "dashboards-0.5.0K3" 27 | 28 | # Auto-detect IP address to bind on based on the configured network interface 29 | bind_iface: "{{ hostvars[inventory_hostname].get('main_iface', 'eth0') }}" 30 | elasticsearch_bind_ip: "{{ hostvars[inventory_hostname]['ansible_' + bind_iface ].ipv4.address }}" 31 | -------------------------------------------------------------------------------- /roles/esnode/templates/packetbeat.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "template" : "packetbeat-*", 3 | "settings" : { 4 | "index.refresh_interval" : "1s" 5 | }, 6 | "mappings" : { 7 | "_default_" : { 8 | "_all" : {"enabled" : false}, 9 | "date_detection": false, 10 | "properties": { 11 | "request_raw" : { 12 | "type": "string", 13 | "index" : "analyzed" 14 | }, 15 | "response_raw" : { 16 | "type": "string", 17 | "index" : "analyzed" 18 | }, 19 | "params": { 20 | "type": "string", 21 | "index": "analyzed" 22 | }, 23 | "@timestamp": { 24 | "type": "date" 25 | }, 26 | "client_location": { 27 | "type": "geo_point" 28 | } 29 | }, 30 | "dynamic_templates": [{ 31 | "template1": { 32 | "match": "*", 33 | "mapping": { 34 | "type": "{dynamic_type}", 35 | "index": "not_analyzed", 36 | "doc_values": true 37 | } 38 | } 39 | }] 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /roles/aggregator/templates/logstash_packetbeat.conf: -------------------------------------------------------------------------------- 1 | input 
{ 2 | redis { 3 | codec => "json" 4 | host => "127.0.0.1" 5 | port => {{ aggregator.config.redis_port }} 6 | data_type => "list" 7 | key => "{{ aggregator.config.redis_packetbeat_key }}" 8 | add_field => { 9 | "_source" => "packetbeat" 10 | } 11 | } 12 | 13 | redis { 14 | codec => "json" 15 | host => "127.0.0.1" 16 | port => {{ aggregator.config.redis_port }} 17 | data_type => "list" 18 | key => "logstash" 19 | add_field => { 20 | "_source" => "logstash" 21 | } 22 | } 23 | } 24 | 25 | output { 26 | if [_source] == "packetbeat" { 27 | elasticsearch { 28 | protocol => "http" 29 | host => "{{ aggregator_bind_ip }}" 30 | manage_template => false 31 | index => "packetbeat-%{+YYYY.MM.dd}" 32 | } 33 | } 34 | 35 | if [_source] == "logstash" { 36 | elasticsearch { 37 | protocol => "http" 38 | host => "{{ aggregator_bind_ip }}" 39 | index => "logstash-%{+YYYY.MM.dd}" 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /roles/kibana/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install nginx 2 | apt: pkg=nginx state=present update_cache=yes 3 | tags: deps 4 | 5 | - name: Download Kibana 6 | get_url: url={{ kibana.url }}/{{ kibana.archive }} dest=/tmp/{{ kibana.archive }} 7 | register: kibana_archive 8 | 9 | - name: Make install directory 10 | file: path=/var/www state=directory owner=www-data group=www-data mode=755 11 | 12 | - name: Delete old installation 13 | file: path=/var/www/kibana state=absent 14 | when: kibana_archive.changed 15 | 16 | - name: Install kibana 17 | script: install-kibana.sh {{ kibana.archive }} /var/www/kibana www-data 18 | when: kibana_archive.changed 19 | 20 | - name: Install nginx conf 21 | template: src=kibana.nginx.conf dest=/etc/nginx/sites-available/kibana mode=644 22 | notify: nginx reload 23 | 24 | - name: Nginx enable site 25 | file: state=link src=/etc/nginx/sites-available/kibana path=/etc/nginx/sites-enabled/kibana 26 | 
notify: nginx reload 27 | 28 | - name: Nginx disable default site 29 | file: path=/etc/nginx/sites-enabled/default state=absent 30 | when: kibana.config.nginx_disable_default 31 | 32 | - name: Nginx start 33 | service: name=nginx state=started 34 | 35 | - name: Set ES URL in Kibana configuration 36 | lineinfile: > 37 | dest=/var/www/kibana/config.js 38 | regexp="^[ \t]*elasticsearch: " 39 | line=' elasticsearch: {{ kibana.config.elasticsearch_url }},' 40 | 41 | - name: Set default dashboard 42 | lineinfile: > 43 | dest=/var/www/kibana/config.js 44 | regexp="^[ \t]*default_route[ \t]*: " 45 | line=' default_route: "/dashboard/elasticsearch/Packetbeat%20Statistics",' 46 | -------------------------------------------------------------------------------- /roles/packetbeat/vars/main.yml: -------------------------------------------------------------------------------- 1 | packetbeat: 2 | version: 0.5.0 3 | 4 | deb: 5 | arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else 'i386' }}" 6 | 7 | rpm: 8 | arch: "{{ 'x86_64' if ansible_architecture == 'x86_64' else 'i686' }}" 9 | 10 | config: 11 | interface: 12 | device: "any" 13 | output: 14 | elasticsearch: 15 | enabled: "false" 16 | host: "" 17 | port: 9200 18 | save_topology: "false" 19 | redis: 20 | enabled: "true" 21 | host: "{{ aggregator_bind_ip }}" 22 | port: 6380 23 | save_topology: "true" 24 | 25 | protocols: 26 | http: 27 | enabled: true 28 | ports: 29 | - 80 30 | - 8080 31 | mysql: 32 | enabled: true 33 | ports: 34 | - 3306 35 | pgsql: 36 | enabled: true 37 | ports: 38 | - 5432 39 | redis: 40 | enabled: true 41 | ports: 42 | - 6379 43 | 44 | processes: 45 | mysqld: 46 | enabled: true 47 | cmdline_grep: "mysqld" 48 | pgsql: 49 | enabled: true 50 | cmdline_grep: "postgres" 51 | nginx: 52 | enabled: true 53 | cmdline_grep: "nginx" 54 | redis: 55 | enabled: true 56 | cmdline_grep: "redis" 57 | 58 | # Find aggregator's IP address from the inventory vars 59 | aggregator_bind_iface: "{{ 
hostvars[groups['aggregator'][0]].get('main_iface', 'eth0') }}" 60 | aggregator_bind_ip: "{{ hostvars[groups['aggregator'][0]]['ansible_' + aggregator_bind_iface ].ipv4.address }}" 61 | 62 | -------------------------------------------------------------------------------- /roles/aggregator/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install REDIS 2 | apt: pkg=redis-server state=present update_cache=yes 3 | tags: deps 4 | 5 | - name: Install dependencies 6 | apt: pkg={{ item }} update_cache=yes state=present 7 | with_items: 8 | - python-pycurl 9 | - python-apt 10 | - python-setuptools 11 | tags: deps 12 | 13 | - easy_install: name=pip 14 | tags: deps 15 | 16 | - name: Install ES Curator 17 | pip: name=elasticsearch-curator 18 | tags: deps 19 | 20 | - name: Configure Redis 21 | template: src=redis.conf dest=/etc/redis/redis.conf 22 | notify: Restart Redis 23 | 24 | - name: Add Logstash repository 25 | apt_repository: repo="deb http://packages.elasticsearch.org/logstash/{{ aggregator.logstash_version }}/debian stable main" state=present 26 | 27 | - name: Add Logstash repository key 28 | apt_key: url="http://packages.elasticsearch.org/GPG-KEY-elasticsearch" state=present 29 | 30 | - name: Install Logstash 31 | apt: pkg={{ item }} update_cache=yes state=present 32 | with_items: 33 | - logstash 34 | - logstash-contrib 35 | 36 | - name: Logstash set heap size 37 | action: > 38 | lineinfile dest=/etc/init.d/logstash state=present 39 | regexp="^LS_HEAP_SIZE=" 40 | line='LS_HEAP_SIZE="{{aggregator.config.logstash_heap_size|int}}m"' 41 | notify: Restart Logstash 42 | 43 | - name: Install Logstash packetbeat configuration 44 | template: src=logstash_packetbeat.conf dest=/etc/logstash/conf.d/logstash_packetbeat.conf 45 | notify: Restart Logstash 46 | 47 | - name: Start Logstash 48 | service: name=logstash state=started enabled=yes 49 | 50 | - name: Schedule curator for Packetbeat 51 | cron: > 52 | name='Curate
Packetbeat' 53 | hour=2 minute=15 54 | job="{{ curator_bin }} -D -p packetbeat- -d {{ aggregator.config.days_of_history }}" 55 | user=elasticsearch cron_file=packetbeat 56 | 57 | 58 | - name: Schedule curator for Logstash 59 | cron: > 60 | name='Curate Logstash' 61 | hour=3 minute=15 62 | job="{{ curator_bin }} -D -p logstash- -d {{ aggregator.config.days_of_history }}" 63 | user=elasticsearch cron_file=packetbeat 64 | -------------------------------------------------------------------------------- /roles/esnode/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install dependencies 3 | apt: pkg={{ item }} update_cache=yes state=present 4 | with_items: 5 | - curl 6 | tags: deps 7 | 8 | - name: Install Java7 9 | apt: pkg=java7-runtime-headless update_cache=yes state=latest 10 | tags: deps 11 | 12 | - name: Add Elasticsearch repository 13 | apt_repository: repo="deb http://packages.elasticsearch.org/elasticsearch/{{ elasticsearch.version }}/debian stable main" state=present 14 | 15 | - name: Add Elasticsearch repository key 16 | apt_key: url="http://packages.elasticsearch.org/GPG-KEY-elasticsearch" state=present 17 | 18 | - name: Install Elasticsearch 19 | apt: pkg=elasticsearch update_cache=yes state=latest 20 | 21 | - name: ES configuration 22 | template: src=elasticsearch.yml dest=/etc/elasticsearch/elasticsearch.yml 23 | notify: Restart ES 24 | 25 | - name: ES set heap size 26 | action: > 27 | lineinfile dest=/etc/init.d/elasticsearch state=present 28 | regexp="^ES_HEAP_SIZE" insertafter="\#ES_HEAP_SIZE" 29 | line="ES_HEAP_SIZE={{elasticsearch.config.heap_size|int}}m" 30 | notify: Restart ES 31 | 32 | - name: Start ES 33 | service: name=elasticsearch state=started enabled=yes 34 | 35 | - name: Wait for ES to start 36 | wait_for: host={{ elasticsearch_bind_ip }} port=9200 37 | 38 | - name: Copy ES template file for Packetbeat 39 | template: src=packetbeat.template.json 
dest=/tmp/packetbeat.template.json 40 | register: es_template 41 | 42 | - name: Load template 43 | shell: > 44 | chdir=/tmp 45 | curl -XPUT 'http://{{elasticsearch_bind_ip}}:9200/_template/packetbeat' -d @packetbeat.template.json 46 | when: es_template.changed 47 | 48 | - name: Download Packetbeat Dashboards 49 | get_url: url={{ dashboards.url }}/{{ dashboards.archive }} dest=/tmp/{{ dashboards.archive }} 50 | register: dashboards_archive 51 | 52 | - name: Uncompress Dashboards 53 | command: chdir=/tmp/ tar xvf {{ dashboards.archive }} 54 | when: dashboards_archive.changed 55 | 56 | - name: Load dashboards 57 | command: chdir=/tmp/{{ dashboards.dir_name }} ./load.sh {{ elasticsearch_bind_ip }} 58 | when: dashboards_archive.changed 59 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # This Vagrantfile is for testing the setup locally. 5 | 6 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
7 | VAGRANTFILE_API_VERSION = "2" 8 | 9 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 10 | 11 | # Use a debian squeeze base machine 12 | config.vm.box_url = "http://puppet-vagrant-boxes.puppetlabs.com/debian-73-x64-virtualbox-nocm.box" 13 | 14 | config.vm.define "esnode1" do |esnode| 15 | esnode.vm.box = "esnode1" 16 | 17 | esnode.ssh.port = 2301 18 | esnode.vm.network "forwarded_port", guest: 22, host: esnode.ssh.port 19 | esnode.vm.network "private_network", ip: "192.168.33.11" 20 | end 21 | 22 | config.vm.define "esnode2" do |esnode| 23 | esnode.vm.box = "esnode2" 24 | 25 | esnode.ssh.port = 2302 26 | esnode.vm.network "forwarded_port", guest: 22, host: esnode.ssh.port 27 | esnode.vm.network "private_network", ip: "192.168.33.12" 28 | end 29 | 30 | config.vm.define "aggregator" do |aggregator| 31 | aggregator.vm.box = "aggregator" 32 | 33 | aggregator.ssh.port = 2303 34 | aggregator.vm.network "forwarded_port", guest: 22, host: aggregator.ssh.port 35 | aggregator.vm.network "private_network", ip: "192.168.33.13" 36 | end 37 | config.vm.synced_folder ".", "/vagrant" 38 | 39 | config.vm.define "app-deb-1" do |app| 40 | app.vm.box = "app-deb-1" 41 | 42 | app.ssh.port = 2304 43 | app.vm.network "forwarded_port", guest: 22, host: app.ssh.port 44 | app.vm.network "private_network", ip: "192.168.33.14" 45 | end 46 | config.vm.synced_folder ".", "/vagrant" 47 | 48 | config.vm.define "app-centos-1" do |app| 49 | app.vm.box = "app-centos-1" 50 | app.vm.box_url = "http://puppet-vagrant-boxes.puppetlabs.com/centos-65-x64-virtualbox-puppet.box" 51 | 52 | app.ssh.port = 2305 53 | app.vm.network "forwarded_port", guest: 22, host: app.ssh.port 54 | app.vm.network "private_network", ip: "192.168.33.15" 55 | end 56 | config.vm.synced_folder ".", "/vagrant" 57 | end 58 | -------------------------------------------------------------------------------- /roles/packetbeat/templates/packetbeat.conf: 
-------------------------------------------------------------------------------- 1 | ### 2 | ### Packetbeat Agent configuration file. 3 | ### 4 | ### Packetbeat is an application monitoring system that works by sniffing 5 | ### the network traffic between your application ### components. 6 | ### 7 | ### For more configuration options, please visit: 8 | ### 9 | ### http://packetbeat.com/docs/configuration.html 10 | ### 11 | 12 | [output] 13 | 14 | [output.elasticsearch] 15 | # Comment this option if you don't want to output to Elasticsearch. 16 | enabled = {{ packetbeat.config.output.elasticsearch.enabled }} 17 | 18 | # Set the host and port where to find Elasticsearch. 19 | host = "{{ packetbeat.config.output.elasticsearch.host }}" 20 | port = {{ packetbeat.config.output.elasticsearch.port }} 21 | 22 | # Comment this option if you don't want to store the topology in Elasticsearch. 23 | save_topology = {{ packetbeat.config.output.elasticsearch.save_topology }} 24 | 25 | [output.redis] 26 | # Uncomment out this option if you want to output to Redis. 27 | enabled = {{ packetbeat.config.output.redis.enabled }} 28 | 29 | # Set the host and port where to find Redis. 30 | host = "{{ packetbeat.config.output.redis.host }}" 31 | port = {{ packetbeat.config.output.redis.port }} 32 | 33 | # Uncomment out this option if you want to store the topology in Redis. 34 | save_topology = {{ packetbeat.config.output.redis.save_topology }} 35 | 36 | [interfaces] 37 | # Select on which network interfaces to sniff. You can use the "any" 38 | # keyword to sniff on all connected interfaces. 39 | device = "{{ packetbeat.config.interface.device }}" 40 | 41 | [protocols] 42 | # Configure which protocols to monitor and on which ports are they 43 | # running. You can disable a given protocol by commenting out its 44 | # configuration. 
45 | {% for name, protocol in packetbeat.config.protocols.items() %} 46 | {%- if protocol.enabled %} 47 | 48 | [protocols.{{ name }}] 49 | ports = [{{ protocol.ports|join(", ") }}] 50 | {%- endif %} 51 | 52 | {% endfor %} 53 | 54 | [procs] 55 | # Which processes to monitor and how to find them. The processes can 56 | # be found by searching their command line by a given string. 57 | {% for name, proc in packetbeat.config.processes.items() %} 58 | {%- if proc.enabled %} 59 | 60 | [procs.monitored.{{ name }}] 61 | cmdline_grep = "{{ proc.cmdline_grep }}" 62 | {%- endif %} 63 | 64 | {% endfor %} 65 | 66 | [agent] 67 | # The name of the agent as it will show up in the web interface. If not 68 | # defined, we will just use the hostname. 69 | # 70 | #name= 71 | 72 | # Uncomment the following if you want to ignore transactions created 73 | # by the server on which the agent is installed. This option is useful 74 | # to remove duplicates if agents are installed on multiple servers. 75 | #ignore_outgoing = true 76 | 77 | [passwords] 78 | # Uncomment the following to hide certain parameters from HTTP POST 79 | # requests. The value of the parameters will be replaced with '*' characters 80 | # This is generally useful for avoiding storing user passwords or other 81 | # sensitive information. 
82 | #hide_keywords = ["pass=", "password=", "passwd=", "Password="] 83 | 84 | # vim: set ft=toml: 85 | -------------------------------------------------------------------------------- /roles/aggregator/templates/redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example 2 | 3 | # Note on units: when memory size is needed, it is possible to specifiy 4 | # it in the usual form of 1k 5GB 4M and so forth: 5 | # 6 | # 1k => 1000 bytes 7 | # 1kb => 1024 bytes 8 | # 1m => 1000000 bytes 9 | # 1mb => 1024*1024 bytes 10 | # 1g => 1000000000 bytes 11 | # 1gb => 1024*1024*1024 bytes 12 | # 13 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 14 | 15 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 16 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 17 | daemonize yes 18 | 19 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by 20 | # default. You can specify a custom pid file location here. 21 | pidfile /var/run/redis/redis-server.pid 22 | 23 | # Accept connections on the specified port, default is 6379. 24 | # If port 0 is specified Redis will not listen on a TCP socket. 25 | port {{ aggregator.config.redis_port }} 26 | 27 | # If you want you can bind a single interface, if the bind option is not 28 | # specified all the interfaces will listen for incoming connections. 29 | # 30 | bind 0.0.0.0 31 | 32 | # Specify the path for the unix socket that will be used to listen for 33 | # incoming connections. There is no default, so Redis will not listen 34 | # on a unix socket when not specified. 
35 | # 36 | # unixsocket /var/run/redis/redis.sock 37 | # unixsocketperm 755 38 | 39 | # Close the connection after a client is idle for N seconds (0 to disable) 40 | timeout 0 41 | 42 | # Set server verbosity to 'debug' 43 | # it can be one of: 44 | # debug (a lot of information, useful for development/testing) 45 | # verbose (many rarely useful info, but not a mess like the debug level) 46 | # notice (moderately verbose, what you want in production probably) 47 | # warning (only very important / critical messages are logged) 48 | loglevel notice 49 | 50 | # Specify the log file name. Also 'stdout' can be used to force 51 | # Redis to log on the standard output. Note that if you use standard 52 | # output for logging but daemonize, logs will be sent to /dev/null 53 | logfile /var/log/redis/redis-server.log 54 | 55 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 56 | # and optionally update the other syslog parameters to suit your needs. 57 | # syslog-enabled no 58 | 59 | # Specify the syslog identity. 60 | # syslog-ident redis 61 | 62 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 63 | # syslog-facility local0 64 | 65 | # Set the number of databases. The default database is DB 0, you can select 66 | # a different one on a per-connection basis using SELECT where 67 | # dbid is a number between 0 and 'databases'-1 68 | databases 16 69 | 70 | ################################ SNAPSHOTTING ################################# 71 | # 72 | # Save the DB on disk: 73 | # 74 | # save 75 | # 76 | # Will save the DB if both the given number of seconds and the given 77 | # number of write operations against the DB occurred. 
78 | # 79 | # In the example below the behaviour will be to save: 80 | # after 900 sec (15 min) if at least 1 key changed 81 | # after 300 sec (5 min) if at least 10 keys changed 82 | # after 60 sec if at least 10000 keys changed 83 | # 84 | # Note: you can disable saving at all commenting all the "save" lines. 85 | 86 | #save 900 1 87 | #save 300 10 88 | #save 60 10000 89 | 90 | # Compress string objects using LZF when dump .rdb databases? 91 | # For default that's set to 'yes' as it's almost always a win. 92 | # If you want to save some CPU in the saving child set it to 'no' but 93 | # the dataset will likely be bigger if you have compressible values or keys. 94 | rdbcompression yes 95 | 96 | # The filename where to dump the DB 97 | dbfilename dump.rdb 98 | 99 | # The working directory. 100 | # 101 | # The DB will be written inside this directory, with the filename specified 102 | # above using the 'dbfilename' configuration directive. 103 | # 104 | # Also the Append Only File will be created inside this directory. 105 | # 106 | # Note that you must specify a directory here, not a file name. 107 | dir /var/lib/redis 108 | 109 | ################################# REPLICATION ################################# 110 | 111 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 112 | # another Redis server. Note that the configuration is local to the slave 113 | # so for example it is possible to configure the slave to save the DB with a 114 | # different interval, or to listen to another port, and so on. 115 | # 116 | # slaveof 117 | 118 | # If the master is password protected (using the "requirepass" configuration 119 | # directive below) it is possible to tell the slave to authenticate before 120 | # starting the replication synchronization process, otherwise the master will 121 | # refuse the slave request. 
122 | # 123 | # masterauth 124 | 125 | # When a slave lost the connection with the master, or when the replication 126 | # is still in progress, the slave can act in two different ways: 127 | # 128 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 129 | # still reply to client requests, possibly with out of date data, or the 130 | # data set may just be empty if this is the first synchronization. 131 | # 132 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 133 | # an error "SYNC with master in progress" to all the kind of commands 134 | # but to INFO and SLAVEOF. 135 | # 136 | slave-serve-stale-data yes 137 | 138 | # Slaves send PINGs to server in a predefined interval. It's possible to change 139 | # this interval with the repl_ping_slave_period option. The default value is 10 140 | # seconds. 141 | # 142 | # repl-ping-slave-period 10 143 | 144 | # The following option sets a timeout for both Bulk transfer I/O timeout and 145 | # master data or ping response timeout. The default value is 60 seconds. 146 | # 147 | # It is important to make sure that this value is greater than the value 148 | # specified for repl-ping-slave-period otherwise a timeout will be detected 149 | # every time there is low traffic between the master and the slave. 150 | # 151 | # repl-timeout 60 152 | 153 | ################################## SECURITY ################################### 154 | 155 | # Require clients to issue AUTH before processing any other 156 | # commands. This might be useful in environments in which you do not trust 157 | # others with access to the host running redis-server. 158 | # 159 | # This should stay commented out for backward compatibility and because most 160 | # people do not need auth (e.g. they run their own servers). 161 | # 162 | # Warning: since Redis is pretty fast an outside user can try up to 163 | # 150k passwords per second against a good box.
This means that you should 164 | # use a very strong password otherwise it will be very easy to break. 165 | # 166 | # requirepass foobared 167 | 168 | # Command renaming. 169 | # 170 | # It is possible to change the name of dangerous commands in a shared 171 | # environment. For instance the CONFIG command may be renamed into something 172 | # hard to guess so that it will be still available for internal-use 173 | # tools but not available for general clients. 174 | # 175 | # Example: 176 | # 177 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 178 | # 179 | # It is also possible to completely kill a command renaming it into 180 | # an empty string: 181 | # 182 | # rename-command CONFIG "" 183 | 184 | ################################### LIMITS #################################### 185 | 186 | # Set the max number of connected clients at the same time. By default there 187 | # is no limit, and it's up to the number of file descriptors the Redis process 188 | # is able to open. The special value '0' means no limits. 189 | # Once the limit is reached Redis will close all the new connections sending 190 | # an error 'max number of clients reached'. 191 | # 192 | # maxclients 128 193 | 194 | # Don't use more memory than the specified amount of bytes. 195 | # When the memory limit is reached Redis will try to remove keys 196 | # accordingly to the eviction policy selected (see maxmemory-policy). 197 | # 198 | # If Redis can't remove keys according to the policy, or if the policy is 199 | # set to 'noeviction', Redis will start to reply with errors to commands 200 | # that would use more memory, like SET, LPUSH, and so on, and will continue 201 | # to reply to read-only commands like GET. 202 | # 203 | # This option is usually useful when using Redis as an LRU cache, or to set 204 | # a hard memory limit for an instance (using the 'noeviction' policy).
205 | # 206 | # WARNING: If you have slaves attached to an instance with maxmemory on, 207 | # the size of the output buffers needed to feed the slaves are subtracted 208 | # from the used memory count, so that network problems / resyncs will 209 | # not trigger a loop where keys are evicted, and in turn the output 210 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 211 | # of more keys, and so forth until the database is completely emptied. 212 | # 213 | # In short... if you have slaves attached it is suggested that you set a lower 214 | # limit for maxmemory so that there is some free RAM on the system for slave 215 | # output buffers (but this is not needed if the policy is 'noeviction'). 216 | # 217 | # maxmemory 218 | maxmemory {{ aggregator.config.redis_maxmemory }}m 219 | 220 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 221 | # is reached? You can select among five behavior: 222 | # 223 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 224 | # allkeys-lru -> remove any key accordingly to the LRU algorithm 225 | # volatile-random -> remove a random key with an expire set 226 | # allkeys->random -> remove a random key, any key 227 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 228 | # noeviction -> don't expire at all, just return an error on write operations 229 | # 230 | # Note: with all the kind of policies, Redis will return an error on write 231 | # operations, when there are not suitable keys for eviction. 
232 | # 233 | # At the date of writing this commands are: set setnx setex append 234 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 235 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 236 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 237 | # getset mset msetnx exec sort 238 | # 239 | # The default is: 240 | # 241 | # maxmemory-policy volatile-lru 242 | maxmemory-policy noeviction 243 | 244 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 245 | # algorithms (in order to save memory), so you can select as well the sample 246 | # size to check. For instance for default Redis will check three keys and 247 | # pick the one that was used less recently, you can change the sample size 248 | # using the following configuration directive. 249 | # 250 | # maxmemory-samples 3 251 | 252 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Ansible roles for Packetbeat + ELK 2 | ================================== 3 | 4 | This repository contains automation scripts based on Ansible for deploying the 5 | full Packetbeat system. It is a quick way of getting everything up and running. 6 | 7 | It will install the following components for you: 8 | 9 | * [Packetbeat](http://packetbeat.com) reads and parses network protocols on the 10 | fly, correlating the requests with the responses and measuring the response times. 11 | 12 | * [Elasticsearch](http://elasticsearch.org) is a distributed search system that indexes 13 | semi-structured data (i.e. JSON documents) and can do ad-hoc analytics on 14 | them. 15 | 16 | * [Logstash](http://logstash.net) reads, ships and aggregates log files, but it can be used for other 17 | data as well. 
18 | 19 | * [Kibana](http://kibana.org) is a UI application that offers various visualisation widgets which 20 | you can flexibly organize into dashboards. 21 | 22 | * [Redis](http://redis.io) is an in-memory data structure server which can do a lot of things, 23 | here we just use it as a queue with multiple writers. 24 | 25 | 26 | All these tools are not only able to work together, but they were **designed** 27 | to be composable and work together. So while the number of projects involved 28 | might seem overwhelming, as a user you don't really have to care. Follow our 29 | tutorial to get everything up and running and then simply use the web 30 | interface. 31 | 32 | ## Deploy steps (high level overview - more details below) 33 | 34 | **Step 1:** Spin up one or more Virtual Machines that will host the monitoring system. 35 | 36 | **Step 2:** Clone or download this GitHub project: 37 | 38 | git clone https://github.com/packetbeat/packetbeat-deploy.git 39 | 40 | **Step 3:** Create an inventory file containing your servers. It looks something 41 | like this: 42 | 43 | 44 | [esnodes] 45 | 10.50.10.13 46 | 10.50.10.14 47 | 48 | [aggregator] 49 | 10.50.10.15 50 | 51 | **Step 4:** Adjust the configuration files if you want to. The defaults should be good in most cases. 52 | 53 | **Step 5:** Install Ansible and run one command to deploy everything: 54 | 55 | ansible-playbook -i hosts site.yml 56 | 57 | ## Tutorial 58 | 59 | The following sections cover in detail the steps you need to take to build your 60 | own packet analytics and log aggregation system. 61 | 62 | ### Step 1: Decide on the architecture and get servers 63 | 64 | In this tutorial, we'll give two examples: 65 | 66 | * an all-in-one installation, in which all components except the agents are 67 | running on a single server. This is useful for trying things and for 68 | applications without too much traffic. No redundancy is provided in 69 | this setup. 70 | 71 | * an installation on 3 servers.
All servers run an Elasticsearch node and one 72 | of them also has the *aggregator* role and serves the Kibana web interface. 73 | In this setup the data is duplicated to one replica, so losing one of the 74 | servers doesn't mean data loss. If the *aggregator* node is lost, the system 75 | doesn't record data for the time the *aggregator* is down, but the data that 76 | was already indexed is not lost. 77 | 78 | Because the Deploy system itself is open-source, you can easily extend it later 79 | to more elaborate designs, including removing the single point of failure. 80 | 81 | The servers running the Aggregator node and the Elasticsearch nodes currently 82 | need to be *Debian 7* or newer. Extending this to support more operating systems 83 | is easy, but for now we want to keep our testing efforts lower. 84 | 85 | Most cloud providers offer Debian images. For example, on [Digital 86 | Ocean](http://digitalocean.com), you can select the `Debian 7.0 x64` image: 87 | 88 | ![Select image](/images/digitalocean-debian.png) 89 | 90 | If you want to run the Packetbeat system on one or more physical servers, 91 | you need to install Debian 7 on all of them. 92 | 93 | Note that Debian 7 is a requirement only for the servers running the 94 | Packetbeat Monitoring System itself. Your application servers can run any 95 | Debian based (e.g. Ubuntu) or RedHat based (e.g. CentOS, Oracle Linux) 96 | distribution. Running the Packetbeat agents is also possible on Windows, but 97 | Packetbeat Deploy currently doesn't support it. 98 | 99 | We recommend using servers with at least 512 MB RAM each. 100 | 101 | 102 | ### Step 2: Clone the Packetbeat Deploy repository 103 | 104 | You should run this and the next steps on a computer that has SSH access to all 105 | the servers, ideally using key based authentication.
106 | 107 | If you have Git installed on your computer, simply run: 108 | 109 | git clone https://github.com/packetbeat/packetbeat-deploy.git 110 | 111 | If you plan to modify the deploy scripts and you are fine with storing the 112 | configuration files on GitHub, it might be a good idea to first fork this 113 | repository and then clone your fork. 114 | 115 | ### Step 3: Create the inventory file 116 | 117 | An Ansible [inventory file](http://docs.ansible.com/intro_inventory.html) is a 118 | simple configuration file defining your network. It typically groups hosts by 119 | their intended role in your system. 120 | 121 | Let's start by creating an inventory file named ``hosts`` in the 122 | ``packetbeat-deploy`` folder. 123 | 124 | For the **all-in-one** example, your inventory file should look something like 125 | this: 126 | 127 | # packetbeat-deploy/hosts 128 | 129 | [aggregator] 130 | aggregator.example.com 131 | 132 | [app-servers] 133 | app1.example.com 134 | app2.example.com 135 | app3.example.com 136 | 137 | In this example, the ``aggregator.example.com`` is the hostname of the system 138 | running the monitoring system. The rest of the servers are your application 139 | servers, on which Packetbeat Deploy will install the Packetbeat and Logstash 140 | agents. 
141 | 142 | ![Packetbeat all-in-one](images/pb_system_allinone.png) 143 | 144 | You can also use IP addresses instead of hostnames, something like 145 | this: 146 | 147 | # packetbeat-deploy/hosts 148 | 149 | [aggregator] 150 | 10.50.10.15 151 | 152 | [app-servers] 153 | 10.50.50.1 154 | 10.50.50.2 155 | 10.50.50.3 156 | 157 | If you don't want to use Ansible for installing the Packetbeat and Logstash 158 | agents (for example, if you already do that via Chef or Puppet), you can simply 159 | remove the ``[app-servers]`` section from the inventory file, so you get 160 | something like this: 161 | 162 | # packetbeat-deploy/hosts 163 | 164 | [aggregator] 165 | 10.50.10.15 166 | 167 | For the **installation on 3 servers**, your inventory file should look something 168 | like this: 169 | 170 | # packetbeat-deploy/hosts 171 | 172 | [aggregator] 173 | packetbeat.example.com 174 | 175 | [esnodes] 176 | esnode1.example.com 177 | esnode2.example.com 178 | 179 | [app-servers] 180 | app1.example.com 181 | app2.example.com 182 | app3.example.com 183 | 184 | ![Packetbeat on 3 servers](images/pb_system_multiple.png) 185 | 186 | The inventory file can also contain configuration options on a per host basis. 187 | One usage for this is setting the interface on which the services bind to. The 188 | default is ``eth0``, but in some cases you might want to use a different 189 | device. You can change the interface like this: 190 | 191 | # packetbeat-deploy/hosts 192 | 193 | [aggregator] 194 | packetbeat.example.com main_iface=eth1 195 | 196 | [esnodes] 197 | esnode1.example.com main_iface=eth1 198 | esnode2.example.com main_iface=eth1 199 | 200 | [app-servers] 201 | app1.example.com 202 | app2.example.com 203 | app3.example.com 204 | 205 | Another parameter that you might need to adjust is the SSH user that should be 206 | used by Ansible to connect to the servers.
The parameter is named 207 | ``ansible_ssh_user`` and you can set it like this: 208 | 209 | # packetbeat-deploy/hosts 210 | 211 | [aggregator] 212 | packetbeat.example.com ansible_ssh_user=root 213 | 214 | ### Step 4: Adjust configuration files 215 | 216 | Packetbeat Deploy generally uses sane defaults and uses environmental 217 | information to automatically set the right configuration parameters. For 218 | example, it automatically configures Elasticsearch to use half of the available 219 | memory on each node. So while you normally don't need to change any of the 220 | default configuration variables, you can have a look over the following files 221 | to see if you want anything different. Packetbeat Deploy keeps the options 222 | organized per role, so you have to look into the ``vars`` folder of each role: 223 | 224 | roles/esnode/vars/main.yml 225 | roles/aggregator/vars/main.yml 226 | roles/kibana/vars/main.yml 227 | roles/packetbeat/vars/main.yml 228 | roles/logstash/vars/main.yml 229 | 230 | In particular you might want to ship different log files from the logstash 231 | configuration: 232 | 233 | # roles/logstash/vars/main.yml 234 | ... 235 | file_inputs: 236 | syslog: 237 | enabled: true 238 | path: 239 | - "/var/log/syslog" 240 | - "/var/log/messages" 241 | nginx_access: 242 | enabled: true 243 | path: 244 | - "/var/log/nginx/access.log" 245 | nginx_error: 246 | enabled: true 247 | path: 248 | - "/var/log/nginx/error.log" 249 | ... 250 | 251 | You might also want to adjust the TCP ports that the Packetbeat agent sniffs 252 | on: 253 | 254 | # roles/packetbeat/vars/main.yml 255 | ... 256 | protocols: 257 | http: 258 | enabled: true 259 | ports: 260 | - 80 261 | - 8080 262 | mysql: 263 | enabled: true 264 | ports: 265 | - 3306 266 | pgsql: 267 | enabled: true 268 | ports: 269 | - 5432 270 | redis: 271 | enabled: true 272 | ports: 273 | - 6379 274 | ...
275 | 276 | If your application doesn't use some of the protocols above, simply set 277 | ``enabled`` to false to disable them. 278 | 279 | Another important setting is the amount of history the Packetbeat Monitoring 280 | System stores. A nightly curator task will delete all indexes older than a 281 | given amount of days. The default is 3 days, which we think is usually enough 282 | for troubleshooting while keeping the disk requirements relatively low. You can 283 | change it from the *Aggregator* ``vars`` file: 284 | 285 | 286 | # roles/aggregator/vars/main.yml 287 | aggregator: 288 | ... 289 | config: 290 | days_of_history: 3 291 | ... 292 | 293 | ### Step 5: Install Ansible and run the playbook 294 | 295 | There are [multiple ways](http://docs.ansible.com/intro_installation.html) to 296 | install Ansible, you should choose the one that makes the most sense on your 297 | operating system. However, if you have python already installed, the following 298 | steps should be enough: 299 | 300 | sudo easy_install pip 301 | pip install ansible 302 | 303 | Now it's time to get everything up by running the following command: 304 | 305 | ansible-playbook -i hosts site.yml 306 | 307 | Where ``hosts`` is the inventory file you created in Step 3. 308 | 309 | After the installation is finished, you can access the web interface by opening 310 | the Aggregator's URL in the browser. 311 | 312 | ## Developing / Testing 313 | 314 | Vagrant can be used for testing Packetbeat Deploy. The ``Vagrantfile`` contains 315 | definitions for 5 VMs that we use for testing. 
316 | 317 | To bring the test system up, do the following: 318 | 319 | vagrant up 320 | 321 | Add the SSH connection information to your SSH config, from where Ansible can 322 | read it: 323 | 324 | vagrant ssh-config >> ~/.ssh/config 325 | 326 | Now you are ready to run the Ansible roles: 327 | 328 | ansible-playbook -i hosts-vagrant site.yml 329 | 330 | Or for the all-in-one version (installs the full Packetbeat System on the 331 | aggregator host): 332 | 333 | ansible-playbook -i hosts-vagrant-allinone site.yml 334 | -------------------------------------------------------------------------------- /roles/esnode/templates/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | ##################### Elasticsearch Configuration Example ##################### 2 | 3 | # This file contains an overview of various configuration settings, 4 | # targeted at operations staff. Application developers should 5 | # consult the guide at . 6 | # 7 | # The installation procedure is covered at 8 | # . 9 | # 10 | # Elasticsearch comes with reasonable defaults for most settings, 11 | # so you can try it out without bothering with configuration. 12 | # 13 | # Most of the time, these defaults are just fine for running a production 14 | # cluster. If you're fine-tuning your cluster, or wondering about the 15 | # effect of certain configuration option, please _do ask_ on the 16 | # mailing list or IRC channel [http://elasticsearch.org/community]. 17 | 18 | # Any element in the configuration can be replaced with environment variables 19 | # by placing them in \${...} notation. For example: 20 | # 21 | #node.rack: \${RACK_ENV_VAR} 22 | 23 | # For information on supported formats and syntax for the config file, see 24 | # 25 | 26 | 27 | ################################### Cluster ################################### 28 | 29 | # Cluster name identifies your cluster for auto-discovery. 
If you're running 30 | # multiple clusters on the same network, make sure you're using unique names. 31 | # 32 | cluster.name: {{ elasticsearch.config.cluster_name }} 33 | 34 | 35 | #################################### Node ##################################### 36 | 37 | # Node names are generated dynamically on startup, so you're relieved 38 | # from configuring them manually. You can tie this node to a specific name: 39 | # 40 | #node.name: "Franz Kafka" 41 | 42 | # Every node can be configured to allow or deny being eligible as the master, 43 | # and to allow or deny to store the data. 44 | # 45 | # Allow this node to be eligible as a master node (enabled by default): 46 | # 47 | #node.master: true 48 | # 49 | # Allow this node to store data (enabled by default): 50 | # 51 | #node.data: true 52 | 53 | # You can exploit these settings to design advanced cluster topologies. 54 | # 55 | # 1. You want this node to never become a master node, only to hold data. 56 | # This will be the "workhorse" of your cluster. 57 | # 58 | #node.master: false 59 | #node.data: true 60 | # 61 | # 2. You want this node to only serve as a master: to not store any data and 62 | # to have free resources. This will be the "coordinator" of your cluster. 63 | # 64 | #node.master: true 65 | #node.data: false 66 | # 67 | # 3. You want this node to be neither master nor data node, but 68 | # to act as a "search load balancer" (fetching data from nodes, 69 | # aggregating results, etc.) 70 | # 71 | #node.master: false 72 | #node.data: false 73 | 74 | # Use the Cluster Health API [http://localhost:9200/_cluster/health], the 75 | # Node Info API [http://localhost:9200/_nodes] or GUI tools 76 | # such as , 77 | # , 78 | # and 79 | # to inspect the cluster state. 80 | 81 | # A node can have generic attributes associated with it, which can later be used 82 | # for customized shard allocation filtering, or allocation awareness. 
An attribute 83 | # is a simple key value pair, similar to node.key: value, here is an example: 84 | # 85 | #node.rack: rack314 86 | 87 | # By default, multiple nodes are allowed to start from the same installation location 88 | # to disable it, set the following: 89 | #node.max_local_storage_nodes: 1 90 | 91 | 92 | #################################### Index #################################### 93 | 94 | # You can set a number of options (such as shard/replica options, mapping 95 | # or analyzer definitions, translog settings, ...) for indices globally, 96 | # in this file. 97 | # 98 | # Note, that it makes more sense to configure index settings specifically for 99 | # a certain index, either when creating it or by using the index templates API. 100 | # 101 | # See and 102 | # 103 | # for more information. 104 | 105 | # Set the number of shards (splits) of an index (5 by default): 106 | # 107 | index.number_of_shards: {{ elasticsearch.config.number_of_shards }} 108 | 109 | # Set the number of replicas (additional copies) of an index (1 by default): 110 | # 111 | index.number_of_replicas: {{ elasticsearch.config.number_of_replicas }} 112 | 113 | # Note, that for development on a local machine, with small indices, it usually 114 | # makes sense to "disable" the distributed features: 115 | # 116 | #index.number_of_shards: 1 117 | #index.number_of_replicas: 0 118 | 119 | # These settings directly affect the performance of index and search operations 120 | # in your cluster. Assuming you have enough machines to hold shards and 121 | # replicas, the rule of thumb is: 122 | # 123 | # 1. Having more *shards* enhances the _indexing_ performance and allows to 124 | # _distribute_ a big index across machines. 125 | # 2. Having more *replicas* enhances the _search_ performance and improves the 126 | # cluster _availability_. 127 | # 128 | # The "number_of_shards" is a one-time setting for an index. 
129 | # 130 | # The "number_of_replicas" can be increased or decreased anytime, 131 | # by using the Index Update Settings API. 132 | # 133 | # Elasticsearch takes care about load balancing, relocating, gathering the 134 | # results from nodes, etc. Experiment with different settings to fine-tune 135 | # your setup. 136 | 137 | # Use the Index Status API () to inspect 138 | # the index status. 139 | 140 | 141 | #################################### Paths #################################### 142 | 143 | # Path to directory containing configuration (this file and logging.yml): 144 | # 145 | #path.conf: /path/to/conf 146 | 147 | # Path to directory where to store index data allocated for this node. 148 | # 149 | #path.data: /path/to/data 150 | # 151 | # Can optionally include more than one location, causing data to be striped across 152 | # the locations (a la RAID 0) on a file level, favouring locations with most free 153 | # space on creation. For example: 154 | # 155 | #path.data: /path/to/data1,/path/to/data2 156 | 157 | # Path to temporary files: 158 | # 159 | #path.work: /path/to/work 160 | 161 | # Path to log files: 162 | # 163 | #path.logs: /path/to/logs 164 | 165 | # Path to where plugins are installed: 166 | # 167 | #path.plugins: /path/to/plugins 168 | 169 | 170 | #################################### Plugin ################################### 171 | 172 | # If a plugin listed here is not installed for current node, the node will not start. 173 | # 174 | #plugin.mandatory: mapper-attachments,lang-groovy 175 | 176 | 177 | ################################### Memory #################################### 178 | 179 | # Elasticsearch performs poorly when JVM starts swapping: you should ensure that 180 | # it _never_ swaps. 
181 | # 182 | # Set this property to true to lock the memory: 183 | # 184 | #bootstrap.mlockall: true 185 | {% if elasticsearch.config.mlockall %} 186 | bootstrap.mlockall: true 187 | {% endif %} 188 | 189 | # Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set 190 | # to the same value, and that the machine has enough memory to allocate 191 | # for Elasticsearch, leaving enough memory for the operating system itself. 192 | # 193 | # You should also make sure that the Elasticsearch process is allowed to lock 194 | # the memory, eg. by using `ulimit -l unlimited`. 195 | 196 | 197 | ############################## Network And HTTP ############################### 198 | 199 | # Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens 200 | # on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node 201 | # communication. (the range means that if the port is busy, it will automatically 202 | # try the next port). 203 | 204 | # Set the bind address specifically (IPv4 or IPv6): 205 | # 206 | #network.bind_host: 192.168.0.1 207 | 208 | # Set the address other nodes will use to communicate with this node. If not 209 | # set, it is automatically derived. It must point to an actual IP address. 
210 | # 211 | #network.publish_host: 192.168.0.1 212 | 213 | # Set both 'bind_host' and 'publish_host': 214 | # 215 | network.host: {{ elasticsearch_bind_ip }} 216 | 217 | # Set a custom port for the node to node communication (9300 by default): 218 | # 219 | #transport.tcp.port: 9300 220 | 221 | # Enable compression for all communication between nodes (disabled by default): 222 | # 223 | #transport.tcp.compress: true 224 | 225 | # Set a custom port to listen for HTTP traffic: 226 | # 227 | #http.port: 9200 228 | 229 | # Set a custom allowed content length: 230 | # 231 | #http.max_content_length: 100mb 232 | 233 | # Disable HTTP completely: 234 | # 235 | #http.enabled: false 236 | 237 | 238 | ################################### Gateway ################################### 239 | 240 | # The gateway allows for persisting the cluster state between full cluster 241 | # restarts. Every change to the state (such as adding an index) will be stored 242 | # in the gateway, and when the cluster starts up for the first time, 243 | # it will read its state from the gateway. 244 | 245 | # There are several types of gateway implementations. For more information, see 246 | # . 247 | 248 | # The default gateway type is the "local" gateway (recommended): 249 | # 250 | #gateway.type: local 251 | 252 | # Settings below control how and when to start the initial recovery process on 253 | # a full cluster restart (to reuse as much local data as possible when using shared 254 | # gateway). 255 | 256 | # Allow recovery process after N nodes in a cluster are up: 257 | # 258 | #gateway.recover_after_nodes: 1 259 | 260 | # Set the timeout to initiate the recovery process, once the N nodes 261 | # from previous setting are up (accepts time value): 262 | # 263 | #gateway.recover_after_time: 5m 264 | 265 | # Set how many nodes are expected in this cluster. 
Once these N nodes 266 | # are up (and recover_after_nodes is met), begin recovery process immediately 267 | # (without waiting for recover_after_time to expire): 268 | # 269 | #gateway.expected_nodes: 2 270 | 271 | 272 | ############################# Recovery Throttling ############################# 273 | 274 | # These settings allow to control the process of shards allocation between 275 | # nodes during initial recovery, replica allocation, rebalancing, 276 | # or when adding and removing nodes. 277 | 278 | # Set the number of concurrent recoveries happening on a node: 279 | # 280 | # 1. During the initial recovery 281 | # 282 | #cluster.routing.allocation.node_initial_primaries_recoveries: 4 283 | # 284 | # 2. During adding/removing nodes, rebalancing, etc 285 | # 286 | #cluster.routing.allocation.node_concurrent_recoveries: 2 287 | 288 | # Set to throttle throughput when recovering (eg. 100mb, by default 20mb): 289 | # 290 | #indices.recovery.max_bytes_per_sec: 20mb 291 | 292 | # Set to limit the number of open concurrent streams when 293 | # recovering a shard from a peer: 294 | # 295 | #indices.recovery.concurrent_streams: 5 296 | 297 | 298 | ################################## Discovery ################################## 299 | 300 | # Discovery infrastructure ensures nodes can be found within a cluster 301 | # and master node is elected. Multicast discovery is the default. 302 | 303 | # Set to ensure a node sees N other master eligible nodes to be considered 304 | # operational within the cluster. Its recommended to set it to a higher value 305 | # than 1 when running more than 2 nodes in the cluster. 306 | # 307 | #discovery.zen.minimum_master_nodes: 1 308 | 309 | # Set the time to wait for ping responses from other nodes when discovering. 
310 | # Set this option to a higher value on a slow or congested network 311 | # to minimize discovery failures: 312 | # 313 | #discovery.zen.ping.timeout: 3s 314 | 315 | # For more information, see 316 | # 317 | 318 | # Unicast discovery allows to explicitly control which nodes will be used 319 | # to discover the cluster. It can be used when multicast is not present, 320 | # or to restrict the cluster communication-wise. 321 | # 322 | # 1. Disable multicast discovery (enabled by default): 323 | # 324 | #discovery.zen.ping.multicast.enabled: false 325 | # 326 | # 2. Configure an initial list of master nodes in the cluster 327 | # to perform discovery when new nodes (master or data) are started: 328 | # 329 | #discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] 330 | 331 | # EC2 discovery allows to use AWS EC2 API in order to perform discovery. 332 | # 333 | # You have to install the cloud-aws plugin for enabling the EC2 discovery. 334 | # 335 | # For more information, see 336 | # 337 | # 338 | # See 339 | # for a step-by-step tutorial. 340 | 341 | # GCE discovery allows to use Google Compute Engine API in order to perform discovery. 342 | # 343 | # You have to install the cloud-gce plugin for enabling the GCE discovery. 344 | # 345 | # For more information, see . 346 | 347 | # Azure discovery allows to use Azure API in order to perform discovery. 348 | # 349 | # You have to install the cloud-azure plugin for enabling the Azure discovery. 350 | # 351 | # For more information, see . 352 | 353 | ################################## Slow Log ################################## 354 | 355 | # Shard level query and fetch threshold logging. 
356 | 357 | index.search.slowlog.threshold.query.warn: 10s 358 | index.search.slowlog.threshold.query.info: 5s 359 | index.search.slowlog.threshold.query.debug: 2s 360 | index.search.slowlog.threshold.query.trace: 500ms 361 | 362 | index.search.slowlog.threshold.fetch.warn: 1s 363 | index.search.slowlog.threshold.fetch.info: 800ms 364 | index.search.slowlog.threshold.fetch.debug: 500ms 365 | index.search.slowlog.threshold.fetch.trace: 200ms 366 | 367 | index.indexing.slowlog.threshold.index.warn: 10s 368 | index.indexing.slowlog.threshold.index.info: 5s 369 | index.indexing.slowlog.threshold.index.debug: 2s 370 | index.indexing.slowlog.threshold.index.trace: 500ms 371 | 372 | ################################## GC Logging ################################ 373 | 374 | #monitor.jvm.gc.young.warn: 1000ms 375 | #monitor.jvm.gc.young.info: 700ms 376 | #monitor.jvm.gc.young.debug: 400ms 377 | 378 | #monitor.jvm.gc.old.warn: 10s 379 | #monitor.jvm.gc.old.info: 5s 380 | #monitor.jvm.gc.old.debug: 2s 381 | 382 | ################################## Security #################################### 383 | 384 | {% if elasticsearch.config.limit_cache_size %} 385 | # Limit cache sizes to avoid DoS attacks 386 | indices.fielddata.cache.size: 50% 387 | indices.fielddata.cache.expire: 2m 388 | {% endif %} 389 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. 
By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. 
If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. 
You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 
113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. 
You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 
165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. 
If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. 
If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 
292 | 293 | {description} 294 | Copyright (C) {year} {fullname} 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 
331 | 332 | {signature of Ty Coon}, 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | --------------------------------------------------------------------------------