├── roles
│   └── elk
│       ├── files
│       │   ├── dnsmasq.conf
│       │   ├── example-pki-scripts
│       │   │   ├── clean.sh
│       │   │   ├── example.sh
│       │   │   ├── apply_config.sh
│       │   │   ├── gen_node_cert.sh
│       │   │   ├── gen_root_ca.sh
│       │   │   ├── gen_client_node_cert.sh
│       │   │   └── etc
│       │   │       ├── root-ca.conf
│       │   │       └── signing-ca.conf
│       │   ├── netty-tcnative-1.1.33.Fork13-linux-x86_64.jar
│       │   ├── logstash-patterns
│       │   │   ├── idmef.pattern
│       │   │   ├── cisco.pattern
│       │   │   ├── snort.pattern
│       │   │   └── ossec.pattern
│       │   ├── sg_roles_mapping.yml
│       │   ├── sg_internal_users.yml
│       │   ├── logstash-conf.d
│       │   │   ├── 98-mail.conf
│       │   │   ├── 50-auditd.conf
│       │   │   ├── 95-common.conf
│       │   │   ├── 20-ossec.conf
│       │   │   ├── 60-netflow.conf
│       │   │   ├── 30-snort.conf
│       │   │   └── 40-cisco.conf
│       │   ├── repos
│       │   │   └── elastico.repo
│       │   ├── sg_roles.yml
│       │   ├── template.json
│       │   └── Beats
│       │       └── filebeat.yml
│       ├── handlers
│       │   └── main.yml
│       ├── templates
│       │   ├── 99-output.conf.j2
│       │   ├── kibana.yml.j2
│       │   └── elasticsearch.yml.j2
│       └── tasks
│           └── main.yml
├── screenshot.png
├── screenshot2.png
├── .gitmodules
├── lightsiem-install.yml
├── test
│   ├── pattern_tests
│   │   ├── ossec-OSSEC_MESSAGE_AGENTLESS_LINE2.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_LINE_MESSAGE.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_LINE_USER.yaml
│   │   ├── cisco-CISCO_ASA_EVENTID.yaml
│   │   ├── cisco-CISCO_TIMESTAMP.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_READD.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_FILE.yaml
│   │   ├── cisco-CISCO_IOS_EVENTID.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_LOGROT_FILE.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_DELETE.yaml
│   │   ├── cisco-CISCO_IOS_TIMESTAMP.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_SIZE.yaml
│   │   ├── ossec-OSSEC_MESSAGE_AGENTLESS_MESSAGE.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_CURMD5.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_OLDMD5.yaml
│   │   ├── ossec-MESSAGE_FULL_SYSCHECK_DELETE1.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_CURSHA.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_OLDSHA.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_LINE1.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_COMMDIFF_DIFF.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_LINE3.yaml
│   │   ├── ossec-OSSEC_MESSAGE_AGENTLESS.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_LINE2.yaml
│   │   ├── ossec-OSSEC_MESSAGE_USR_MSG.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_DIFF.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK_READD_FULL.yaml
│   │   ├── ossec-OSSEC_AGENT_STARTED.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_NO_USER.yaml
│   │   ├── ossec-OSSEC_MESSAGE_FULL_SYSCHECK.yaml
│   │   └── ossec-OSSEC_MESSAGE_FULL_HKLM.yaml
│   ├── Dockerfile.centos
│   └── test.rb
├── README.md
└── .travis.yml
/roles/elk/files/dnsmasq.conf:
--------------------------------------------------------------------------------
1 | interface=lo
2 | bind-interfaces
--------------------------------------------------------------------------------
/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dsvetlov/lightsiem/HEAD/screenshot.png
--------------------------------------------------------------------------------
/screenshot2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dsvetlov/lightsiem/HEAD/screenshot2.png
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "test/logstash-patterns-core"]
2 | path = test/logstash-patterns-core
3 | url = https://github.com/logstash-plugins/logstash-patterns-core.git
4 |
--------------------------------------------------------------------------------
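The test suite (test/test.rb, below) loads upstream grok patterns from this submodule, so it must be initialized before the tests can run. A minimal sketch using standard git commands:
```
git clone https://github.com/dsvetlov/lightsiem.git
cd lightsiem
git submodule update --init
```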
/roles/elk/files/example-pki-scripts/clean.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | rm -rf ca/
3 | rm -rf certs/
4 | rm -rf crl/
5 | rm -f *.jks
6 | rm -f *.pem
7 | rm -f *.p12
8 | rm -f *.csr
9 |
--------------------------------------------------------------------------------
/roles/elk/files/netty-tcnative-1.1.33.Fork13-linux-x86_64.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dsvetlov/lightsiem/HEAD/roles/elk/files/netty-tcnative-1.1.33.Fork13-linux-x86_64.jar
--------------------------------------------------------------------------------
/lightsiem-install.yml:
--------------------------------------------------------------------------------
1 | - name: Install and configure Elasticsearch, Logstash, Kibana
2 | hosts: localhost
3 |   become: yes
4 | gather_facts: yes
5 | roles:
6 | - elk
7 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_AGENTLESS_LINE2.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_AGENTLESS_LINE2
2 | data: "1184d1155"
3 | results:
4 | "[Alert][Agentless][NumChange]": "1184d1155"
5 |
--------------------------------------------------------------------------------
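Every file under test/pattern_tests follows this shape: `pattern` names a grok pattern from roles/elk/files/logstash-patterns (or the upstream core patterns), `data` is a sample input, and `results` maps expected capture names to values; test/test.rb (below) generates one test method per file. A hypothetical extra case, reusing a pattern defined in this repo, would look like:
```
pattern: OSSEC_MESSAGE_FULL_LINE_USER
data: "User: alice"
results:
  "[Alert][Source][User][Ident]": "alice"
```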
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_LINE_MESSAGE.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_LINE_MESSAGE
2 | data: "Message"
3 | results:
4 | "[Alert][Analyzer][rawmessage]": "Message"
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_LINE_USER.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_LINE_USER
2 | data: "User: username"
3 | results:
4 | "[Alert][Source][User][Ident]": "username"
5 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash-patterns/idmef.pattern:
--------------------------------------------------------------------------------
1 | #IDMEF_ALERT %{OSSEC_MESSAGE}|%{SNORT_MESSAGE}|%{SNORT_S5_ERROR}
2 | IDMEF_ALERT %{SNORT_MESSAGE}|%{SNORT_S5_ERROR}
3 | IDMEF_MESSAGE %{IDMEF_ALERT}
4 |
--------------------------------------------------------------------------------
/test/pattern_tests/cisco-CISCO_ASA_EVENTID.yaml:
--------------------------------------------------------------------------------
1 | pattern: CISCO_ASA_EVENTID
2 | data: '%ASA-7-710005'
3 | results:
4 | "[Alert][Analyzer][Level][Origin]": 7
5 | "[Alert][Classification][Ident]": 710005
--------------------------------------------------------------------------------
/test/pattern_tests/cisco-CISCO_TIMESTAMP.yaml:
--------------------------------------------------------------------------------
1 | pattern: CISCO_TIMESTAMP
2 | data: 'Apr 17 2016 12:44:54'
3 | results:
4 | "MONTH": 'Apr'
5 | "MONTHDAY": 17
6 | "YEAR": 2016
7 | "TIME": '12:44:54'
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_READD.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_READD
2 | data: "File '/etc/nsswitch.conf' was re-added."
3 | results:
4 | "[Alert][Target][File][Path]": '/etc/nsswitch.conf'
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_FILE.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_FILE
2 | data: "Integrity checksum changed for: '/etc/switch.conf'"
3 | results:
4 | "[Alert][Target][File][Path]": '/etc/switch.conf'
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/cisco-CISCO_IOS_EVENTID.yaml:
--------------------------------------------------------------------------------
1 | pattern: CISCO_IOS_EVENTID
2 | data: '%LINK-3-UPDOWN'
3 | results:
4 | "[Alert][Group][Text]": '%LINK'
5 | "[Alert][Analyzer][Level][Origin]": 3
6 | "[Alert][Classification][Ident]": UPDOWN
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_LOGROT_FILE.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_LOGROT_FILE
2 | data: "ossec: File rotated (inode changed): '/var/log/messages'."
3 | results:
4 | "[Alert][Target][File][Path]": '/var/log/messages'
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_DELETE.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_DELETE
2 | data: "File '/etc/nsswitch.conf' was deleted. Unable to retrieve checksum."
3 | results:
4 | "[Alert][Target][File][Path]": '/etc/nsswitch.conf'
--------------------------------------------------------------------------------
/test/pattern_tests/cisco-CISCO_IOS_TIMESTAMP.yaml:
--------------------------------------------------------------------------------
1 | pattern: CISCO_IOS_TIMESTAMP
2 | data: '14:17:41 MSK Mon May 25 2015'
3 | results:
4 | "MONTH": 'May'
5 | "MONTHDAY": 25
6 | "YEAR": 2015
7 | "TIME": '14:17:41'
8 | "Timezone": MSK
9 | "DAY": Mon
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_SIZE.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_SIZE
2 | data: "Size changed from '7896' to '7889'"
3 | results:
4 | "[Alert][Target][File][oldsize]": '7896'
5 | "[Alert][Target][File][newsize]": '7889'
6 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_AGENTLESS_MESSAGE.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_AGENTLESS_MESSAGE
2 | data: "< Connection to 1.2.1.2 closed by remote host."
3 | results:
4 | "[Alert][Analyzer][OSSEC][Diff]": "< Connection to 1.2.1.2 closed by remote host."
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_CURMD5.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_CURMD5
2 | data: "New md5sum is : 'ff541deeea8e01f6734961973f048ba4'"
3 | results:
4 | "[Alert][Target][File][chksum][md5cur]": 'ff541deeea8e01f6734961973f048ba4'
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_OLDMD5.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_OLDMD5
2 | data: "Old md5sum was: '035182ab6f0c688260134ac08513fe00'"
3 | results:
4 | "[Alert][Target][File][chksum][md5prev]": '035182ab6f0c688260134ac08513fe00'
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-MESSAGE_FULL_SYSCHECK_DELETE1.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK
2 | data: "File '/home/admin/.ssh/authorized_keys' was deleted. Unable to retrieve checksum."
3 | results:
4 | "[Alert][Target][File][Path]": "/home/admin/.ssh/authorized_keys"
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_CURSHA.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_CURSHA
2 | data: "New sha1sum is : '8a254b233c1b56479af4088fefff82764c6c02eb9'"
3 | results:
4 | "[Alert][Target][File][chksum][SHAcur]": '8a254b233c1b56479af4088fefff82764c6c02eb9'
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_OLDSHA.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_OLDSHA
2 | data: "Old sha1sum was: '1263b2226d6e6e44f33bbbbccc80ca76e9e614df'"
3 | results:
4 | "[Alert][Target][File][chksum][SHAprev]": '1263b2226d6e6e44f33bbbbccc80ca76e9e614df'
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_LINE1.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_LINE1
2 | data: "** Alert 1461745444.5415712: - windows,authentication_success,"
3 | results:
4 | "[Alert][CreateTime]": "1461745444.5415712"
5 | "[Alert][Group][Text]": "windows,authentication_success,"
6 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_COMMDIFF_DIFF.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_COMMDIFF_DIFF
2 | data: "ossec: output: 'netstat -tan |grep LISTEN |grep -v 127.0.0.1 | sort'"
3 | results:
4 | "[Alert][Analyzer][OSSEC][Diff]": "'netstat -tan |grep LISTEN |grep -v 127.0.0.1 | sort'"
5 |
--------------------------------------------------------------------------------
/roles/elk/files/example-pki-scripts/example.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | ./clean.sh
4 | ./gen_root_ca.sh capass capass
5 | ./gen_node_cert.sh $HOSTNAME changeit capass #&& ./gen_node_cert.sh 1 changeit capass && ./gen_node_cert.sh 2 changeit capass
6 | ./gen_client_node_cert.sh admin changeit capass
7 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_LINE3.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_LINE3
2 | data: "Rule: 18107 (level 3) -> 'Windows Logon Success.'"
3 | results:
4 | "[Alert][Classification][Ident]": "18107"
5 | "[Alert][Analyzer][Level][Origin]": "3"
6 | "[Alert][Classification][Text]": "Windows Logon Success."
7 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_AGENTLESS.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_AGENTLESS
2 | data: "ossec: agentless: Change detected:\n1184d1155\n< Connection to 1.2.1.2 closed by remote host.\n"
3 | results:
4 | "[Alert][Agentless][NumChange]": "1184d1155"
5 | "[Alert][Analyzer][OSSEC][Diff]": "< Connection to 1.2.1.2 closed by remote host.\n"
6 |
--------------------------------------------------------------------------------
/roles/elk/files/example-pki-scripts/apply_config.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | /usr/share/elasticsearch/plugins/search-guard-5/tools/sgadmin.sh \
4 | -cd /usr/share/elasticsearch/plugins/search-guard-5/sgconfig/ \
5 | -ks /etc/elasticsearch/sg/admin-keystore.jks \
6 | -ts /etc/elasticsearch/sg/truststore.jks \
7 | -kspass changeit \
8 | -tspass capass \
9 | -nhnv
10 |
--------------------------------------------------------------------------------
/roles/elk/files/sg_roles_mapping.yml:
--------------------------------------------------------------------------------
1 | sg_logstash:
2 | users:
3 | - usr_logstash
4 |
5 | sg_kibana4_server:
6 | users:
7 | - usr_kibana
8 |
9 | sg_kibana4:
10 | users:
11 | - kibanaro
12 | - usr_tv
13 | - usr_svetlov
14 |
15 | sg_all_access:
16 | users:
17 | - admin
18 | - kibanaro
19 | - usr_kibana
20 |
21 | sg_public:
22 | users:
23 | - '*'
24 |
--------------------------------------------------------------------------------
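This file assigns the internal users from sg_internal_users.yml to the Search Guard roles defined in sg_roles.yml. Granting another analyst read-only Kibana access, for example, would mean appending to the sg_kibana4 list; a sketch with a hypothetical user name:
```
sg_kibana4:
  users:
    - kibanaro
    - usr_tv
    - usr_svetlov
    - usr_newanalyst   # hypothetical additional read-only user
```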
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_LINE2.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_LINE2
2 | data: "2016 Apr 27 11:24:04 (host) 1.2.3.4->ExLog"
3 | results:
4 | "MONTH": "Apr"
5 | "MONTHDAY": "27"
6 | "YEAR": "2016"
7 | "TIME": "11:24:04"
8 | "[Alert][Sensor][Node][Name]": "host"
9 | "[Alert][Sensor][Node][Address]": "1.2.3.4"
10 | "[Alert][LogType]": "ExLog"
11 |
--------------------------------------------------------------------------------
/roles/elk/files/sg_internal_users.yml:
--------------------------------------------------------------------------------
1 | usr_kibana:
2 | username: usr_kibana
3 | hash: $2a$12$mN6BBxQQRq6ldHAowrMaMelROuNusvmr7jkZIt9BDWVhRXSDwsD4C
4 | # password: 2FgeR37e1
5 |
6 | kibanaro:
7 | username: kibanaro
8 | hash: $2a$12$mN6BBxQQRq6ldHAowrMaMelROuNusvmr7jkZIt9BDWVhRXSDwsD4C
9 | # password: 2FgeR37e1
10 |
11 | usr_logstash:
12 | username: usr_logstash
13 | hash: $2a$12$ZrNLfRzb2DPvx9wohoA1U.hU2v70xXOsRVNfWlt4yRjfVjtK9ExY2
14 | # password: 23shjY67
15 |
--------------------------------------------------------------------------------
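The `hash` values are bcrypt hashes of the plaintext passwords shown in the comments. Assuming the standard Search Guard 5 tooling referenced in apply_config.sh, a hash for a new user can be generated with the bundled script:
```
/usr/share/elasticsearch/plugins/search-guard-5/tools/hash.sh -p 'NewSecretPassword'
```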
/test/pattern_tests/ossec-OSSEC_MESSAGE_USR_MSG.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_USR_MSG
2 | data: "User: username\nMessage Example Message Example Message Example Message Example Message 123 Example Message 123 Example Message5664 Example Message\n"
3 | results:
4 | "[Alert][Source][User][Ident]": "username"
5 | "[Alert][Analyzer][rawmessage]": "Message Example Message Example Message Example Message Example Message 123 Example Message 123 Example Message5664 Example Message"
6 |
--------------------------------------------------------------------------------
/roles/elk/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart logstash
2 | service: name=logstash state=restarted
3 |
4 | - name: restart elasticsearch
5 | service: name=elasticsearch state=restarted
6 |
7 | - name: restart kibana
8 | command: killall -9 node
9 |
10 | - name: restart dnsmasq
11 | service: name=dnsmasq state=restarted
12 |
13 | - name: restart firewalld
14 | service: name=firewalld state=restarted
15 |
16 | - name: reload systemd
17 | command: systemctl daemon-reload
18 |
--------------------------------------------------------------------------------
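These handlers fire only when a task in roles/elk/tasks/main.yml notifies them and reports a change. A minimal sketch of a notifying task (the task itself is illustrative, not taken from this role):
```
- name: Deploy Logstash pipeline configs
  copy:
    src: logstash-conf.d/
    dest: /etc/logstash/conf.d/
  notify: restart logstash   # must match the handler name exactly
```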
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_DIFF.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK_DIFF
2 | data: "What changed:\n1,2c1,2\n< # up nothing. Note that if the search failed due to some other reason\n< # (like no NIS server responding) then the search continues with the\n---\n> #123\n> #123"
3 | results:
4 | "[Alert][Analyzer][OSSEC][Diff]": "1,2c1,2\n< # up nothing. Note that if the search failed due to some other reason\n< # (like no NIS server responding) then the search continues with the\n---\n> #123\n> #123"
5 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK_READD_FULL.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL
2 | data: "** Alert 1461745444.5415712: - local,syslog,syscheck,\n2016 Apr 27 11:24:04 test->syscheck\nRule: 554 (level 11) -> 'File added to the system.'\nFile '/home/authorized_keys' was re-added."
3 | results:
4 | "[Alert][CreateTime]": "1461745444.5415712"
5 | "[Alert][Group][Text]": "local,syslog,syscheck,"
6 | "MONTH": "Apr"
7 | "MONTHDAY": "27"
8 | "YEAR": "2016"
9 | "TIME": "11:24:04"
10 | "[Alert][Sensor][Node][Name]": "test"
11 | "[Alert][LogType]": "syscheck"
12 | "[Alert][Classification][Ident]": "554"
13 | "[Alert][Analyzer][Level][Origin]": "11"
14 | "[Alert][Classification][Text]": "File added to the system."
15 | "[Alert][Target][File][Path]": "/home/authorized_keys"
16 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_AGENT_STARTED.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL
2 | data: "** Alert 1462946354.41483391: mail - ossec,\n2016 May 1 08:59:14 (user) 1.2.3.4->ossec\nRule: 503 (level 3) -> 'Ossec agent started.'\nossec: Agent started: 'user->1.2.3.4'."
3 | results:
4 | "[Alert][CreateTime]": "1462946354.41483391"
5 | "[Alert][Group][Text]": "ossec,"
6 | "YEAR": "2016"
7 | "MONTH": "May"
8 | "MONTHDAY": "1"
9 | "TIME": "08:59:14"
10 | "[Alert][Sensor][Node][Name]": "user"
11 | "[Alert][Sensor][Node][Address]": "1.2.3.4"
12 | "[Alert][LogType]": "ossec"
13 | "[Alert][Classification][Ident]": "503"
14 | "[Alert][Analyzer][Level][Origin]": "3"
15 | "[Alert][Classification][Text]": "Ossec agent started."
16 | "[Alert][Analyzer][rawmessage]": "ossec: Agent started: 'user->1.2.3.4'."
17 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash-conf.d/98-mail.conf:
--------------------------------------------------------------------------------
1 | output {
2 |
3 | #if [Alert.Analyzer.Level.Normalized] == "15"
4 | #{
5 | # email {
6 | # to => "somemail@company.com"
7 | # subject => "LightSIEM alert - %{Alert.Classification.Text}"
8 | # htmlbody => "%{message}"
9 | # }
10 | #}
11 | if [Alert][Classification][Ident] == "555" {
12 |     if [TIME] =~ /^(1[8-9]|2[0-3]|0[0-8]):[0-5][0-9]:[0-5][0-9]$/ {
13 | email {
14 | to => "somemail@company.com"
15 | subject => "LightSIEM alert - %{OSSEC_MESSAGE_FULL_LINE3}"
16 | htmlbody => "%{message}"
17 | }
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
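Note that `email` is a separate Logstash output plugin; if your Logstash 5.x package does not bundle it, this output will fail to load until the plugin is installed, e.g.:
```
/usr/share/logstash/bin/logstash-plugin install logstash-output-email
```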
/roles/elk/files/repos/elastico.repo:
--------------------------------------------------------------------------------
1 | [elasticsearch-5.x]
2 | name=Elasticsearch repository for 5.x packages
3 | baseurl=https://artifacts.elastic.co/packages/5.x/yum
4 | gpgcheck=1
5 | gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
6 | enabled=1
7 | autorefresh=1
8 | type=rpm-md
9 | includepkgs=elasticsearch
10 |
11 | [logstash-5.x]
12 | name=Elastic repository for 5.x packages
13 | baseurl=https://artifacts.elastic.co/packages/5.x/yum
14 | gpgcheck=1
15 | gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
16 | enabled=1
17 | autorefresh=1
18 | type=rpm-md
19 | includepkgs=logstash
20 |
21 | [kibana-5.x]
22 | name=Kibana repository for 5.x packages
23 | baseurl=https://artifacts.elastic.co/packages/5.x/yum
24 | gpgcheck=1
25 | gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
26 | enabled=1
27 | autorefresh=1
28 | type=rpm-md
29 | includepkgs=kibana
30 |
--------------------------------------------------------------------------------
/roles/elk/templates/99-output.conf.j2:
--------------------------------------------------------------------------------
1 | output {
2 | elasticsearch {
3 | # http protocol
4 | # hosts => ["localhost:9200"]
5 |
6 | # https protocol (use with search-guard)
7 | hosts => ["https://127.0.0.1:9200"]
8 | ssl => true
9 | truststore => '/etc/logstash/ssl/truststore.jks'
10 | truststore_password => "capass"
11 | ssl_certificate_verification => true
12 | keystore => "/etc/logstash/ssl/node-{{ansible_nodename}}-keystore.jks"
13 | keystore_password => "changeit"
14 | user => "usr_logstash"
15 | password => "23shjY67"
16 |
17 | # template
18 | template => "/etc/logstash/template.json"
19 | index => "lightsiem-%{+YYYY.MM.dd}"
20 | template_name => "lightsiem"
21 | template_overwrite => true
22 | }
23 |
24 | # stdout { codec => rubydebug }
25 |
26 | }
27 |
--------------------------------------------------------------------------------
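Because this output talks to Elasticsearch over HTTPS with Search Guard authentication, connectivity can be sanity-checked against the same endpoint with a user from sg_internal_users.yml (`-k` skips certificate verification for a quick manual test):
```
curl -k -u kibanaro:2FgeR37e1 'https://127.0.0.1:9200/_cluster/health?pretty'
```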
/roles/elk/files/logstash-patterns/cisco.pattern:
--------------------------------------------------------------------------------
1 | CISCO_ASA_EVENTID %ASA-%{INT:[Alert][Analyzer][Level][Origin]}-%{POSINT:[Alert][Classification][Ident]}
2 |
3 | CISCO_IOS_EVENTID %{DATA:[Alert][Group][Text]}-%{INT:[Alert][Analyzer][Level][Origin]}-(?:%{GREEDYDATA:[Alert][Classification][Ident]})
4 |
5 | CISCO_TIMESTAMP %{MONTH} %{MONTHDAY} %{YEAR} %{TIME}
6 |
7 | CISCO_IOS_TIMESTAMP %{TIME} %{WORD:Timezone} %{DAY} %{MONTH} %{MONTHDAY} %{YEAR}
8 |
9 |
10 | CISCO_HEADER %{CISCO_ASA_HEADER_ORIGINAL}|%{CISCO_ASAORIOS_HEADER_FORWARDED5424}
11 | CISCO_ASA_HEADER_ORIGINAL %{SYSLOG5424PRI}(?:%{CISCO_TIMESTAMP:timestamp}: )%{CISCO_ASA_EVENTID:[Alert][Classification][Text]}:
12 | CISCO_ASAORIOS_HEADER_FORWARDED5424 %{SYSLOG5424PRI}%{NONNEGINT} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{HOSTNAME:[Alert][Analyzer][Node][Address]}|-) +(?:%{CISCO_ASA_EVENTID:[Alert][Classification][Text]}|-) +(?:%{WORD:syslog5424_proc}|-) +(?:%{WORD:syslog5424_msgid}|-) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|)
13 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL
2 | data: "** Alert 1461745444.5415712: - windows,authentication_success,\n2016 Apr 27 11:24:04 (host) 1.2.3.4->WinLog\nRule: 18107 (level 3) -> 'Windows Logon Success.'\nUser: username\nMessage Example Message Example Message Example Message Example Message 123 Example Message 123 Example Message5664 Example Message\n"
3 | results:
4 | "[Alert][CreateTime]": "1461745444.5415712"
5 | "[Alert][Group][Text]": "windows,authentication_success,"
6 | "MONTH": "Apr"
7 | "MONTHDAY": "27"
8 | "YEAR": "2016"
9 | "TIME": "11:24:04"
10 | "[Alert][Sensor][Node][Name]": "host"
11 | "[Alert][Sensor][Node][Address]": "1.2.3.4"
12 | "[Alert][LogType]": "WinLog"
13 | "[Alert][Classification][Ident]": "18107"
14 | "[Alert][Analyzer][Level][Origin]": "3"
15 | "[Alert][Classification][Text]": "Windows Logon Success."
16 | "[Alert][Source][User][Ident]": "username"
17 | "[Alert][Analyzer][rawmessage]": "Message Example Message Example Message Example Message Example Message 123 Example Message 123 Example Message5664 Example Message"
18 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_NO_USER.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL
2 | data: "** Alert 1461745444.5415712: - windows,authentication_success,\n2016 Apr 27 11:24:04 (host) 1.2.3.4->WinLog\nRule: 18107 (level 3) -> 'Windows Logon Success.'\nUser: (no user)\nMessage Example Message Example Message Example Message Example Message 123 Example Message 123 Example Message5664 Example Message\n"
3 | results:
4 | "[Alert][CreateTime]": "1461745444.5415712"
5 | "[Alert][Group][Text]": "windows,authentication_success,"
6 | "MONTH": "Apr"
7 | "MONTHDAY": "27"
8 | "YEAR": "2016"
9 | "TIME": "11:24:04"
10 | "[Alert][Sensor][Node][Name]": "host"
11 | "[Alert][Sensor][Node][Address]": "1.2.3.4"
12 | "[Alert][LogType]": "WinLog"
13 | "[Alert][Classification][Ident]": "18107"
14 | "[Alert][Analyzer][Level][Origin]": "3"
15 | "[Alert][Classification][Text]": "Windows Logon Success."
16 | "[Alert][Source][User][Ident]": "(no user)"
17 | "[Alert][Analyzer][rawmessage]": "Message Example Message Example Message Example Message Example Message 123 Example Message 123 Example Message5664 Example Message"
18 |
--------------------------------------------------------------------------------
/test/Dockerfile.centos:
--------------------------------------------------------------------------------
1 | FROM centos:7
2 | ENV JAVACMD=/usr/bin/java
3 | # Install systemd -- See https://hub.docker.com/_/centos/
4 | RUN yum -y swap -- remove fakesystemd -- install systemd systemd-libs
5 | RUN yum -y update; yum clean all; \
6 | (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
7 | rm -f /lib/systemd/system/multi-user.target.wants/*; \
8 | rm -f /etc/systemd/system/*.wants/*; \
9 | rm -f /lib/systemd/system/local-fs.target.wants/*; \
10 | rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
11 | rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
12 | rm -f /lib/systemd/system/basic.target.wants/*; \
13 | rm -f /lib/systemd/system/anaconda.target.wants/*;
14 | # Install Ansible
15 | RUN yum -y install epel-release
16 | RUN yum -y install git ansible sudo net-tools
17 | RUN yum clean all
18 | # Disable requiretty
19 | RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers
20 | # Install Ansible inventory file
21 | RUN echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts
22 | VOLUME [ "/sys/fs/cgroup" ]
23 | CMD ["/usr/sbin/init"]
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_SYSCHECK.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL_SYSCHECK
2 | data: "Integrity checksum changed for: '/etc/switch.conf'\nSize changed from '7896' to '7889'\nOld md5sum was: '035182ab6f0c688260134ac08513fe00'\nNew md5sum is : 'ff541deeea8e01f6734961973f048ba4'\nOld sha1sum was: '1263b2226d6e6e44f33bbbbccc80ca76e9e614df'\nNew sha1sum is : '8a254b233c1b56479af4088fefff82764c6c02eb9'\nWhat changed:\n1,2c1,2\n< # up nothing. Note that if the search failed due to some other reason\n< # (like no NIS server responding) then the search continues with the\n---\n> #123\n> #123"
3 | results:
4 | "[Alert][Target][File][Path]": "/etc/switch.conf"
5 | "[Alert][Target][File][oldsize]": "7896"
6 | "[Alert][Target][File][newsize]": "7889"
7 | "[Alert][Target][File][chksum][md5prev]": "035182ab6f0c688260134ac08513fe00"
8 | "[Alert][Target][File][chksum][md5cur]": "ff541deeea8e01f6734961973f048ba4"
9 | "[Alert][Target][File][chksum][SHAprev]": "1263b2226d6e6e44f33bbbbccc80ca76e9e614df"
10 | "[Alert][Target][File][chksum][SHAcur]": "8a254b233c1b56479af4088fefff82764c6c02eb9"
11 | "[Alert][Analyzer][OSSEC][Diff]": "1,2c1,2\n< # up nothing. Note that if the search failed due to some other reason\n< # (like no NIS server responding) then the search continues with the\n---\n> #123\n> #123"
12 |
--------------------------------------------------------------------------------
/roles/elk/files/sg_roles.yml:
--------------------------------------------------------------------------------
1 | sg_kibana4_server:
2 | cluster:
3 | - cluster:monitor/nodes/info
4 | - cluster:monitor/health
5 | indices:
6 | '?kibana':
7 | '*':
8 | - ALL
9 |
10 | sg_kibana4:
11 | indices:
12 | '*':
13 | '*':
14 | - READ
15 | - indices:admin/mappings/fields/get*
16 | - indices:admin/validate/query*
17 | - indices:admin/get*
18 | '?kibana':
19 | '*':
20 | - indices:admin/exists*
21 | - indices:admin/mapping/put*
22 | - indices:admin/mappings/fields/get*
23 | - indices:admin/refresh*
24 | - indices:admin/validate/query*
25 | - indices:data/read/get*
26 | - indices:data/read/mget*
27 | - indices:data/read/search*
28 | - indices:data/write/delete*
29 | - indices:data/write/index*
30 | - indices:data/write/update*
31 |
32 | sg_logstash:
33 | cluster:
34 | - indices:admin/template/get
35 | - indices:admin/template/put
36 | - cluster:monitor/main
37 | - "indices:data/write/bulk*"
38 | indices:
39 | 'lightsiem-*':
40 | '*':
41 | - CRUD
42 | - CREATE_INDEX
43 | 'logstash-*':
44 | '*':
45 | - CRUD
46 | - CREATE_INDEX
47 | '*beat*':
48 | '*':
49 | - CRUD
50 | - CREATE_INDEX
51 |
52 |
53 | sg_all_access:
54 | cluster:
55 | - '*'
56 | indices:
57 | '*':
58 | '*':
59 | - '*'
60 |
--------------------------------------------------------------------------------
/test/pattern_tests/ossec-OSSEC_MESSAGE_FULL_HKLM.yaml:
--------------------------------------------------------------------------------
1 | pattern: OSSEC_MESSAGE_FULL
2 | data: "** Alert 1462276773.70453249: - ossec,syscheck,\n2016 May 03 14:59:33 (user) 1.2.3.4->syscheck-registry\nRule: 595 (level 5) -> 'Registry Integrity Checksum Changed Again (2nd time)'\nIntegrity checksum changed for: 'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Services\\{AAA-123AA-ASDSD123}'\nOld md5sum was: '2407faa6736857f972cf949004124124'\nNew md5sum is : '5d64256732cc97824e09d9f59e124124'\nOld sha1sum was: 'cf6fee98160bbbe3987a44dae8f898061244124'\nNew sha1sum is : '5c368729d8515f0b8109245e9d75e879f1241241'\n"
3 | results:
4 | "[Alert][CreateTime]": "1462276773.70453249"
5 | "[Alert][Group][Text]": "ossec,syscheck,"
6 | "MONTH": "May"
7 | "MONTHDAY": "03"
8 | "YEAR": "2016"
9 | "TIME": "14:59:33"
10 | "[Alert][Sensor][Node][Name]": "user"
11 | "[Alert][Sensor][Node][Address]": "1.2.3.4"
12 | "[Alert][LogType]": "syscheck-registry"
13 | "[Alert][Classification][Ident]": "595"
14 | "[Alert][Analyzer][Level][Origin]": "5"
15 | "[Alert][Classification][Text]": "Registry Integrity Checksum Changed Again (2nd time)"
16 | "[Alert][Target][File][Path]": "HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Services\\{AAA-123AA-ASDSD123}"
17 | "[Alert][Target][File][chksum][md5prev]": "2407faa6736857f972cf949004124124"
18 | "[Alert][Target][File][chksum][md5cur]": "5d64256732cc97824e09d9f59e124124"
19 | "[Alert][Target][File][chksum][SHAprev]": "cf6fee98160bbbe3987a44dae8f898061244124"
20 | "[Alert][Target][File][chksum][SHAcur]": "5c368729d8515f0b8109245e9d75e879f1241241"
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LightSIEM
2 | Lightweight and sexy Security Information and Event Management system for OSSEC, Snort and other IDS/IPS
3 | ![Screenshot of LightSIEM](https://github.com/dsvetlov/lightsiem/blob/master/screenshot.png)
4 | ![Screenshot of LightSIEM GeoIP map](https://github.com/dsvetlov/lightsiem/blob/master/screenshot2.png)
5 | # Installation
6 | LightSIEM is now distributed as an Ansible playbook for RHEL/CentOS/Oracle Linux 7.x.
7 | Install the EPEL repository
8 | ```
9 | yum install http://fedora-mirror01.rbc.ru/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm -y
10 | ```
11 | Install Ansible and additional packages
12 | ```
13 | yum install ansible -y
14 | yum install wget unzip -y
15 | ```
16 | Download the latest playbook code and unpack it
17 | ```
18 | wget https://github.com/dsvetlov/lightsiem/archive/master.zip
19 | unzip master.zip
20 | ```
21 | Run the playbook
22 | ```
23 | ansible-playbook lightsiem-master/lightsiem-install.yml
24 | ```
25 | On your OSSEC server, enable the ability to send alerts via syslog
26 | ```
27 | /var/ossec/bin/ossec-control enable client-syslog
28 | ```
29 | Then add these lines to /var/ossec/etc/ossec.conf to send OSSEC alerts via syslog to Logstash
30 | ```
31 | <ossec_config>
32 | ...
33 |   <syslog_output>
34 |     <server>address of LightSIEM server</server>
35 |     <port>9000</port>
36 |     <format>default</format>
37 |   </syslog_output>
38 | ...
39 | </ossec_config>
40 | ```
41 | Forward the Snort log to LightSIEM in IETF syslog format (RFC 5424).
42 | Example configuration for rsyslogd:
43 | ```
44 | if $programname == 'snort' then {
45 | *.* @@(o)<address of LightSIEM server>:9010;RSYSLOG_SyslogProtocol23Format
46 | &stop
47 | }
48 | ```
49 | Now point your web browser to port 5601 of your LightSIEM server. The default login and password are kibanaro/2FgeR37e1.
50 | 
--------------------------------------------------------------------------------
/roles/elk/files/example-pki-scripts/gen_node_cert.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | NODE_NAME=node-$1
4 |
5 | if [ -z "$3" ] ; then
6 | unset CA_PASS KS_PASS
7 | read -p "Enter CA pass: " -s CA_PASS ; echo
8 | read -p "Enter Keystore pass: " -s KS_PASS ; echo
9 | else
10 | KS_PASS=$2
11 | CA_PASS=$3
12 | fi
13 |
14 | rm -f $NODE_NAME-keystore.jks
15 | rm -f $NODE_NAME.csr
16 | rm -f $NODE_NAME-signed.pem
17 |
18 | echo Generating keystore and certificate for node $NODE_NAME
19 |
20 | keytool -genkey \
21 | -alias $NODE_NAME \
22 | -keystore $NODE_NAME-keystore.jks \
23 | -keyalg RSA \
24 | -keysize 2048 \
25 | -validity 712 \
26 | -keypass $KS_PASS \
27 | -storepass $KS_PASS \
28 | -dname "CN=$NODE_NAME.example.com, OU=SSL, O=Test, L=Test, C=DE" \
29 | -ext san=dns:$NODE_NAME.example.com,ip:127.0.0.1,oid:1.2.3.4.5.5
30 |
31 | echo Generating certificate signing request for node $NODE_NAME
32 |
33 | keytool -certreq \
34 | -alias $NODE_NAME \
35 | -keystore $NODE_NAME-keystore.jks \
36 | -file $NODE_NAME.csr \
37 | -keyalg rsa \
38 | -keypass $KS_PASS \
39 | -storepass $KS_PASS \
40 | -dname "CN=$NODE_NAME.example.com, OU=SSL, O=Test, L=Test, C=DE" \
41 | -ext san=dns:$NODE_NAME.example.com,ip:127.0.0.1,oid:1.2.3.4.5.5
42 |
43 | echo Sign certificate request with CA
44 | openssl ca \
45 | -in $NODE_NAME.csr \
46 | -notext \
47 | -out $NODE_NAME-signed.pem \
48 | -config etc/signing-ca.conf \
49 | -extensions v3_req \
50 | -batch \
51 | -passin pass:$CA_PASS \
52 | -extensions server_ext
53 |
54 | echo "Import back to keystore (including CA chain)"
55 |
56 | cat ca/chain-ca.pem $NODE_NAME-signed.pem | keytool \
57 | -importcert \
58 | -keystore $NODE_NAME-keystore.jks \
59 | -storepass $KS_PASS \
60 | -noprompt \
61 | -alias $NODE_NAME
62 |
63 | keytool -importkeystore -srckeystore $NODE_NAME-keystore.jks -srcstorepass $KS_PASS -srcstoretype JKS -deststoretype PKCS12 -deststorepass $KS_PASS -destkeystore $NODE_NAME-keystore.p12
64 |
65 | echo All done for $NODE_NAME
66 |
--------------------------------------------------------------------------------
/roles/elk/files/example-pki-scripts/gen_root_ca.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | rm -rf ca certs* crl *.jks
4 |
5 | if [ -z "$2" ] ; then
6 | unset CA_PASS TS_PASS
7 | read -p "Enter CA pass: " -s CA_PASS ; echo
8 | read -p "Enter Truststore pass: " -s TS_PASS ; echo
9 | else
10 | CA_PASS=$1
11 | TS_PASS=$2
12 | fi
13 |
14 | mkdir -p ca/root-ca/private ca/root-ca/db crl certs
15 | chmod 700 ca/root-ca/private
16 |
17 | cp /dev/null ca/root-ca/db/root-ca.db
18 | cp /dev/null ca/root-ca/db/root-ca.db.attr
19 | echo 01 > ca/root-ca/db/root-ca.crt.srl
20 | echo 01 > ca/root-ca/db/root-ca.crl.srl
21 |
22 | openssl req -new \
23 | -config etc/root-ca.conf \
24 | -out ca/root-ca.csr \
25 | -keyout ca/root-ca/private/root-ca.key \
26 | -batch \
27 | -passout pass:$CA_PASS
28 |
29 |
30 | openssl ca -selfsign \
31 | -config etc/root-ca.conf \
32 | -in ca/root-ca.csr \
33 | -out ca/root-ca.crt \
34 | -extensions root_ca_ext \
35 | -batch \
36 | -passin pass:$CA_PASS
37 |
38 | echo Root CA generated
39 |
40 | mkdir -p ca/signing-ca/private ca/signing-ca/db crl certs
41 | chmod 700 ca/signing-ca/private
42 |
43 | cp /dev/null ca/signing-ca/db/signing-ca.db
44 | cp /dev/null ca/signing-ca/db/signing-ca.db.attr
45 | echo 01 > ca/signing-ca/db/signing-ca.crt.srl
46 | echo 01 > ca/signing-ca/db/signing-ca.crl.srl
47 |
48 | openssl req -new \
49 | -config etc/signing-ca.conf \
50 | -out ca/signing-ca.csr \
51 | -keyout ca/signing-ca/private/signing-ca.key \
52 | -batch \
53 | -passout pass:$CA_PASS
54 |
55 | openssl ca \
56 | -config etc/root-ca.conf \
57 | -in ca/signing-ca.csr \
58 | -out ca/signing-ca.crt \
59 | -extensions signing_ca_ext \
60 | -batch \
61 | -passin pass:$CA_PASS
62 |
63 | echo Signing CA generated
64 |
65 | openssl x509 -in ca/root-ca.crt -out ca/root-ca.pem -outform PEM
66 | openssl x509 -in ca/signing-ca.crt -out ca/signing-ca.pem -outform PEM
67 | cat ca/signing-ca.pem ca/root-ca.pem > ca/chain-ca.pem
68 |
69 | #http://stackoverflow.com/questions/652916/converting-a-java-keystore-into-pem-format
70 |
71 | cat ca/root-ca.pem | keytool \
72 | -import \
73 | -v \
74 | -keystore truststore.jks \
75 | -storepass $TS_PASS \
76 | -noprompt -alias root-ca-chain
77 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash-conf.d/50-auditd.conf:
--------------------------------------------------------------------------------
1 | input {
2 |
3 | lumberjack {
4 | port => 9030
5 | ssl_certificate => "/etc/logstash/ssl/logstash-forwarder.crt"
6 | ssl_key => "/etc/logstash/ssl/logstash-forwarder.key"
7 | type => auditd
8 |
9 | codec => multiline {
10 | pattern => "^----"
11 | negate => true
12 | what => "previous"
13 | }
14 | }
15 |
16 |
17 | #Stdin input for debug purposes
18 | stdin {
19 | type => auditd
20 |
21 | codec => multiline {
22 | pattern => "^----"
23 | negate => true
24 | what => "previous"
25 | }
26 | }
27 |
28 | }
29 |
30 | filter {
31 |
32 | if [type] == "auditd" {
33 |
34 | grok {
35 | #match => { "message" => "%{DATA}type=EXECVE msg=audit\(%{DATESTAMP}:%{NUMBER}\) \: %{DATA:execve_arguments} \\n%{DATA}\\ntype=SYSCALL %{DATA} auid=%{USERNAME:user} %{GREEDYDATA}" }
36 | #match => { "message" => "%{DATA}type=EXECVE msg=audit\(%{DATESTAMP}:%{NUMBER}\) \: argc=%{NUMBER} %{DATA:Alert.Source.Command} \n(%{DATA}\n)?type=SYSCALL %{DATA} auid=%{USERNAME:Alert.Source.User.Ident} %{GREEDYDATA}" }
37 | match => { "message" => "%{DATA}type=EXECVE msg=audit\(%{DATESTAMP}:%{NUMBER}\) \: argc=%{NUMBER} %{DATA:Alert.Source.Command} \n(%{DATA}\n)?type=SYSCALL %{DATA} auid=%{USERNAME:auid} uid=%{USERNAME:uid} %{GREEDYDATA}" }
38 |
39 |
40 | }
41 |
42 | mutate {
43 | add_field => [ "Alert.Sensor.Node.Name", "%{host}" ]
44 | gsub => [ "Alert.Source.Command", "a\d=", "" ]
45 | }
46 |
47 | if [auid] == "unset" {
48 | mutate {add_field => [ "Alert.Source.User.Ident", "%{uid}" ]}
49 | }
50 | else {
51 | mutate {add_field => [ "Alert.Source.User.Ident", "%{auid}" ]}
52 | }
53 |
54 | }
55 |
56 | }
57 |
--------------------------------------------------------------------------------
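The stdin input above exists for debugging: the multiline codec starts a new event on each line beginning with `----`, the record separator that ausearch prints between audit events. Assuming the Logstash 5.x package layout installed by this role, the filter can be exercised interactively by running just this pipeline and pasting such a record:
```
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/50-auditd.conf
```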
/roles/elk/files/example-pki-scripts/gen_client_node_cert.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | CLIENT_NAME=$1
4 |
5 | if [ -z "$3" ] ; then
6 | unset CA_PASS KS_PASS
7 | read -p "Enter CA pass: " -s CA_PASS ; echo
8 | read -p "Enter Keystore pass: " -s KS_PASS ; echo
9 | else
10 | KS_PASS=$2
11 | CA_PASS=$3
12 | fi
13 |
14 | rm -f $CLIENT_NAME-keystore.jks
15 | rm -f $CLIENT_NAME.csr
16 | rm -f $CLIENT_NAME-signed.pem
17 |
18 | echo Generating keystore and certificate for node $CLIENT_NAME
19 |
20 | keytool -genkey \
21 | -alias $CLIENT_NAME \
22 | -keystore $CLIENT_NAME-keystore.jks \
23 | -keyalg RSA \
24 | -keysize 2048 \
25 | -validity 712 \
26 | -keypass $KS_PASS \
27 | -storepass $KS_PASS \
28 | -dname "CN=$CLIENT_NAME, OU=SSL, O=Test, L=Test, C=DE"
29 |
30 | echo Generating certificate signing request for node $CLIENT_NAME
31 |
32 | keytool -certreq \
33 | -alias $CLIENT_NAME \
34 | -keystore $CLIENT_NAME-keystore.jks \
35 | -file $CLIENT_NAME.csr \
36 | -keyalg rsa \
37 | -keypass $KS_PASS \
38 | -storepass $KS_PASS \
39 | -dname "CN=$CLIENT_NAME, OU=client, O=client, L=Test, C=DE"
40 |
41 | echo Sign certificate request with CA
42 | openssl ca \
43 | -in $CLIENT_NAME.csr \
44 | -notext \
45 | -out $CLIENT_NAME-signed.pem \
46 | -config etc/signing-ca.conf \
47 | -extensions v3_req \
48 | -batch \
49 | -passin pass:$CA_PASS \
50 | -extensions server_ext
51 |
52 | echo "Import back to keystore (including CA chain)"
53 |
54 | cat ca/chain-ca.pem $CLIENT_NAME-signed.pem | keytool \
55 | -importcert \
56 | -keystore $CLIENT_NAME-keystore.jks \
57 | -storepass $KS_PASS \
58 | -noprompt \
59 | -alias $CLIENT_NAME
60 |
61 | keytool -importkeystore -srckeystore $CLIENT_NAME-keystore.jks -srcstorepass $KS_PASS -srcstoretype JKS -deststoretype PKCS12 -deststorepass $KS_PASS -destkeystore $CLIENT_NAME-keystore.p12
62 |
63 | openssl pkcs12 -in $CLIENT_NAME-keystore.p12 -out $CLIENT_NAME.key.pem -nocerts -nodes -passin pass:$KS_PASS
64 | openssl pkcs12 -in $CLIENT_NAME-keystore.p12 -out $CLIENT_NAME.crt.pem -clcerts -nokeys -passin pass:$KS_PASS
65 |
66 | echo All done for $CLIENT_NAME
67 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: ruby
2 | sudo: required
3 | env:
4 | - CONTAINER_ID=$(mktemp)
5 |
6 | rvm:
7 | - "1.9.3"
8 |
9 | services:
10 | - docker
11 |
12 | before_install:
13 | - sudo apt-get update
14 | # Pull containers
15 | - sudo docker pull centos:7
16 | # Customize containers
17 | - sudo docker build --rm=true --file=test/Dockerfile.centos --tag=centos:ansible .
18 | - gem install jls-grok
19 |
20 | script:
21 | - ruby test/test.rb
22 | # Run container in detached state
23 | - sudo docker run --detach --privileged --volume="${PWD}":/etc/ansible/lightsiem:ro --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro centos:ansible /usr/lib/systemd/systemd > /tmp/container_id
24 | # Check syntax of ansible playbook
25 | - sudo docker exec "$(cat /tmp/container_id)" ansible-playbook /etc/ansible/lightsiem/lightsiem-install.yml --syntax-check
26 | # Run ansible playbook
27 | - sudo docker exec "$(cat /tmp/container_id)" ansible-playbook /etc/ansible/lightsiem/lightsiem-install.yml
28 | - sudo docker exec "$(cat /tmp/container_id)" /opt/logstash/bin/logstash -f /etc/logstash/conf.d/ &
29 | - sleep 60
30 |
31 |
32 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lntup
33 | # - sudo docker exec "$(cat /tmp/container_id)" tail /var/log/logstash/logstash.*
34 | # - sudo docker exec "$(cat /tmp/container_id)" /etc/init.d/logstash status
35 |
36 | # - sleep 100
37 | # - sudo docker exec "$(cat /tmp/container_id)" whereis java
38 | # - sudo docker exec "$(cat /tmp/container_id)" export
39 | #Logstash Cisco
40 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lnup | grep "9020"
41 | #Logstash Ossec
42 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lnup | grep "9000"
43 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lntp | grep "9001"
44 | #Logstash snort
45 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lntp | grep "9010"
46 | #Logstash Netflow
47 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lnup | grep "9999"
48 | #Elasticsearch
49 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lntp | grep "127.0.0.1:9200"
50 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lntp | grep "127.0.0.1:9300"
51 | #Kibana
52 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lntp | grep "5601"
53 | #Dnsmasq
54 | - sudo docker exec "$(cat /tmp/container_id)" netstat -lnup | grep "127.0.0.1:53"
55 |
56 | # Clean up
57 | - sudo docker stop "$(cat /tmp/container_id)"
58 |
59 | notifications:
60 | email:
61 | on_failure: always
62 | on_success: change
63 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash-conf.d/95-common.conf:
--------------------------------------------------------------------------------
1 | filter
2 | {
3 |
4 | geoip {
5 | source => "[Alert][Source][Node][Address]"
6 | target => "[Alert][Source][Node][Geoip]"
7 | }
8 |
9 | mutate {
10 | remove_field => ["[Alert][Source][Node][Geoip][real_region_name]"]
11 | remove_field => ["[Alert][Source][Node][Geoip][region_name]"]
12 | remove_field => ["[Alert][Source][Node][Geoip][timezone]"]
13 | remove_field => ["[Alert][Source][Node][Geoip][latitude]"]
14 | remove_field => ["[Alert][Source][Node][Geoip][longitude]"]
15 | remove_field => ["[Alert][Source][Node][Geoip][postal_code]"]
16 | remove_field => ["[Alert][Source][Node][Geoip][dma_code]"]
17 | remove_field => ["[Alert][Source][Node][Geoip][area_code]"]
18 | }
19 |
20 | geoip {
21 | source => "[Alert][Target][Node][Address]"
22 | target => "[Alert][Target][Node][Geoip]"
23 | }
24 |
25 | mutate {
26 | remove_field => ["[Alert][Target][Node][Geoip][real_region_name]"]
27 | remove_field => ["[Alert][Target][Node][Geoip][region_name]"]
28 | remove_field => ["[Alert][Target][Node][Geoip][timezone]"]
29 | remove_field => ["[Alert][Target][Node][Geoip][latitude]"]
30 | remove_field => ["[Alert][Target][Node][Geoip][longitude]"]
31 | remove_field => ["[Alert][Target][Node][Geoip][postal_code]"]
32 | remove_field => ["[Alert][Target][Node][Geoip][dma_code]"]
33 | remove_field => ["[Alert][Target][Node][Geoip][area_code]"]
34 | }
35 |
36 | if ![Alert][Sensor][Node][Name]
37 | { mutate { add_field => [ "[Alert][Sensor][Node][Name]", "%{[Alert][Sensor][Node][Address]}" ] } }
38 |
39 |
40 | if ![Alert][Source][Node][Name] and [Alert][Source][Node][Address]
41 | {
42 | mutate { add_field => [ "[Alert][Source][Node][Name]", "%{[Alert][Source][Node][Address]}" ] }
43 |
44 | dns {
45 | reverse => [ "[Alert][Source][Node][Name]"]
46 | action => "replace"
47 | nameserver => "127.0.0.1"
48 | }
49 | }
50 |
51 | if ![Alert][Target][Node][Name] and [Alert][Target][Node][Address]
52 | {
53 | mutate { add_field => [ "[Alert][Target][Node][Name]", "%{[Alert][Target][Node][Address]}" ] }
54 |
55 | dns {
56 | reverse => [ "[Alert][Target][Node][Name]"]
57 | action => "replace"
58 | nameserver => "127.0.0.1"
59 | }
60 | }
61 |
62 | mutate
63 | {
64 | remove_field => ["syslog5424_pri"]
65 | remove_field => ["timestamp"]
66 | remove_field => ["host"]
67 | add_field => { "[Alert][ReceiveTime]" => "%{@timestamp}" }
68 | #remove_field => [ "message" ]
69 | }
70 |
71 | #date {
72 | #Apr 19 11:55:57
73 | # match => ["@timestamp", "UNIX"]
74 | # target => "[Alert][ReceiveTime]"
75 | #}
76 |
77 |
78 | }
--------------------------------------------------------------------------------
/test/test.rb:
--------------------------------------------------------------------------------
1 | require 'rubygems'
2 | require 'minitest/autorun'
3 | require 'grok-pure'
4 | require 'yaml'
5 |
6 | # Test suite runner for grok patterns
7 | #
8 | # Author:: Tom Hendrikx
9 | # License:: New (3-clause) BSD license
10 |
11 | # This class tests grok patterns, mainly known from logstash.
12 | # It creates test cases from all yaml files in the current
13 | # directory.
14 |
15 | class TestGrokPatterns < MiniTest::Unit::TestCase
16 |
17 | @@test_dir = File.dirname(__FILE__)
18 | @@tests_dir = File.dirname(__FILE__) + '/pattern_tests'
19 | @@upstream_pattern_dir = @@test_dir + '/logstash-patterns-core/patterns'
20 | @@local_pattern_dir = File.dirname(File.expand_path(@@test_dir))
21 | @@local_pattern_dir = @@local_pattern_dir + '/roles/elk/files/logstash-patterns'
22 |
23 |
24 | # Prepare a grok object.
25 | #
26 | # Adds the available upstream and local grok pattern files to
27 | # a new grok object, so it's ready for being used in a test.
28 |
29 | def setup
30 | @grok = Grok.new
31 | Dir.new(@@upstream_pattern_dir).each do |file|
32 | next if file =~ /^\./
33 |
34 | @grok.add_patterns_from_file(@@upstream_pattern_dir + '/' + file)
35 | end
36 | Dir.new(@@local_pattern_dir).each do |file|
37 | next if file !~ /\.pattern$/
38 | @grok.add_patterns_from_file(@@local_pattern_dir + '/' + file)
39 | end
40 | end
41 |
42 | # Test a grok pattern.
43 | #
44 | # The following things are checked:
45 | # - the given pattern name can be compiled using the grok regex library
46 | # - the given data can be parsed using the pattern
47 | # - the given results actually appear in the regex captures.
48 | #
49 | # Arguments:
50 | # pattern:: A pattern name that occurs in the loaded pattern files
51 | # data:: Input data that should be grokked, f.i. a log line
52 | # results:: A list of named captures and their expected contents
53 | def grok_pattern_tester(pattern, data, results)
54 | assert @grok.compile("%{" + pattern + "}", false), "Failed to compile pattern #{pattern}"
55 | assert matches = @grok.match(data), "Pattern #{pattern} did not match data."
56 |
57 | refute_equal results, nil, "Test case is flawed, no results are defined"
58 | captures = matches.captures()
59 | results.each do |field, expected|
60 | assert_includes captures.keys, field
61 | assert_includes captures[field], expected.to_s, "Missing expected data in field '#{field}'"
62 | end
63 | end
64 |
65 | # collect all tests
66 | tests = Hash.new()
67 | Dir.new(@@tests_dir).each do |file|
68 | next if file !~ /\.yaml$/
69 | test = File.basename(file, '.yaml')
70 | puts "#{file}"
71 | conf = YAML.load(File.read(@@tests_dir + '/' + file))
72 | tests[test] = conf
73 | end
74 |
75 | # define test methods for all collected tests
76 | tests.each do |name, conf|
77 | define_method("test_#{name}") do
78 | grok_pattern_tester(conf['pattern'], conf['data'], conf['results'])
79 | end
80 | end
81 | end
82 |
--------------------------------------------------------------------------------
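To run this suite outside CI, the upstream patterns submodule must be checked out and the grok gem available; mirroring .travis.yml:
```
git submodule update --init
gem install jls-grok
ruby test/test.rb
```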
/roles/elk/templates/kibana.yml.j2:
--------------------------------------------------------------------------------
1 | # Kibana is served by a back end server. This controls which port to use.
2 | server.port: 5601
3 |
4 | # The host to bind the server to.
5 | server.host: "0.0.0.0"
6 |
7 | # If you are running kibana behind a proxy, and want to mount it at a path,
8 | # specify that path here. The basePath can't end in a slash.
9 | # server.basePath: ""
10 |
11 | # The Elasticsearch instance to use for all your queries.
12 | elasticsearch.url: "https://localhost:9200"
13 |
14 | # preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false,
15 | # then the host you use to connect to *this* Kibana instance will be sent.
16 | # elasticsearch.preserveHost: true
17 |
18 | # Kibana uses an index in Elasticsearch to store saved searches, visualizations
19 | # and dashboards. It will create a new index if it doesn't already exist.
20 | # kibana.index: ".kibana"
21 |
22 | # The default application to load.
23 | # kibana.defaultAppId: "discover"
24 |
25 | # If your Elasticsearch is protected with basic auth, these are the user credentials
26 | # used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana
27 | # users will still need to authenticate with Elasticsearch (which is proxied through
28 | # the Kibana server)
29 | elasticsearch.username: "usr_kibana"
30 | elasticsearch.password: "2FgeR37e1"
31 |
32 | # SSL for outgoing requests from the Kibana Server to the browser (PEM formatted)
33 | # server.ssl.cert: /path/to/your/server.crt
34 | # server.ssl.key: /path/to/your/server.key
35 |
36 | # Optional setting to validate that your Elasticsearch backend uses the same key files (PEM formatted)
37 | # elasticsearch.ssl.cert: /path/to/your/client.crt
38 | # elasticsearch.ssl.key: /path/to/your/client.key
39 |
40 | # If you need to provide a CA certificate for your Elasticsearch instance, put
41 | # the path of the pem file here.
42 | # elasticsearch.ssl.ca: /path/to/your/CA.pem
43 |
44 | # Set to false to have a complete disregard for the validity of the SSL
45 | # certificate.
46 | elasticsearch.ssl.verify: false
47 |
48 | # Time in milliseconds to wait for elasticsearch to respond to pings, defaults to
49 | # request_timeout setting
50 | # elasticsearch.pingTimeout: 1500
51 |
52 | # Time in milliseconds to wait for responses from the back end or elasticsearch.
53 | # This must be > 0
54 | # elasticsearch.requestTimeout: 300000
55 |
56 | # Time in milliseconds for Elasticsearch to wait for responses from shards.
57 | # Set to 0 to disable.
58 | # elasticsearch.shardTimeout: 0
59 |
60 | # Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying
61 | # elasticsearch.startupTimeout: 5000
62 |
63 | # Set the path to where you would like the process id file to be created.
64 | # pid.file: /var/run/kibana.pid
65 |
66 | # If you would like to send the log output to a file you can set the path below.
67 | # logging.dest: stdout
68 |
69 | # Set this to true to suppress all logging output.
70 | # logging.silent: false
71 |
72 | # Set this to true to suppress all logging output except for error messages.
73 | # logging.quiet: false
74 |
75 | # Set this to true to log all events, including system usage information and all requests.
76 | # logging.verbose: false
77 |
78 | searchguard.cookie.password: "{{ lookup('password', 'chars=ascii_letters,digits,hexdigits,punctuation length=32') }}"
79 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash-patterns/snort.pattern:
--------------------------------------------------------------------------------
1 | SNORT_MESSAGE (%{SYSLOG5424BASE}|%{SYSLOGBASE2}) \[%{DATA:[Alert][Classification][Ident]}\] %{DATA:[Alert][Classification][Text]} \[Classification: %{DATA:[Alert][Group][Text]}\] \[Priority: %{BASE10NUM:[Alert][Analyzer][Level][Origin]}\] \{%{DATA:[Alert][Protocol]}\} %{IPORHOST:[Alert][Source][Node][Address]}(?::%{NONNEGINT:[Alert][Source][Node][Port]})? -> %{IPORHOST:[Alert][Target][Node][Address]}(?::%{NONNEGINT:[Alert][Target][Node][Port]})?
2 |
3 | SNORT_S5_ERROR (%{SYSLOG5424BASE}|%{SYSLOGBASE2}) %{DATA:[Alert][Classification][Ident]}: %{DATA:[Alert][Classification][Text]} %{IPORHOST:[Alert][Source][Node][Address]} %{NONNEGINT:[Alert][Source][Node][Port]} --> %{IPORHOST:[Alert][Target][Node][Address]}
4 |
5 | SNORT_DATE %{MONTHNUM}/%{MONTHDAY}-%{TIME}
6 | SNORT_MESSAGE_LINE1 \[\*\*\] \[%{DATA:[Alert][Classification][Ident]}\] %{DATA:[Alert][Classification][Text]} \[\*\*\]
7 | SNORT_MESSAGE_LINE2 \[Classification: %{DATA:[Alert][Group][Text]}\] \[Priority: %{BASE10NUM:[Alert][Analyzer][Level][Origin]}\]
8 | SNORT_MESSAGE_LINE3 %{DATA:Snort_date} %{IPORHOST:[Alert][Source][Node][Address]}(?::%{NONNEGINT:[Alert][Source][Node][Port]})? -> %{IPORHOST:[Alert][Target][Node][Address]}(?::%{NONNEGINT:[Alert][Target][Node][Port]})?
9 | SNORT_MESSAGE_LINES_CAPTURE %{GREEDYDATA:[Alert][Additional][Capture]}
10 | SNORT_MESSAGE_LINE_REFERENCE \[Xref =\> %{DATA:[Alert][Classification][Reference]}\]
11 | SNORT_MESSAGE_FULL (?m)%{SNORT_MESSAGE_LINE1}[.\r\n]%{SNORT_MESSAGE_LINE2}[.\r\n]*%{SNORT_MESSAGE_LINE3}[.\r\n](%{SNORT_MESSAGE_LINES_CAPTURE}%{SNORT_MESSAGE_LINE_REFERENCE}|%{SNORT_MESSAGE_LINES_CAPTURE})
12 |
13 |
14 | #Use this line in barnyard2.conf: output log_syslog_full: sensor_name snortIds1-eth2, server 192.168.1.15, protocol tcp, port 9012, operation_mode complete, payload_encoding ascii
15 | #|| [SNORTIDS[LOG]: [snortIds1-eth2] ] || 2015-06-20 13:46:27.541+003 2 [129:12:1] stream5: TCP Small Segment Threshold Exceeded || bad-unknown || 6 2.8.1.2 2.8.1.1 4 20 0 81 45056 2 0 1831 0 || 43395 9011 3638166799 3232151186 8 0 24 137 5660 0 || 95 ..>..K.P.Y....E..Q..@.@..'... ......#3....................-L..-L.$.....t...SH..iOC~.K`..s=..... ||
16 |
17 | SNORT_BARNYARD_MESSAGE \| %{SNORT_BARNYARD_HEADER} \|\| %{SNORT_BARNYARD_ALERT} \|\| %{SNORT_BARNYARD_GROUP} \|\| %{SNORT_BARNYARD_IPHEADER} \|\| (?:%{GREEDYDATA:[Snort][Transport][Header]} \|\| )?%{DATA:[Alert][Additional][Capture]} \|\| %{GREEDYDATA}
18 |
19 | #[SNORTIDS[LOG]: [snortIds1-eth2] ]
20 | SNORT_BARNYARD_HEADER \[SNORTIDS\[LOG\]: \[%{DATA:[Alert][Sensor][Node][Name]}\] \]
21 |
22 | #2015-06-20 13:46:27.541+003 2 [129:12:1] stream5: TCP Small Segment Threshold Exceeded
23 | SNORT_BARNYARD_ALERT %{TIMESTAMP_ISO8601:[syslog5424_ts]} %{BASE10NUM:[Alert][Analyzer][Level][Origin]} \[%{DATA:[Alert][Classification][Ident]}\] (?:%{DATA:[Alert][Classification][Text]})?
24 |
25 | #bad-unknown
26 | SNORT_BARNYARD_GROUP %{DATA:[Alert][Group][Text]}
27 |
28 | #6 2.8.1.2 2.8.1.1 4 20 0 81 45056 2 0 1831 0
29 | SNORT_BARNYARD_IPHEADER %{BASE10NUM:[Alert][Protocol][Number]} %{IPORHOST:[Alert][Source][Node][Address]} %{IPORHOST:[Alert][Target][Node][Address]} %{DATA:[Alert][Protocol][Ip][Options]}
30 |
31 | #43395 9011 3638166799 3232151186 8 0 24 137 5660 0
32 | SNORT_BARNYARD_TCPHEADER %{NONNEGINT:[Alert][Source][Node][Port]} %{NONNEGINT:[Alert][Target][Node][Port]} %{DATA:[Alert][Protocol][Tcp][Options]}
33 |
34 | #35691 60000 75 44849
35 | SNORT_BARNYARD_UDPHEADER %{NONNEGINT:[Alert][Source][Node][Port]} %{NONNEGINT:[Alert][Target][Node][Port]} %{DATA:[Alert][Protocol][Udp][Options]}
36 |
37 | SNORT_BARNYARD_ICMPHEADER %{NONNEGINT:[Alert][Source][Icmp][Type]} %{NONNEGINT:[Alert][Source][Icmp][Code]} %{DATA:[Alert][Protocol][Icmp][Options]}
38 |
39 | #default protocol header
40 | SNORT_BARNYARD_DEFAULTHEADER %{GREEDYDATA:[Alert][Header]}
41 |
--------------------------------------------------------------------------------
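
For reference, SNORT_MESSAGE_LINE1 through SNORT_MESSAGE_LINE3 above target Snort's multi-line full alert format; an illustrative, made-up sample of the shape they expect:

    [**] [1:2403478:3] ET CINS Active Threat Intelligence Poor Reputation IP [**]
    [Classification: Misc Attack] [Priority: 2]
    04/20-11:13:42.373893 192.0.2.99:44444 -> 10.0.0.5:443
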
/roles/elk/files/example-pki-scripts/etc/root-ca.conf:
--------------------------------------------------------------------------------
1 | # Simple Root CA
2 |
3 | # The [default] section contains global constants that can be referred to from
4 | # the entire configuration file. It may also hold settings pertaining to more
5 | # than one openssl command.
6 |
7 | [ default ]
8 | ca = root-ca # CA name
9 | dir = . # Top dir
10 |
11 | # The next part of the configuration file is used by the openssl req command.
12 | # It defines the CA's key pair, its DN, and the desired extensions for the CA
13 | # certificate.
14 |
15 | [ req ]
16 | default_bits = 2048 # RSA key size
17 | encrypt_key = yes # Protect private key
18 | default_md = sha1 # MD to use
19 | utf8 = yes # Input is UTF-8
20 | string_mask = utf8only # Emit UTF-8 strings
21 | prompt = no # Don't prompt for DN
22 | distinguished_name = ca_dn # DN section
23 | req_extensions = ca_reqext # Desired extensions
24 |
25 | [ ca_dn ]
26 | 0.domainComponent = "com"
27 | 1.domainComponent = "example"
28 | organizationName = "Example Com Inc."
29 | organizationalUnitName = "Example Com Inc. Root CA"
30 | commonName = "Example Com Inc. Root CA"
31 |
32 | [ ca_reqext ]
33 | keyUsage = critical,keyCertSign,cRLSign
34 | basicConstraints = critical,CA:true
35 | subjectKeyIdentifier = hash
36 |
37 | # The remainder of the configuration file is used by the openssl ca command.
38 | # The CA section defines the locations of CA assets, as well as the policies
39 | # applying to the CA.
40 |
41 | [ ca ]
42 | default_ca = root_ca # The default CA section
43 |
44 | [ root_ca ]
45 | certificate = $dir/ca/$ca.crt # The CA cert
46 | private_key = $dir/ca/$ca/private/$ca.key # CA private key
47 | new_certs_dir = $dir/ca/$ca # Certificate archive
48 | serial = $dir/ca/$ca/db/$ca.crt.srl # Serial number file
49 | crlnumber = $dir/ca/$ca/db/$ca.crl.srl # CRL number file
50 | database = $dir/ca/$ca/db/$ca.db # Index file
51 | unique_subject = no # Require unique subject
52 | default_days = 3652 # How long to certify for
53 | default_md = sha1 # MD to use
54 | policy = any_pol # Default naming policy
55 | email_in_dn = no # Add email to cert DN
56 | preserve = no # Keep passed DN ordering
57 | name_opt = ca_default # Subject DN display options
58 | cert_opt = ca_default # Certificate display options
59 | copy_extensions = copy # Copy extensions from CSR
60 | x509_extensions = signing_ca_ext # Default cert extensions
61 | default_crl_days = 365 # How long before next CRL
62 | crl_extensions = crl_ext # CRL extensions
63 |
64 | # Naming policies control which parts of a DN end up in the certificate and
65 | # under what circumstances certification should be denied.
66 |
67 | [ match_pol ]
68 | domainComponent = match # Must match 'simple.org'
69 | organizationName = match # Must match 'Simple Inc'
70 | organizationalUnitName = optional # Included if present
71 | commonName = supplied # Must be present
72 |
73 | [ any_pol ]
74 | domainComponent = optional
75 | countryName = optional
76 | stateOrProvinceName = optional
77 | localityName = optional
78 | organizationName = optional
79 | organizationalUnitName = optional
80 | commonName = optional
81 | emailAddress = optional
82 |
83 | # Certificate extensions define what types of certificates the CA is able to
84 | # create.
85 |
86 | [ root_ca_ext ]
87 | keyUsage = critical,keyCertSign,cRLSign
88 | basicConstraints = critical,CA:true
89 | subjectKeyIdentifier = hash
90 | authorityKeyIdentifier = keyid:always
91 |
92 | [ signing_ca_ext ]
93 | keyUsage = critical,keyCertSign,cRLSign
94 | basicConstraints = critical,CA:true,pathlen:0
95 | subjectKeyIdentifier = hash
96 | authorityKeyIdentifier = keyid:always
97 |
98 | # CRL extensions exist solely to point to the CA certificate that has issued
99 | # the CRL.
100 |
101 | [ crl_ext ]
102 | authorityKeyIdentifier = keyid:always
103 |
--------------------------------------------------------------------------------
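
A minimal sketch of how root-ca.conf is typically driven (the repo's gen_root_ca.sh presumably automates the equivalent; the [root_ca] section expects this directory layout to exist first):

    mkdir -p ca/root-ca/private ca/root-ca/db
    touch ca/root-ca/db/root-ca.db
    echo 01 > ca/root-ca/db/root-ca.crt.srl
    openssl req -new -config etc/root-ca.conf \
        -out ca/root-ca.csr -keyout ca/root-ca/private/root-ca.key
    openssl ca -selfsign -config etc/root-ca.conf -extensions root_ca_ext \
        -in ca/root-ca.csr -out ca/root-ca.crt
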
/roles/elk/files/example-pki-scripts/etc/signing-ca.conf:
--------------------------------------------------------------------------------
1 | # Simple Signing CA
2 |
3 | # The [default] section contains global constants that can be referred to from
4 | # the entire configuration file. It may also hold settings pertaining to more
5 | # than one openssl command.
6 |
7 | [ default ]
8 | ca = signing-ca # CA name
9 | dir = . # Top dir
10 |
11 | # The next part of the configuration file is used by the openssl req command.
12 | # It defines the CA's key pair, its DN, and the desired extensions for the CA
13 | # certificate.
14 |
15 | [ req ]
16 | default_bits = 2048 # RSA key size
17 | encrypt_key = yes # Protect private key
18 | default_md = sha1 # MD to use
19 | utf8 = yes # Input is UTF-8
20 | string_mask = utf8only # Emit UTF-8 strings
21 | prompt = no # Don't prompt for DN
22 | distinguished_name = ca_dn # DN section
23 | req_extensions = ca_reqext # Desired extensions
24 |
25 | [ ca_dn ]
26 | 0.domainComponent = "com"
27 | 1.domainComponent = "example"
28 | organizationName = "Example Com Inc."
29 | organizationalUnitName = "Example Com Inc. Signing CA"
30 | commonName = "Example Com Inc. Signing CA"
31 |
32 | [ ca_reqext ]
33 | keyUsage = critical,keyCertSign,cRLSign
34 | basicConstraints = critical,CA:true,pathlen:0
35 | subjectKeyIdentifier = hash
36 |
37 | # The remainder of the configuration file is used by the openssl ca command.
38 | # The CA section defines the locations of CA assets, as well as the policies
39 | # applying to the CA.
40 |
41 | [ ca ]
42 | default_ca = signing_ca # The default CA section
43 |
44 | [ signing_ca ]
45 | certificate = $dir/ca/$ca.crt # The CA cert
46 | private_key = $dir/ca/$ca/private/$ca.key # CA private key
47 | new_certs_dir = $dir/ca/$ca # Certificate archive
48 | serial = $dir/ca/$ca/db/$ca.crt.srl # Serial number file
49 | crlnumber = $dir/ca/$ca/db/$ca.crl.srl # CRL number file
50 | database = $dir/ca/$ca/db/$ca.db # Index file
51 | unique_subject = no # Require unique subject
52 | default_days = 730 # How long to certify for
53 | default_md = sha1 # MD to use
54 | policy = any_pol # Default naming policy
55 | email_in_dn = no # Add email to cert DN
56 | preserve = no # Keep passed DN ordering
57 | name_opt = ca_default # Subject DN display options
58 | cert_opt = ca_default # Certificate display options
59 | copy_extensions = copy # Copy extensions from CSR
60 | x509_extensions = client_ext # Default cert extensions
61 | default_crl_days = 7 # How long before next CRL
62 | crl_extensions = crl_ext # CRL extensions
63 |
64 | # Naming policies control which parts of a DN end up in the certificate and
65 | # under what circumstances certification should be denied.
66 |
67 | [ match_pol ]
68 | domainComponent = match # Must match 'simple.org'
69 | organizationName = match # Must match 'Simple Inc'
70 | organizationalUnitName = optional # Included if present
71 | commonName = supplied # Must be present
72 |
73 | [ any_pol ]
74 | domainComponent = optional
75 | countryName = optional
76 | stateOrProvinceName = optional
77 | localityName = optional
78 | organizationName = optional
79 | organizationalUnitName = optional
80 | commonName = optional
81 | emailAddress = optional
82 |
83 | # Certificate extensions define what types of certificates the CA is able to
84 | # create.
85 |
86 | [ client_ext ]
87 | keyUsage = critical,digitalSignature,keyEncipherment
88 | basicConstraints = CA:false
89 | extendedKeyUsage = clientAuth
90 | subjectKeyIdentifier = hash
91 | authorityKeyIdentifier = keyid:always
92 |
93 | [ server_ext ]
94 | keyUsage = critical,digitalSignature,keyEncipherment
95 | basicConstraints = CA:false
96 | extendedKeyUsage = serverAuth,clientAuth
97 | subjectKeyIdentifier = hash
98 | authorityKeyIdentifier = keyid:always
99 |
100 | # CRL extensions exist solely to point to the CA certificate that has issued
101 | # the CRL.
102 |
103 | [ crl_ext ]
104 | authorityKeyIdentifier = keyid:always
105 |
--------------------------------------------------------------------------------
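
The same layout applies to signing-ca.conf with ca=signing-ca. Once the signing CA certificate has been issued by the root, node and client certificates are signed against the [server_ext] and [client_ext] profiles; a hedged sketch (file names and DN are assumptions):

    openssl req -new -nodes -keyout node-1.key -out node-1.csr \
        -subj "/DC=com/DC=example/O=Example Com Inc./CN=node-1"
    openssl ca -config etc/signing-ca.conf -extensions server_ext \
        -in node-1.csr -out node-1.crt
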
/roles/elk/files/logstash-conf.d/20-ossec.conf:
--------------------------------------------------------------------------------
1 | input {
2 | udp {
3 | port => 9000
4 | type => ossec
5 | #For users experiencing problems with encoding: uncomment the line below and set the appropriate charset.
6 | #codec => plain { charset => "Windows-1251" }
7 | }
8 |
9 | #Stdin input for debug purposes
10 | # stdin {
11 | # type => ossec_full
12 |
13 | # codec => multiline {
14 | # pattern => "^\*\*"
15 | # negate => true
16 | # what => "previous"
17 | #For users experiencing problems with encoding: uncomment the line below and set the appropriate charset.
18 | #codec => plain { charset => "Windows-1251" }
19 | # }
20 | #}
21 | #Input for filebeat
22 | beats {
23 | port => 9001
24 | type => ossec_full
25 |
26 | }
27 |
28 |
29 | }
30 |
31 |
32 | filter {
33 | if [type] == "ossec_full"
34 | {
35 |
36 | grok {
37 | #** Alert 1433185454.285: - pam,syslog,authentication_success,
38 | patterns_dir => ["/opt/lightsiem/patterns/"]
39 | match => { "message" => "%{OSSEC_MESSAGE_FULL}" }
40 | }
41 |
42 | date {
43 | match => ["[Alert][CreateTime]", "UNIX"]
44 | target => "[Alert][CreateTime]"
45 | }
46 | mutate {
47 | add_field => [ "[Alert][Analyzer][Name]", "ossec" ]
48 | add_field => [ "[Alert][Analyzer][Node][Name]", "%{host}" ]
49 | }
50 | }
51 |
52 | # if [type] == "ossec" {
53 |
54 | # grok {
55 | # match => { "message" => "%{IDMEF_MESSAGE}" }
56 | # }
57 |
58 | # if ![syslogbase] { mutate { add_field => { "syslogbase" => "" } } }
59 | # if ![program] { mutate { add_field => { "program" => "" } } }
60 | # if ![timestamp] { mutate { add_field => { "timestamp" => "" } } }
61 |
62 | # date {
63 | # #Apr 19 11:55:57
64 | # match => ["Alert.CreateTime", "MMM dd HH:mm:ss", "MMM d HH:mm:ss"]
65 | # target => "Alert.CreateTime"
66 | # }
67 |
68 | # mutate {
69 | # add_field => [ "Alert.Analyzer.Node.Address", "%{host}" ]
70 |
71 | # remove_field => [ "type" ]
72 | # remove_field => [ "logsource" ]
73 | # add_field => [ "Alert.Source.Process.Pid", "%{pid}" ]
74 | # remove_field => [ "pid" ]
75 | # add_field => [ "Alert.Source.Process.Name", "%{program}" ]
76 | # remove_field => [ "program" ]
77 | # add_field => [ "Alert.Analyzer.rawmessage", "%{syslogbase} %{syslog_message}" ]
78 | # add_field => [ "Alert.DetectTime", "%{timestamp}" ]
79 |
80 |
81 |
82 | # remove_field => [ "syslog_message" ]
83 | # remove_field => [ "syslogbase" ]
84 |
85 | # }
86 | # }
87 |
88 | if [type] in ["ossec", "ossec_full"]
89 | {
90 |
91 | # Drop message "< Connection to 1.2.3.4 closed by remote host."
92 |
93 | if [Alert][Classification][Ident] == "555" {
94 | if ([Alert][Analyzer][OSSEC][Diff] =~ "^[\>\<] Connection to [0-9.-]+ closed by remote host.$") {
95 | drop { }
96 | }
97 | }
98 | #Apr 19 11:55:57
99 | if [Alert][Analyzer][Level][Origin] == "1" {
100 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "1" ] }
101 | } else if [Alert][Analyzer][Level][Origin] == "2" {
102 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "2" ] }
103 | } else if [Alert][Analyzer][Level][Origin] == "3" {
104 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "3" ] }
105 | } else if [Alert][Analyzer][Level][Origin] == "4" {
106 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "4" ] }
107 | } else if [Alert][Analyzer][Level][Origin] == "5" {
108 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "5" ] }
109 | } else if [Alert][Analyzer][Level][Origin] == "6" {
110 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "6" ] }
111 | } else if [Alert][Analyzer][Level][Origin] == "7" {
112 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "7" ] }
113 | }
114 |
115 | mutate {
116 | convert => [ "[Alert][Analyzer][Level][Origin]", "integer" ]
117 | convert => [ "[Alert][Analyzer][Level][Normalized]", "integer" ]
118 | }
119 |
120 |
121 | }
122 | }
123 |
--------------------------------------------------------------------------------
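
The beats input on port 9001 expects each OSSEC alert to arrive as a single pre-joined event, so the multiline merge has to happen on the shipper. A sketch of a matching Filebeat 5.x prospector (paths and host are assumptions; the bundled Beats/filebeat.yml plays this role):

    filebeat.prospectors:
      - input_type: log
        paths:
          - /var/ossec/logs/alerts/alerts.log
        multiline.pattern: '^\*\*'
        multiline.negate: true
        multiline.match: after
    output.logstash:
      hosts: ["siem.example.com:9001"]
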
/roles/elk/files/logstash-conf.d/60-netflow.conf:
--------------------------------------------------------------------------------
1 | input {
2 | udp {
3 | port => 9999
4 | type => netflow
5 | codec => netflow {
6 | versions => [9]
7 | }
8 | }
9 | }
10 | filter {
11 | if [type] == "netflow"
12 | {
13 | mutate {
14 | add_field => [ "[Alert][Analyzer][Netflow][Flow_ID]", "%{[netflow][ipv4_src_addr]}:%{[netflow][l4_src_port]}=>%{[netflow][ipv4_dst_addr]}:%{[netflow][l4_dst_port]}" ]
15 | }
16 |
17 | # netflow protocol => InProtocol
18 | mutate {
19 | convert => [ "[netflow][protocol]", "string" ]
20 | }
21 |
22 | if [netflow][protocol] == "6" {
23 | mutate { add_field => [ "[Alert][Analyzer][Netflow][InProtocol]", "TCP" ] }
24 | } else if [netflow][protocol] == "17" {
25 | mutate { add_field => [ "[Alert][Analyzer][Netflow][InProtocol]", "UDP" ] }
26 | } else if [netflow][protocol] == "1" {
27 | mutate { add_field => [ "[Alert][Analyzer][Netflow][InProtocol]", "ICMP" ] }
28 | } else if [netflow][protocol] == "112" {
29 | mutate { add_field => [ "[Alert][Analyzer][Netflow][InProtocol]", "VRRP" ] }
30 | }
31 |
32 | # mutate {
33 | # remove_field => [ "[netflow][protocol]" ]
34 | # }
35 | #
36 | # Alert.Source.Node
37 | mutate {
38 | add_field => [ "[Alert][Source][Node][Address]", "%{[netflow][ipv4_src_addr]}" ]
39 | add_field => [ "[Alert][Source][Node][Port]", "%{[netflow][l4_src_port]}" ]
40 | add_field => [ "[Alert][Source][Node][AS]", "%{[netflow][src_as]}" ]
41 | add_field => [ "[Alert][Source][Node][Mask]", "%{[netflow][src_mask]}" ]
42 |
43 | convert => [ "[Alert][Source][Node][AS]", "integer" ]
44 | remove_field => [ "[netflow][ipv4_src_addr]" ]
45 | remove_field => [ "[netflow][l4_src_port]" ]
46 | remove_field => [ "[netflow][src_as]" ]
47 | remove_field => [ "[netflow][src_mask]" ]
48 | }
49 |
50 | # Alert.Target.Node
51 | mutate {
52 | add_field => [ "[Alert][Target][Node][Address]", "%{[netflow][ipv4_dst_addr]}" ]
53 | add_field => [ "[Alert][Target][Node][Port]", "%{[netflow][l4_dst_port]}" ]
54 | add_field => [ "[Alert][Target][Node][AS]", "%{[netflow][dst_as]}" ]
55 | add_field => [ "[Alert][Target][Node][Mask]", "%{[netflow][dst_mask]}" ]
56 |
57 | remove_field => [ "[netflow][ipv4_dst_addr]" ]
58 | remove_field => [ "[netflow][l4_dst_port]" ]
59 | remove_field => [ "[netflow][dst_as]" ]
60 | remove_field => [ "[netflow][dst_mask]" ]
61 |
62 | }
63 | # Alert.Analyzer.Netflow
64 | mutate {
65 | add_field => [ "[Alert][Analyzer][Netflow][Direction]", "%{[netflow][direction]}" ]
66 | add_field => [ "[Alert][Analyzer][Netflow][FlowSamplerId]", "%{[netflow][flow_sampler_id]}" ]
67 | add_field => [ "[Alert][Analyzer][Netflow][FlowSeqNum]", "%{[netflow][flow_seq_num]}" ]
68 | add_field => [ "[Alert][Analyzer][Netflow][FlowsetId]", "%{[netflow][flowset_id]}" ]
69 | add_field => [ "[Alert][Analyzer][Netflow][InBytes]", "%{[netflow][in_bytes]}" ]
70 | add_field => [ "[Alert][Analyzer][Netflow][InPkts]", "%{[netflow][in_pkts]}" ]
71 | add_field => [ "[Alert][Analyzer][Netflow][InputSNMP]", "%{[netflow][input_snmp]}" ]
72 | add_field => [ "[Alert][Analyzer][Netflow][NextHop]", "%{[netflow][ipv4_next_hop]}" ]
73 | add_field => [ "[Alert][Analyzer][Netflow][OutputSNMP]", "%{[netflow][output_snmp]}" ]
74 | add_field => [ "[Alert][Analyzer][Netflow][ToS]", "%{[netflow][src_tos]}" ]
75 | add_field => [ "[Alert][Analyzer][Netflow][TCPFlags]", "%{[netflow][tcp_flags]}" ]
76 | add_field => [ "[Alert][Analyzer][Netflow][NetflowVersion]", "%{[netflow][version]}" ]
77 |
78 | remove_field => [ "[netflow][direction]" ]
79 | remove_field => [ "[netflow][flow_sampler_id]" ]
80 | remove_field => [ "[netflow][flow_seq_num]" ]
81 | remove_field => [ "[netflow][flowset_id]" ]
82 | remove_field => [ "[netflow][in_bytes]" ]
83 | remove_field => [ "[netflow][in_pkts]" ]
84 | remove_field => [ "[netflow][input_snmp]" ]
85 | remove_field => [ "[netflow][ipv4_next_hop]" ]
86 | remove_field => [ "[netflow][output_snmp]" ]
87 | remove_field => [ "[netflow][src_tos]" ]
88 | remove_field => [ "[netflow][tcp_flags]" ]
89 | remove_field => [ "[netflow][version]" ]
90 |
91 | }
92 |
93 | # Alert.Analyzer.Netflow Time
94 | mutate {
95 | add_field => [ "[Alert][Analyzer][Netflow][FirstSwitched]", "%{[netflow][first_switched]}" ]
96 | add_field => [ "[Alert][Analyzer][Netflow][LastSwitched]", "%{[netflow][last_switched]}" ]
97 |
98 | remove_field => [ "[netflow][first_switched]" ]
99 | remove_field => [ "[netflow][last_switched]" ]
100 | }
101 | }
102 | }
103 |
--------------------------------------------------------------------------------
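
Only NetFlow v9 is decoded by the codec above, so exporters must be pinned to that version. An illustrative classic-IOS exporter configuration (addresses and interface are assumptions):

    ip flow-export destination 192.0.2.10 9999
    ip flow-export version 9
    interface GigabitEthernet0/0
     ip flow ingress
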
/roles/elk/files/logstash-patterns/ossec.pattern:
--------------------------------------------------------------------------------
1 | OSSECCNF ossec.conf
2 | WINPATHHKLM ((?>[\w_%!$@:.,-{}]+|\\.)*)+
3 | NUMCHANGE ((?>[0-9,]+[acd][0-9,]+))
4 | 
5 | 
6 | 
7 | 
8 | 
9 | OSSEC_MESSAGE_FULL (?m)%{OSSEC_MESSAGE_FULL_LINE1}\n%{OSSEC_MESSAGE_FULL_LINE2}\n%{OSSEC_MESSAGE_FULL_LINE3}\n(?>%{OSSEC_MESSAGE_USR_MSG}|%{OSSEC_MESSAGE_FULL_SYSCHECK}|%{OSSEC_MESSAGE_FULL_COMMDIFF_DIFF}|%{OSSEC_MESSAGE_FULL_LOGROT_FILE}|%{OSSEC_MESSAGE_AGENTLESS}|%{OSSEC_MESSAGE_FULL_LINE_MESSAGE})
10 |
11 |
12 | #** Alert 1433185454.0: - syslog,sshd,authentication_success,WinEvtLog,Rootcheck,ossec
13 | #2016 Apr 18 03:57:24 (host) 1.2.3.4->logtype
14 | #Rule: 5715 (level 3) -> 'Authentication success.'
15 |
16 | OSSEC_MESSAGE_FULL_LINE1 \*\* Alert %{NUMBER:[Alert][CreateTime]}: %{WORD}?%{SPACE}\- ?%{GREEDYDATA:[Alert][Group][Text]}
17 |
18 | OSSEC_MESSAGE_FULL_LINE2 %{YEAR} %{MONTH} %{MONTHDAY} %{TIME} (%{USERNAME:[Alert][Sensor][Node][Name]}|\(%{USERNAME:[Alert][Sensor][Node][Name]}\))%{SPACE}((%{IP:[Alert][Sensor][Node][Address]}|ossec@%{IP:[Alert][Sensor][Node][Address]})->%{GREEDYDATA:[Alert][LogType]}|->%{GREEDYDATA:[Alert][LogType]})
19 |
20 | OSSEC_MESSAGE_FULL_LINE3 Rule: %{BASE10NUM:[Alert][Classification][Ident]} \(level %{BASE10NUM:[Alert][Analyzer][Level][Origin]}\) -> '%{DATA:[Alert][Classification][Text]}'
21 |
22 |
23 |
24 |
25 | #User and message
26 |
27 | OSSEC_MESSAGE_USR_MSG %{OSSEC_MESSAGE_FULL_LINE_USER}\n%{OSSEC_MESSAGE_FULL_LINE_MESSAGE}\n
28 |
29 | #User: username
30 |
31 | OSSEC_MESSAGE_FULL_LINE_USER (User: (%{USERNAME:[Alert][Source][User][Ident]}\$)|User: (%{USERNAME:[Alert][Source][User][Ident]})|User: %{GREEDYDATA:[Alert][Source][User][Ident]})
32 |
33 | #Message
34 |
35 | OSSEC_MESSAGE_FULL_LINE_MESSAGE %{YEAR} %{MONTH} %{MONTHDAY} %{TIME} WinEvtLog: %{DATA}: %{DATA}\(%{BASE10NUM:[Alert][Classification][WinEvtID]}\):%{GREEDYDATA:[Alert][Analyzer][rawmessage]}|%{GREEDYDATA:[Alert][Analyzer][rawmessage]}
36 |
37 |
38 |
39 |
40 | #Syscheck
41 |
42 | OSSEC_MESSAGE_FULL_SYSCHECK (?>%{OSSEC_MESSAGE_FULL_SYSCHECK_FILE}\n(%{OSSEC_MESSAGE_FULL_SYSCHECK_SIZE}\n)?(%{OSSEC_MESSAGE_FULL_SYSCHECK_OLDMD5}\n)?(%{OSSEC_MESSAGE_FULL_SYSCHECK_CURMD5}\n)?(%{OSSEC_MESSAGE_FULL_SYSCHECK_OLDSHA}\n)?(%{OSSEC_MESSAGE_FULL_SYSCHECK_CURSHA}\n)?(%{OSSEC_MESSAGE_FULL_SYSCHECK_DIFF})?)|(%{OSSEC_MESSAGE_FULL_SYSCHECK_DELETE})|(%{OSSEC_MESSAGE_FULL_SYSCHECK_READD})
43 |
44 | #Integrity checksum changed for: 'ossec.conf'
45 | #Size changed from '7896' to '7889'
46 | #Old md5sum was: '035182ab6f0c688260134ac08513fe00'
47 | #New md5sum is : 'ff541deeea8e01f6734961973f048ba4'
48 | #Old sha1sum was: '1263b2226d6e6e44f33bbbbccc80ca76e9e614df'
49 | #New sha1sum is : '8a254b233c1b56479af4088fefff82764c6c02eb9'
50 |
51 | #Integrity checksum changed for: '/etc/switch.conf'
52 | OSSEC_MESSAGE_FULL_SYSCHECK_FILE Integrity checksum changed for: '(%{PATH:[Alert][Target][File][Path]}|%{OSSECCNF:[Alert][Target][File][Path]}|%{WINPATHHKLM:[Alert][Target][File][Path]})'
53 |
54 | #Size changed from '1465' to '1336'
55 | OSSEC_MESSAGE_FULL_SYSCHECK_SIZE Size changed from '%{BASE10NUM:[Alert][Target][File][oldsize]}' to '%{BASE10NUM:[Alert][Target][File][newsize]}'
56 |
57 | #Old md5sum was: '3a38cfbb74aec4b3b011bb0ee3ce7828'
58 | OSSEC_MESSAGE_FULL_SYSCHECK_OLDMD5 Old md5sum was: '%{DATA:[Alert][Target][File][chksum][md5prev]}'
59 |
60 | #New md5sum is : '8a3cf0d94719677115db91589e73c54e'
61 | OSSEC_MESSAGE_FULL_SYSCHECK_CURMD5 New md5sum is : '%{DATA:[Alert][Target][File][chksum][md5cur]}'
62 |
63 | #Old sha1sum was: '63386c46e9f5dc88643ca09a731f7a8321287f2a'
64 | OSSEC_MESSAGE_FULL_SYSCHECK_OLDSHA Old sha1sum was: '%{DATA:[Alert][Target][File][chksum][SHAprev]}'
65 |
66 | #New sha1sum is : 'a19587b1386ac676ceebeff5a71d9e035fef4caf'
67 | OSSEC_MESSAGE_FULL_SYSCHECK_CURSHA New sha1sum is : '%{DATA:[Alert][Target][File][chksum][SHAcur]}'
68 |
69 |
70 |
71 | #What changed:
72 | #1,2c1,2
73 | #< # up nothing. Note that if the search failed due to some other reason
74 | #< # (like no NIS server responding) then the search continues with the
75 | #---
76 | #> #123
77 | #> #123
78 |
79 |
80 | OSSEC_MESSAGE_FULL_SYSCHECK_DIFF What changed:\n%{GREEDYDATA:[Alert][Analyzer][OSSEC][Diff]}
81 |
82 | #File '%.756s' was deleted. Unable to retrieve checksum.
83 | OSSEC_MESSAGE_FULL_SYSCHECK_DELETE File '%{PATH:[Alert][Target][File][Path]}' was deleted. Unable to retrieve checksum.
84 |
85 | #File '%.756s' was re-added.
86 | OSSEC_MESSAGE_FULL_SYSCHECK_READD File '%{PATH:[Alert][Target][File][Path]}' was re-added.
87 |
88 |
89 |
90 |
91 | #** Alert 1435051355.484034070: mail - ossec,
92 | #2015 Jun 23 12:22:35 (host) 1.2.1.1->netstat -tan |grep LISTEN |grep -v 127.0.0.1 | sort
93 | #Rule: 533 (level 7) -> 'Listened ports status (netstat) changed (new port opened or closed).'
94 | #ossec: output: 'netstat -tan |grep LISTEN |grep -v 127.0.0.1 | sort':
95 | #tcp6 0 0 :::22 :::* LISTEN
96 | #Previous output:
97 | #ossec: output: 'netstat -tan |grep LISTEN |grep -v 127.0.0.1 | sort':
98 | #tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN
99 | #tcp6 0 0 :::22 :::* LISTEN
100 |
101 | OSSEC_MESSAGE_FULL_COMMDIFF_DIFF ossec: output: %{GREEDYDATA:[Alert][Analyzer][OSSEC][Diff]}
102 |
103 |
104 |
105 |
106 | #** Alert 1434932371.94243012: - ossec,
107 | #2015 Jun 22 03:19:31 (host) 1.7.2.4->ossec-logcollector
108 | #Rule: 591 (level 3) -> 'Log file rotated.'
109 | #ossec: File rotated (inode changed): '/var/log/messages'.
110 |
111 | OSSEC_MESSAGE_FULL_LOGROT_FILE ossec\: File rotated \(inode changed\)\: '%{DATA:[Alert][Target][File][Path]}'.
112 |
113 |
114 |
115 | #Agentless
116 | #** Alert 1461069815.125470837: mail - ossec,syscheck,agentless
117 | #2016 Apr 19 15:43:35 (ssh_pixconfig_diff) ossec@1.2.1.2->agentless
118 | #Rule: 555 (level 7) -> 'Integrity checksum for agentless device changed.'
119 | #ossec: agentless: Change detected:
120 | #1184d1155
121 | #< Connection to 1.2.1.2 closed by remote host.
122 |
123 | OSSEC_MESSAGE_AGENTLESS %{OSSEC_MESSAGE_AGENTLESS_LINE1}\n%{OSSEC_MESSAGE_AGENTLESS_LINE2}\n%{OSSEC_MESSAGE_AGENTLESS_MESSAGE}
124 |
125 | OSSEC_MESSAGE_AGENTLESS_LINE1 ossec\: agentless\: Change detected:
126 | OSSEC_MESSAGE_AGENTLESS_LINE2 %{NUMCHANGE:[Alert][Agentless][NumChange]}
127 | OSSEC_MESSAGE_AGENTLESS_MESSAGE %{GREEDYDATA:[Alert][Analyzer][OSSEC][Diff]}
128 |
--------------------------------------------------------------------------------
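
Putting the pieces together, OSSEC_MESSAGE_FULL is meant to consume a complete multi-line alert like this illustrative one (values made up, shape taken from the comments above):

    ** Alert 1433185454.285: - pam,syslog,authentication_success,
    2015 Jun 01 21:24:14 (web01) 192.0.2.7->/var/log/secure
    Rule: 5715 (level 3) -> 'Authentication success.'
    User: root
    Jun  1 21:24:13 web01 sshd[1632]: Accepted password for root from 192.0.2.99 port 51121 ssh2
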
/roles/elk/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Place Elasticsearch repo file
2 | copy: src=repos/elastico.repo dest=/etc/yum.repos.d/
3 |
4 | - name: Import GPG key
5 | command: rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
6 |
7 | - name: Install packages
8 | yum: name={{item}} state=present
9 | with_items:
10 | - elasticsearch-5.1.1
11 | - java
12 | - wget
13 | - epel-release
14 | - logstash
15 | - dnsmasq
16 | - kibana-5.1.1
17 | - elasticdump
18 | - openssl
19 | - psmisc
20 |
21 | - name: Add software to autostart
22 | command: systemctl enable {{item}}
23 | with_items:
24 | - elasticsearch
25 | - logstash
26 | - dnsmasq
27 | - kibana
28 |
29 | - name: Place Elasticsearch config file
30 | template: src=elasticsearch.yml.j2 dest=/etc/elasticsearch/elasticsearch.yml
31 | notify:
32 | - restart elasticsearch
33 | tags:
34 | - configuration
35 |
36 | # - name: Install search-guard-ssl
37 | # command: /usr/share/elasticsearch/bin/plugin install com.floragunn/search-guard-ssl/2.3.4.16
38 | # tags: configuration security
39 |
40 | - name: Check search-guard-5 is installed
41 | shell: /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep search-guard-5 || true
42 | register: sg_installed
43 |
44 | - name: Install search-guard-5
45 | command: /usr/share/elasticsearch/bin/elasticsearch-plugin install -b com.floragunn:search-guard-5:5.1.1-9
46 | tags: configuration security
47 | when: "'search-guard-5' not in sg_installed.stdout"
48 |
49 |
50 | - name: Add netty-tcnative to search-guard plugin
51 | copy: src=netty-tcnative-1.1.33.Fork13-linux-x86_64.jar dest=/usr/share/elasticsearch/plugins/search-guard-5/
52 | tags: configuration security
53 |
54 | - name: Create ssl folder for searchguard
55 | file: path=/opt/lightsiem/sg/ state=directory
56 |
57 | - name: Place search-guard-ssl pki scripts
58 | copy: src=example-pki-scripts/ dest=/opt/lightsiem/sg/ mode="o+x"
59 | tags: configuration security
60 |
61 | - name: Create your own Root CA, Generate Keystores, Client node cert
62 | shell: cd /opt/lightsiem/sg/ && /opt/lightsiem/sg/example.sh
63 | tags: configuration security
64 |
65 | - name: Create folder for ssl certificates
66 | file: path=/etc/elasticsearch/sg/ state=directory
67 |
68 | - name: Copy keystores
69 | copy: src=/opt/lightsiem/sg/{{item}} dest=/etc/elasticsearch/sg/
70 | with_items:
71 | - node-{{ansible_nodename}}-keystore.jks
72 | - node-{{ansible_nodename}}-keystore.p12
73 | - truststore.jks
74 | - admin-keystore.jks
75 | notify:
76 | - restart elasticsearch
77 |
78 | - name: Flush handlers
79 | meta: flush_handlers
80 |
81 | - name: Copy SG config
82 | copy: src={{item}} dest=/usr/share/elasticsearch/plugins/search-guard-5/sgconfig/
83 | with_items:
84 | - sg_internal_users.yml
85 | - sg_roles_mapping.yml
86 | - sg_roles.yml
87 |
88 | - name: Add execution right for sgadmin script
89 | file: path=/usr/share/elasticsearch/plugins/search-guard-5/tools/sgadmin.sh mode="o+x"
90 |
91 | - name: Wait for Elasticsearch
92 | wait_for: port={{item}} delay=10 connect_timeout=10
93 | with_items:
94 | - 9200
95 | - 9300
96 |
97 | - name: Apply sg_config
98 | shell: /opt/lightsiem/sg/apply_config.sh
99 | retries: 10
100 | delay: 1
101 | notify:
102 | - restart elasticsearch
103 | - restart kibana
104 | - restart logstash
105 | tags: configuration security
106 |
107 |
108 | - name: Add dnsmasq config
109 | copy: src=dnsmasq.conf dest=/etc/dnsmasq.d/lightsiem.conf
110 | tags:
111 | - configuration
112 | notify:
113 | - restart dnsmasq
114 |
115 | - name: Create folder for SG ssl certificates
116 | file: path=/etc/logstash/ssl/ state=directory
117 |
118 | - name: Copy keystores for logstash
119 | copy: src=/opt/lightsiem/sg/{{item}} dest=/etc/logstash/ssl/
120 | with_items:
121 | - node-{{ansible_nodename}}-keystore.jks
122 | - node-{{ansible_nodename}}-keystore.p12
123 | - truststore.jks
124 |
125 | - name: Add Logstash configs
126 | copy: src=logstash-conf.d/{{item}} dest=/etc/logstash/conf.d/
127 | with_items:
128 | - 20-ossec.conf
129 | - 30-snort.conf
130 | - 40-cisco.conf
131 | - 60-netflow.conf
132 | - 95-common.conf
133 | - 98-mail.conf
134 | tags:
135 | - configuration
136 | notify:
137 | - restart logstash
138 |
139 | - name: Add Logstash output config
140 | template: src=99-output.conf.j2 dest=/etc/logstash/conf.d/99-output.conf
141 | tags:
142 | - configuration
143 | notify:
144 | - restart logstash
145 |
146 | - name: Create patterns directory
147 | file: path=/opt/lightsiem/patterns state=directory
148 |
149 | - name: Place Logstash patterns
150 | copy: src=logstash-patterns/{{item}} dest=/opt/lightsiem/patterns/
151 | with_items:
152 | - cisco.pattern
153 | - ossec.pattern
154 | - snort.pattern
155 | - idmef.pattern
156 | notify:
157 | - restart logstash
158 | tags:
159 | - configuration
160 |
161 | - name: Place Logstash output template
162 | copy: src=template.json dest=/etc/logstash/
163 | tags:
164 | - configuration
165 | notify:
166 | - restart logstash
167 |
168 | - name: Install email output for logstash
169 | command: /usr/share/logstash/bin/logstash-plugin install logstash-output-email
170 | notify:
171 | - restart logstash
172 |
173 | - name: Open ports in firewalld
174 | firewalld: port={{item}} permanent=true state=enabled
175 | with_items:
176 | #kibana server
177 | - 5601/tcp
178 | #ossec syslog
179 | - 9000/udp
180 | #ossec filebeat
181 | - 9001/tcp
182 | #snort filebeat
183 | - 9010/tcp
184 | #cisco syslog
185 | - 9020/udp
186 | #netflow
187 | - 9999/udp
188 | tags:
189 | - configuration
190 | notify:
191 | - restart firewalld
192 | ignore_errors: yes
193 |
194 |
195 | # - name: Flush handlers
196 | # meta: flush_handlers
197 | #
198 | # - name: Wait for Logstash and Elasticsearch
199 | # wait_for: port={{item}} delay=10 connect_timeout=10
200 | # with_items:
201 | # - 9200
202 | # - 9300
203 | # - 9001
204 | # - 9010
205 |
206 |
207 | # - name: Flush handlers
208 | # meta: flush_handlers
209 | #
210 | # - name: Wait for Logstash and Elasticsearch
211 | # wait_for: port={{item}} delay=10 connect_timeout=10
212 | # with_items:
213 | # - 9200
214 | # - 9300
215 |
216 | - name: Install searchGuard plugin for Kibana
217 | command: /usr/share/kibana/bin/kibana-plugin install https://github.com/floragunncom/search-guard-kibana-plugin/releases/download/v5.1.1-alpha/searchguard-kibana-alpha-5.1.1.zip
218 | notify:
219 | - restart kibana
220 |
221 | - name: Place Kibana 5 config
222 | template: src=kibana.yml.j2 dest=/etc/kibana/kibana.yml
223 | tags:
224 | - configuration
225 | notify:
226 | - restart kibana
227 |
--------------------------------------------------------------------------------
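
The role is applied through the playbook at the repository root; a minimal invocation sketch (the inventory file name is an assumption):

    ansible-playbook -i hosts lightsiem-install.yml
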
/roles/elk/templates/elasticsearch.yml.j2:
--------------------------------------------------------------------------------
1 | # ======================== Elasticsearch Configuration =========================
2 | #
3 | # NOTE: Elasticsearch comes with reasonable defaults for most settings.
4 | # Before you set out to tweak and tune the configuration, make sure you
5 | # understand what you are trying to accomplish and the consequences.
6 | #
7 | # The primary way of configuring a node is via this file. This template lists
8 | # the most important settings you may want to configure for a production cluster.
9 | #
10 | # Please see the documentation for further information on configuration options:
11 | # <http://www.elastic.co/guide/en/elasticsearch/reference/index.html>
12 | #
13 | # ---------------------------------- Cluster -----------------------------------
14 | #
15 | # Use a descriptive name for your cluster:
16 | #
17 | # cluster.name: my-application
18 | #
19 | # ------------------------------------ Node ------------------------------------
20 | #
21 | # Use a descriptive name for the node:
22 | #
23 | # node.name: node-1
24 | #
25 | # Add custom attributes to the node:
26 | #
27 | # node.rack: r1
28 | #
29 | # ----------------------------------- Paths ------------------------------------
30 | #
31 | # Path to directory where to store the data (separate multiple locations by comma):
32 | #
33 | # path.data: /path/to/data
34 | #
35 | # Path to log files:
36 | #
37 | # path.logs: /path/to/logs
38 | #
39 | # ----------------------------------- Memory -----------------------------------
40 | #
41 | # Lock the memory on startup:
42 | #
43 | # bootstrap.mlockall: true
44 | #
45 | # Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
46 | # available on the system and that the owner of the process is allowed to use this limit.
47 | #
48 | # Elasticsearch performs poorly when the system is swapping the memory.
49 | #
50 | # ---------------------------------- Network -----------------------------------
51 | #
52 | # Set the bind address to a specific IP (IPv4 or IPv6):
53 | #
54 | # network.host: 192.168.0.1
55 | network.host: ["127.0.0.1", "localhost"]
56 | #
57 | # Set a custom port for HTTP:
58 | #
59 | # http.port: 9200
60 | #
61 | # For more information, see the documentation at:
62 | # <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
63 | #
64 | # --------------------------------- Discovery ----------------------------------
65 | #
66 | # Pass an initial list of hosts to perform discovery when new node is started:
67 | # The default list of hosts is ["127.0.0.1", "[::1]"]
68 | #
69 | # discovery.zen.ping.unicast.hosts: ["host1", "host2"]
70 | #
71 | # Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
72 | #
73 | # discovery.zen.minimum_master_nodes: 3
74 | #
75 | # For more information, see the documentation at:
76 | # <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
77 | #
78 | # ---------------------------------- Gateway -----------------------------------
79 | #
80 | # Block initial recovery after a full cluster restart until N nodes are started:
81 | #
82 | # gateway.recover_after_nodes: 3
83 | #
84 | # For more information, see the documentation at:
85 | # <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
86 | #
87 | # ---------------------------------- Various -----------------------------------
88 | #
89 | # Disable starting multiple nodes on a single system:
90 | #
91 | # node.max_local_storage_nodes: 1
92 | #
93 | # Require explicit names when deleting indices:
94 | #
95 | # action.destructive_requires_name: true
96 | #
97 | #
98 | ###############################################################################################
99 | ## SEARCH GUARD SSL #
100 | ## Configuration #
101 | ##############################################################################################
102 | #
103 | #
104 | ##############################################################################################
105 | ## Transport layer SSL #
106 | ## #
107 | ##############################################################################################
108 | ## Enable or disable node-to-node ssl encryption (default: true)
109 | searchguard.ssl.transport.enabled: true
110 | ## JKS or PKCS12 (default: JKS)
111 | ##searchguard.ssl.transport.keystore_type: PKCS12
112 | ## Relative path to the keystore file (mandatory, this stores the server certificates), must be placed under the config/ dir
113 | searchguard.ssl.transport.keystore_filepath: sg/node-{{ansible_nodename}}-keystore.jks
114 | ## Alias name (default: first alias which could be found)
115 | searchguard.ssl.transport.keystore_alias: node-{{ansible_nodename}}
116 | ## Keystore password (default: changeit)
117 | searchguard.ssl.transport.keystore_password: changeit
118 | #
119 | ## JKS or PKCS12 (default: JKS)
120 | #searchguard.ssl.transport.truststore_type: PKCS12
121 | ## Relative path to the truststore file (mandatory, this stores the client/root certificates), must be placed under the config/ dir
122 | searchguard.ssl.transport.truststore_filepath: sg/truststore.jks
123 | ## Alias name (default: first alias which could be found)
124 | searchguard.ssl.transport.truststore_alias: root-ca-chain
125 | ## Truststore password (default: changeit)
126 | searchguard.ssl.transport.truststore_password: capass
127 | ## Enforce hostname verification (default: true)
128 | ##searchguard.ssl.transport.enforce_hostname_verification: true
130 | ## If hostname verification is enabled, specify whether the hostname should be resolved (default: true)
130 | ##searchguard.ssl.transport.resolve_hostname: true
131 | ## Use native Open SSL instead of JDK SSL if available (default: true)
132 | searchguard.ssl.transport.enable_openssl_if_available: true
133 | #
134 | ##############################################################################################
135 | ## HTTP/REST layer SSL #
136 | ## #
137 | ##############################################################################################
138 | ## Enable or disable rest layer security - https, (default: false)
139 | searchguard.ssl.http.enabled: true
140 | ## JKS or PKCS12 (default: JKS)
141 | #
142 | searchguard.ssl.http.keystore_type: PKCS12
143 | ## Relative path to the keystore file (this stores the server certificates), must be placed under the config/ dir
144 | searchguard.ssl.http.keystore_filepath: sg/node-{{ansible_nodename}}-keystore.p12
145 | ## Alias name (default: first alias which could be found)
146 | searchguard.ssl.http.keystore_alias: node-{{ansible_nodename}}
147 | ## Keystore password (default: changeit)
148 | searchguard.ssl.http.keystore_password: changeit
149 | ## Do the clients (typically the browser or the proxy) have to authenticate themselves to the http server? Default is OPTIONAL
150 | ## To enforce authentication use REQUIRE, to completely disable client certificates use NONE
151 | searchguard.ssl.http.clientauth_mode: NONE
152 | ## JKS or PKCS12 (default: JKS)
153 | ##searchguard.ssl.http.truststore_type: PKCS12
154 | ## Relative path to the truststore file (this stores the client certificates), must be placed under the config/ dir
155 | ##searchguard.ssl.http.truststore_filepath: truststore_https.jks
156 | ## Alias name (default: first alias which could be found)
157 | ##searchguard.ssl.http.truststore_alias: my_alias
158 | ## Truststore password (default: changeit)
159 | ##searchguard.ssl.http.truststore_password: changeit
160 | ## Use native Open SSL instead of JDK SSL if available (default: true)
161 | ##searchguard.ssl.http.enable_openssl_if_available: false
162 |
163 | #security.manager.enabled: true
164 | searchguard.authcz.admin_dn:
165 | - "CN=admin, OU=client, O=client, L=Test, C=DE"
166 |
--------------------------------------------------------------------------------
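
With searchguard.ssl.http.enabled set, the REST layer answers only over HTTPS with the self-signed node certificate. A quick smoke-test sketch (-k skips CA verification; the credentials are the ones templated into kibana.yml above):

    curl -k -u usr_kibana:2FgeR37e1 https://localhost:9200/
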
/roles/elk/files/logstash-conf.d/30-snort.conf:
--------------------------------------------------------------------------------
1 | input {
2 | beats {
3 | port => 9010
4 | type => snort_full
5 | }
6 |
7 |
8 |
9 | #syslog {
10 | # port => 9012
11 | # type => snort_barnyard
12 | # }
13 |
14 | #stdin {
15 | # type => snort_barnyard
16 | #}
17 |
18 | }
19 |
20 |
21 |
22 | filter {
23 |
24 |
25 |
26 |
27 | if [type] == "snort_full" {
28 | grok {
29 | match => { "message" => "%{SNORT_MESSAGE_FULL}" }
30 | patterns_dir => ["/opt/lightsiem/patterns/"]
31 | }
32 |
33 | if [Snort_date] {
34 | mutate {
35 | strip => "Snort_date"
36 | add_field => [ "[Alert][Sensor][Node][Name]", "%{host}" ]
37 | remove_field => ["file"]
38 | remove_field => ["offset"]
39 | }
40 |
41 | date {
42 | match => ["Snort_date", "MM/dd-HH:mm:ss.SSSSSS"]
43 | target => "[Alert][CreateTime]"
44 | }
45 |
46 | mutate { remove_field => ["Snort_date"] }
47 | }
48 | }
49 |
50 | if [type] in ["snort", "snort_barnyard"] {
51 | grok {
52 | match => { "message" => "%{IDMEF_MESSAGE}" }
53 | patterns_dir => ["/opt/lightsiem/patterns/"]
54 | }
55 |
56 | if [syslog5424_ts] {
57 | date {
58 | #2015-03-21T01:44:27.757618+03:00
59 | match => ["syslog5424_ts", "ISO8601"]
60 | target => "[Alert][CreateTime]"
61 | }
62 | } else if [timestamp] {
63 | date {
64 | #Apr 19 11:55:57
65 | match => ["timestamp", "MMM dd HH:mm:ss", "MMM d HH:mm:ss"]
66 | target => "[Alert][CreateTime]"
67 | }
68 | }
69 |
70 | if [syslog5424_host] {
71 | mutate { add_field => [ "[Alert][Sensor][Node][Name]", "%{syslog5424_host}" ] }
72 | } else if [logsource] {
73 | mutate { add_field => [ "[Alert][Sensor][Node][Name]", "%{logsource}" ] }
74 | }
75 |
76 | if [syslog5424_proc] {
77 | mutate { add_field => [ "[Alert][Source][Process][Pid]", "%{syslog5424_proc}" ] }
78 | } else if [pid] {
79 | mutate { add_field => [ "[Alert][Source][Process][Pid]", "%{pid}" ] }
80 | }
81 | 
82 | if [syslog5424_app] {
83 | mutate { add_field => [ "[Alert][Source][Process][Name]", "%{syslog5424_app}" ] }
84 | } else if [program] {
85 | mutate { add_field => [ "[Alert][Source][Process][Name]", "%{program}" ] }
86 | }
87 |
88 | mutate
89 | {
90 |
91 | remove_field => ["syslog5424_ver"]
92 | remove_field => ["syslog5424_ts"]
93 |
94 | remove_field => ["pid"]
95 | remove_field => ["syslog5424_proc"]
96 | remove_field => ["syslog5424_app"]
97 | remove_field => ["program"]
98 | remove_field => ["syslog5424_host"]
99 | remove_field => ["logsource"]
100 |
101 | }
102 | }
103 |
104 | # if [type] == "snort_barnyard" {
105 | #
106 | # grok {
107 | # match => { "message" => "%{SNORT_BARNYARD_MESSAGE}" }
108 | # patterns_dir => ["/opt/lightsiem/patterns/"]
109 | # }
110 |
111 | # if [Alert][Protocol][Number] == "6"
112 | # {
113 | # mutate { add_field => [ "[Alert][Analyzer][Protocol]", "TCP" ]}
114 |
115 | # grok { match => { "[Snort][Transport][Header]" => "%{SNORT_BARNYARD_TCPHEADER}" }}
116 | # }
117 | # else if [Alert][Protocol][Number] == "17"
118 | # {
119 | # mutate { add_field => [ "[Alert][Analyzer][Protocol]", "UDP" ]}
120 | # grok { match => { "[Snort][Transport][Header]" => "%{SNORT_BARNYARD_UDPHEADER}" }}
121 | # }
122 | # else if [Alert][Protocol][Number] == "1"
123 | # {
124 | # mutate { add_field => [ "[Alert][Analyzer][Protocol]", "ICMP" ]}
125 | # grok { match => { "[Snort][Transport][Header]" => "%{SNORT_BARNYARD_ICMPHEADER}" }}
126 | # }
127 | # else
128 | # {
129 | # mutate { add_field => [ "[Alert][Analyzer][Protocol]", "UNKNOWN" ]}
130 | # grok { match => { "[Snort][Transport][Header]" => "%{SNORT_BARNYARD_DEFAULTHEADER}" }}
131 | # }
132 |
133 |
134 | # mutate {
135 | # remove_tag => [ "_grokparsefailure_sysloginput" ]
136 | # }
137 | # }
138 |
139 | if [type] in ["snort", "snort_barnyard"] {
140 |
141 |
142 | if [syslog5424_ts] {
143 | date {
144 | #2015-03-21T01:44:27.757618+03:00
145 | match => ["syslog5424_ts", "ISO8601"]
146 | target => "[Alert][CreateTime]"
147 | }
148 | } else if [timestamp] {
149 | date {
150 | #Apr 19 11:55:57
151 | match => ["timestamp", "MMM dd HH:mm:ss", "MMM d HH:mm:ss"]
152 | target => "[Alert][CreateTime]"
153 | }
154 | }
155 | }
156 |
157 | if [type] in ["snort", "snort_full", "snort_barnyard"] {
158 |
159 | if [Alert][Analyzer][Level][Origin] == "1" {
160 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "15" ] }
161 | } else if "[Alert][Analyzer][Level][Origin]" == "2" {
162 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "11" ] }
163 | } else if "[Alert][Analyzer][Level][Origin]" == "3" {
164 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "6" ] }
165 | } else if "[Alert][Analyzer][Level][Origin]" == "4" {
166 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "4" ] }
167 | }
168 |
169 | mutate {
170 | add_field => [ "[Alert][Analyzer][Name]", "snort" ]
171 |
172 | #Next line is a hack to fix issue #1: Kibana's term panels use .raw fields, but Logstash creates those only for string fields
173 | add_field => [ "[Alert][Analyzer][Level][Normalized][raw]", "%{[Alert][Analyzer][Level][Normalized]}" ]
174 | convert => [ "[Alert][Analyzer][Level][Origin]", "integer" ]
175 | convert => [ "[Alert][Analyzer][Level][Normalized]", "integer" ]
176 |
177 | add_field => [ "[Alert][Analyzer][Node][Name]", "%{[Alert][Sensor][Node][Name]}" ]
178 | }
179 | }
180 | }
181 |
--------------------------------------------------------------------------------
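
As with the OSSEC input, the beats listener on port 9010 expects pre-joined multi-line alerts; a matching Filebeat 5.x sketch (path and host are assumptions):

    filebeat.prospectors:
      - input_type: log
        paths:
          - /var/log/snort/alert
        multiline.pattern: '^\[\*\*\]'
        multiline.negate: true
        multiline.match: after
    output.logstash:
      hosts: ["siem.example.com:9010"]
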
/roles/elk/files/template.json:
--------------------------------------------------------------------------------
1 | {
2 | "template": "lightsiem-*",
3 | "settings": {
4 | "index.refresh_interval": "5s"
5 | },
6 | "mappings": {
7 | "_default_": {
8 | "dynamic": true,
9 | "properties": {
10 | "@timestamp": {
11 | "type": "date",
12 | "doc_values": true
13 | },
14 | "@version": {
15 | "type": "string",
16 | "index": "not_analyzed",
17 | "doc_values": true
18 | },
19 | "geoip": {
20 | "type": "object",
21 | "properties": {
22 | "ip": {
23 | "type": "ip",
24 | "doc_values": true
25 | },
26 | "location": {
27 | "type": "geo_point",
28 | "doc_values": true
29 | },
30 | "latitude": {
31 | "type": "float",
32 | "doc_values": true
33 | },
34 | "longitude": {
35 | "type": "float",
36 | "doc_values": true
37 | }
38 | }
39 | },
40 | "Alert": {
41 | "type": "object",
42 | "properties": {
43 | "Source": {
44 | "type": "object",
45 | "properties": {
46 | "Node": {
47 | "type": "object",
48 | "properties": {
49 | "Address": {
50 | "type": "ip",
51 | "doc_values": true
52 | },
53 | "Port" : {"type" : "integer"},
54 | "AS" : {"type" : "integer"},
55 | "Mask" : {"type" : "integer"},
56 | "Geoip": {
57 | "type": "object",
58 | "properties": {
59 | "ip": {
60 | "type": "ip",
61 | "doc_values": true
62 | },
63 | "location": {
64 | "type": "geo_point",
65 | "doc_values": true
66 | },
67 | "latitude": {
68 | "type": "float",
69 | "doc_values": true
70 | },
71 | "longitude": {
72 | "type": "float",
73 | "doc_values": true
74 | },
75 | "continent_code": {"type": "string", "index":"not_analyzed" },
76 | "country_code2": {"type": "string", "index":"not_analyzed" },
77 | "country_code3": {"type": "string", "index":"not_analyzed" },
78 | "country_name": {"type": "string", "index":"not_analyzed" },
79 | "city_name": {"type": "string", "index":"not_analyzed" }
80 | }
81 | },
82 | "Name" : {"type":"string", "index":"not_analyzed"}
83 | }
84 | },
85 | "Process":{
86 | "type" : "object",
87 | "properties": {
88 | "Name" : {"type" : "string", "index":"not_analyzed" }
89 | }
90 | }
91 | }
92 | },
93 | "Sensor": {
94 | "type": "object",
95 | "properties": {
96 | "Node": {
97 | "type": "object",
98 | "properties": {
99 | "Address": {
100 | "type": "ip",
101 | "doc_values": true
102 | },
103 | "Name": {"type": "string", "index":"not_analyzed" }
104 | }
105 | }
106 | }
107 | },
108 | "Target": {
109 | "type": "object",
110 | "properties": {
111 | "Node": {
112 | "type": "object",
113 | "properties": {
114 | "Address": {
115 | "type": "ip",
116 | "doc_values": true
117 | },
118 | "Port" : {"type" : "integer"},
119 | "AS" : {"type" : "integer"},
120 | "Mask" : {"type" : "integer"},
121 | "Geoip": {
122 | "type": "object",
123 | "properties": {
124 | "ip": {
125 | "type": "ip",
126 | "doc_values": true
127 | },
128 | "location": {
129 | "type": "geo_point",
130 | "doc_values": true
131 | },
132 | "latitude": {
133 | "type": "float",
134 | "doc_values": true
135 | },
136 | "longitude": {
137 | "type": "float",
138 | "doc_values": true
139 | },
140 | "continent_code": {"type": "string", "index":"not_analyzed" },
141 | "country_code2": {"type": "string", "index":"not_analyzed" },
142 | "country_code3": {"type": "string", "index":"not_analyzed" },
143 | "country_name": {"type": "string", "index":"not_analyzed" },
144 | "city_name": {"type": "string", "index":"not_analyzed" }
145 | }
146 | },
147 | "Name" : {"type":"string", "index":"not_analyzed"}
148 | }
149 | }
150 | }
151 | },
152 | "Analyzer": {
153 | "type": "object",
154 | "properties": {
155 | "Level": {
156 | "type": "object",
157 | "properties": {
158 | "Normalized": {"type": "integer"},
159 | "Origin": {"type": "integer"}
160 | }
161 | },
162 | "Node":{
163 | "type": "object",
164 | "properties": {
165 | "Name": {"type" : "string", "index":"not_analyzed" }
166 | }
167 | },
168 | "Name": {"type" : "string", "index":"not_analyzed" },
169 | "Reason": {"type" : "string", "index":"not_analyzed" },
170 | "Protocol": {"type": "string", "index":"not_analyzed" },
171 | "tcpflags": {"type": "string", "index":"not_analyzed" },
172 | "Netflow":{ "type": "object",
173 | "properties": {
174 | "Direction": {"type": "integer"},
175 | "FlowSamplerId": {"type": "integer"},
176 | "FlowSeqNum": {"type": "integer"},
177 | "FlowsetId": {"type": "integer"},
178 | "InBytes": {"type": "integer"},
179 | "InPkts": {"type": "integer"},
180 | "InputSNMP": {"type": "integer"},
181 | "OutputSNMP": {"type": "integer"},
182 | "ToS": {"type": "integer"},
183 | "TCPFlags": {"type": "integer"},
184 | "NetflowVersion": {"type": "integer"},
185 | "Flow_ID": {"type" : "string", "index":"not_analyzed"},
186 | "NextHop":{
187 | "type": "ip",
188 | "doc_values": true
189 | }
190 | }
191 | }
192 | }
193 | },
194 | "Classification" : {
195 | "type" : "object",
196 | "properties": {
197 | "Ident": {"type" : "string", "index":"not_analyzed" },
198 | "Text": {"type" : "string", "index":"not_analyzed" },
199 | "Protocol": {"type" : "string", "index":"not_analyzed" }
200 | }
201 | }
202 | }
203 | },
204 | "type" : {"type": "string", "index":"not_analyzed" }
205 | }
206 | }
207 | }
208 | }
209 |
--------------------------------------------------------------------------------
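
This mapping template only takes effect once the elasticsearch output loads it. The templated 99-output.conf is not shown in this section, but the wiring typically looks like this sketch (index name chosen to match the "lightsiem-*" template pattern; all values are assumptions):

    output {
        elasticsearch {
            hosts => ["localhost:9200"]
            index => "lightsiem-%{+YYYY.MM.dd}"
            template => "/etc/logstash/template.json"
            template_name => "lightsiem"
        }
    }
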
/roles/elk/files/logstash-conf.d/40-cisco.conf:
--------------------------------------------------------------------------------
1 | input {
2 | udp {
3 | port => 9020
4 | type => cisco
5 | }
6 |
7 | # stdin {
8 | # type => syslog
9 | # }
10 | }
11 |
12 | filter {
13 | if [type] == "cisco" {
14 |
15 | grok
16 | {
17 | patterns_dir => "/opt/lightsiem/patterns/"
18 | match => { "message" => "%{CISCO_HEADER} (%{CISCO_IOS_EVENTID:[Alert][Classification][Text]}: )?%{GREEDYDATA:[Alert][Analyzer][rawmessage]}" }
19 | }
20 |
21 |
22 | grok
23 | {
24 | tag_on_failure => []
25 | patterns_dir => "/opt/lightsiem/patterns/"
26 | #<165>%ASA-5-111001: Begin configuration: 192.168.1.21 writing to memory
27 | match => { "[Alert][Analyzer][rawmessage]" => "Begin configuration: %{IP:[Alert][Source][Node][Address]} writing to %{DATA:[Alert][Target][Device]}" }
28 |
29 | #%ASA-5-111002: Begin configuration: 8.8.8.8 reading from device
30 | match => { "[Alert][Analyzer][rawmessage]" => "Begin configuration: %{IP:[Alert][Source][Node][Address]} reading from %{DATA:[Alert][Target][Device]}" }
31 |
32 | #%ASA-5-111003: 8.8.8.8 Erase configuration
33 | match => { "[Alert][Analyzer][rawmessage]" => "%{IP:[Alert][Source][Node][Address]} Erase configuration" }
34 |
35 | #%ASA-5-111004: 8.8.8.8 end configuration: {FAILED|OK}
36 | match => { "[Alert][Analyzer][rawmessage]" => "%{IP:[Alert][Source][Node][Address]} end configuration: %{DATA:[Alert][Target][Status]}" }
37 |
38 | #%ASA-5-111005: IP_address end configuration: OK
39 | match => { "[Alert][Analyzer][rawmessage]" => "%{IP:[Alert][Source][Node][Address]} end configuration: OK" }
40 |
41 | #%ASA-5-111007: Begin configuration: IP_address reading from device.
42 | match => { "[Alert][Analyzer][rawmessage]" => "Begin configuration: %{IP:[Alert][Source][Node][Address]} reading from %{DATA:[Alert][Target][Device]}" }
43 |
44 | #<165>%ASA-5-111008: User 'enable_15' executed the 'write memory' command.
45 | match => { "[Alert][Analyzer][rawmessage]" => "User '%{DATA:[Alert][Source][User][Name]}' executed the '%{DATA:[Alert][Source][Command]}' command." }
46 |
47 | #%ASA-7-111009:User user executed cmd: string
48 | match => { "[Alert][Analyzer][rawmessage]" => "User '%{DATA:[Alert][Source][User][Name]}' executed cmd: %{GREEDYDATA:[Alert][Source][Command]}" }
49 |
50 | #%ASA-6-113008: AAA transaction status ACCEPT: user = user
51 | match => { "[Alert][Analyzer][rawmessage]" => "AAA transaction status %{DATA:[Alert][Assessment][Action]}: user = %{DATA:[Alert][Source][User][Name]}" }
52 |
53 | #<166>%ASA-6-113012: AAA user authentication Successful : local database : user = admin
54 | match => { "[Alert][Analyzer][rawmessage]" => "AAA user authentication Successful : local database : user = %{DATA:[Alert][Source][User][Name]}" }
55 |
56 | #<166>%ASA-6-113015: AAA user authentication Rejected : reason = Invalid password : local database : user = ***** : user IP = 192.168.1.21
57 | match => { "[Alert][Analyzer][rawmessage]" => "AAA user authentication Rejected : reason = Invalid password : local database : user = %{DATA:[Alert][Source][User][Name]} : user IP = %{IP:[Alert][Source][Node][Address]}" }
58 |
59 | #%ASA-4-106023: Deny udp src outside:13.2.22.4/46455 dst External:12.18.3.8/68000 by access-group "outside_access_in" [0x0, 0x0]
60 | match => { "[Alert][Analyzer][rawmessage]" => "%{WORD:[Alert][Assessment][Action]} %{WORD:[Alert][Analyzer][Protocol]} src %{DATA}:%{IP:[Alert][Source][Node][Address]}/%{DATA:[Alert][Source][Node][Port]} dst %{DATA}:%{IP:[Alert][Target][Node][Address]}/%{DATA:[Alert][Target][Node][Port]} by access-group %{GREEDYDATA:[Alert][Source][Process][Name]}"}
61 |
62 | #%ASA-4-106023:Deny icmp src outside:1.5.1.8 dst External-DMZ:2.8.3.5 (type 3, code 3) by access-group "outside_access_in" [0x0, 0x0]
63 | match => { "[Alert][Analyzer][rawmessage]" => "%{WORD:[Alert][Assessment][Action]} %{WORD:[Alert][Analyzer][Protocol]} src %{DATA}:%{IP:[Alert][Source][Node][Address]} dst %{DATA}:%{IP:[Alert][Target][Node][Address]} \(type %{INT}, code %{INT}\) by access-group %{GREEDYDATA:[Alert][Source][Process][Name]}" }
64 |
65 | #%ASA-7-710005: TCP request discarded from 8.8.20.13/49 to outside:7.1.1.1/60
66 | match => { "[Alert][Analyzer][rawmessage]" => "%{WORD:[Alert][Analyzer][Protocol]} (?:request|access) %{WORD:[Alert][Assessment][Action]} from %{IP:[Alert][Source][Node][Address]}/%{DATA:[Alert][Source][Node][Port]} to %{DATA}:%{IP:[Alert][Target][Node][Address]}/%{WORD:[Alert][Target][Node][Port]}" }
67 |
68 | # ASA-6-302013, ASA-6-302014, ASA-6-302015, ASA-6-302016
69 | match => { "[Alert][Analyzer][rawmessage]" => "%{CISCO_ACTION:[Alert][Assessment][Action]}(?: %{CISCO_DIRECTION})? %{WORD:[Alert][Analyzer][Protocol]} connection %{INT} for %{DATA}:%{IP:[Alert][Source][Node][Address]}/%{INT:[Alert][Source][Node][Port]}( \(%{IP}/%{INT}\))?(\(%{DATA:src_fwuser}\))? to %{DATA:}:%{IP:[Alert][Target][Node][Address]}/%{INT:[Alert][Target][Node][Port]}( \(%{IP}/%{INT}\))?(\(%{DATA:dst_fwuser}\))?( duration %{TIME:duration} bytes %{INT:bytes})?(?: %{CISCO_REASON:[Alert][Analyzer][Reason]})?( \(%{DATA:user}\))?" }
70 |
71 | # ASA-6-106015
72 | match => { "[Alert][Analyzer][rawmessage]" => "%{CISCO_ACTION:[Alert][Assessment][Action]} %{WORD:[Alert][Analyzer][Protocol]} \(%{DATA:[Alert][Source][Process][Name]}\) from %{IP:[Alert][Source][Node][Address]}/%{INT:[Alert][Source][Node][Port]} to %{IP:[Alert][Target][Node][Address]}/%{INT:[Alert][Target][Node][Port]} flags %{DATA:[Alert][Analyzer][tcpflags]} on interface %{GREEDYDATA}" }
73 |
74 | #%SEC-6-IPACCESSLOGP
75 | match => { "[Alert][Analyzer][rawmessage]" => "list %{DATA:[Alert][Source][Process][Name]} %{WORD:[Alert][Assessment][Action]} %{WORD:[Alert][Analyzer][Protocol]} %{IP:[Alert][Source][Node][Address]}\(%{DATA:[Alert][Source][Node][Port]}\) \(%{DATA:interface}\) -> %{IP:[Alert][Target][Node][Address]}\(%{WORD:[Alert][Target][Node][Port]}\), %{GREEDYDATA}" }
76 |
77 | #%SEC_LOGIN-5-LOGIN_SUCCESS
78 | match => { "[Alert][Analyzer][rawmessage]" => "Login Success \[user: %{DATA:[Alert][Source][User][Name]}\] \[Source: %{IP:[Alert][Source][Node][Address]}\] \[localport: %{INT:[Alert][Target][Node][Port]}\] at %{CISCO_IOS_TIMESTAMP}" }
79 |
80 | #%SEC_LOGIN-4-LOGIN_FAILED
81 | match => { "[Alert][Analyzer][rawmessage]" => "Login failed \[user: %{DATA:[Alert][Source][User][Name]}\] \[Source: %{IP:[Alert][Source][Node][Address]}\] \[localport: %{INT:[Alert][Target][Node][Port]}\] \[Reason: %{DATA}\] at %{CISCO_IOS_TIMESTAMP}" }
82 |
83 | #%SEC_LOGIN-1-QUIET_MODE_ON
84 | match => { "[Alert][Analyzer][rawmessage]" => "Still timeleft for watching failures is %{DATA} secs, \[user: %{DATA:[Alert][Source][User][Name]}\] \[Source: %{IP:[Alert][Source][Node][Address]}\] \[localport: %{INT:[Alert][Target][Node][Port]}\] \[Reason: %{DATA}\] \[ACL: %{DATA}\] at %{CISCO_IOS_TIMESTAMP}" }
85 |
86 | #%PARSER-5-CFGLOG_LOGGEDCMD
87 | #match => { "[Alert][Analyzer][rawmessage]" => "User\:%{DATA:[Alert][Source][User][Name]}\ logged command:%{DATA:[Alert][Source][Command]} %{WORD:[Alert][Assessment][Action]}" }
88 | match => { "[Alert][Analyzer][rawmessage]" => "User\:%{DATA:[Alert][Source][User][Name]}\ logged command:%{GREEDYDATA:[Alert][Source][Command]}" }
89 |
90 | #%SEC-6-IPACCESSLOGDP
91 | match => { "[Alert][Analyzer][rawmessage]" => "list %{DATA:[Alert][Source][Process][Name]} %{WORD:[Alert][Assessment][Action]} %{WORD:[Alert][Analyzer][Protocol]} %{IP:[Alert][Source][Node][Address]} \(%{DATA:Interface}\) -> %{IP:[Alert][Target][Node][Address]} \(%{DATA}\), %{DATA}" }
92 |
93 | #%SYS-6-LOGOUT
94 | match => { "[Alert][Analyzer][rawmessage]" => "User %{GREEDYDATA:[Alert][Source][User][Name]} has exited tty session %{DATA}\(%{IP:[Alert][Source][Node][Address]}\)" }
95 |
96 | #%SYS-5-CONFIG_I
97 | match => { "[Alert][Analyzer][rawmessage]" => "Configured from %{WORD} by %{GREEDYDATA:[Alert][Source][User][Name]} on %{WORD} \(%{IP:[Alert][Source][Node][Address]}\)"}
98 |
99 | #All other IOS events
100 |
101 | #%ASA-5-713259
102 | match => { "[Alert][Analyzer][rawmessage]" => "(?:Group = %{DATA:[Alert][Analyzer][Group]}, )?(?:Username = %{DATA:[Alert][Source][User][Name]}, )?(?:IP = %{IP:[Alert][Source][Node][Address]}, )?Session is being torn down. Reason: %{CISCO_REASON:[Alert][Analyzer][Reason]}" }
103 |
104 | #%ASA-5-713050
105 | match => { "[Alert][Analyzer][rawmessage]" => "Connection terminated for peer %{IP:[Alert][Source][Node][Address]}. Reason: %{GREEDYDATA:[Alert][Analyzer][Reason]} Remote Proxy %{DATA}, Local Proxy %{DATA}" }
106 |
107 | #<167>%ASA-7-710005: TCP request discarded from 1.2.24.26/1069 to outside:7.8.8.8/50000
108 | #match => { "[Alert][Analyzer][rawmessage]" => "%{GREEDYDATA:[Alert][Analyzer][rawmessage]}" }
109 |
110 | }
111 | if [Alert][Target][Node][Port] == "snmp" {
112 | mutate { update => { "[Alert][Target][Node][Port]" => "161" } }
113 | }
114 | mutate { convert => [ "[Alert][Target][Node][Port]", "integer" ] }
115 |
116 | if [Alert][Analyzer][Level][Origin] == "1" {
117 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "13" ] }
118 | } else if [Alert][Analyzer][Level][Origin] == "2" {
119 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "12" ] }
120 | } else if [Alert][Analyzer][Level][Origin] == "3" {
121 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "7" ] }
122 | } else if [Alert][Analyzer][Level][Origin] == "4" {
123 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "5" ] }
124 | } else if [Alert][Analyzer][Level][Origin] == "5" {
125 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "2" ] }
126 | } else if [Alert][Analyzer][Level][Origin] == "6" {
127 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "1" ] }
128 | } else if [Alert][Analyzer][Level][Origin] == "7" {
129 | mutate { add_field => [ "[Alert][Analyzer][Level][Normalized]", "0" ] }
130 | }
131 |
132 | if [Alert][Classification][Ident] in ["111001", "111002", "111003", "111004", "111005", "111007", "111008"]
133 | { mutate { update => ["[Alert][Analyzer][Level][Normalized]", "11"] } }
134 | else if [Alert][Classification][Ident] == "111009"
135 | { mutate { update => ["[Alert][Analyzer][Level][Normalized]", "8"] } }
136 | else if [Alert][Classification][Ident] in ["113012", "113015"]
137 | { mutate { update => ["[Alert][Analyzer][Level][Normalized]", "5"] } }
138 | else if [Alert][Classification][Ident] == "106023" and [Alert][Assessment][Action] == "Deny"
139 | { mutate { update => ["[Alert][Analyzer][Level][Normalized]", "9"] } }
140 | else if [Alert][Classification][Ident] == "710005" and [Alert][Assessment][Action] == "discarded"
141 | { mutate { update => ["[Alert][Analyzer][Level][Normalized]", "9"] } }
142 | else if [Alert][Classification][Ident] in ["305012", "305011"]
143 | { mutate { update => ["[Alert][Analyzer][Level][Normalized]", "0"] } }
144 | else if [Alert][Classification][Ident] == "302014" and [Alert][Analyzer][Reason] == "SYN Timeout"
145 | { mutate { update => ["[Alert][Analyzer][Level][Normalized]", "9"] } }
146 | else if [Alert][Classification][Ident] == "106015"
147 | { mutate { update => ["[Alert][Analyzer][Level][Normalized]", "9"] } }
148 |
149 | mutate {
150 | convert => [ "[Alert][Analyzer][Level][Origin]", "integer" ]
151 | convert => [ "[Alert][Analyzer][Level][Normalized]", "integer" ]
152 | #add_field => [ "[Alert][Analyzer][Level][Normalized][raw]", "%{[Alert][Analyzer][Level][Normalized]}" ]
153 | }
154 |
155 | if [syslog5424_ts] {
156 | date {
157 | #2015-03-21T01:44:27.757618+03:00
158 | match => ["syslog5424_ts", "ISO8601"]
159 | target => "[Alert][CreateTime]"
160 | }
161 | } else if [timestamp] {
162 | date {
163 | #Apr 19 11:55:57
164 | match => ["timestamp", "MMM dd HH:mm:ss", "MMM d HH:mm:ss", "MMM dd YYYY HH:mm:ss"]
165 | target => "[Alert][CreateTime]"
166 | }
167 | } else {
168 | mutate { add_field => [ "[Alert][CreateTime]", "%{@timestamp}" ] }
169 | }
170 |
171 | mutate {
172 | add_field => [ "[Alert][Sensor][Node][Address]", "%{host}" ]
173 | add_field => [ "[Alert][Analyzer][Node][Name]", "%{[Alert][Sensor][Node][Address]}" ]
174 | add_field => [ "[Alert][Analyzer][Name]", "cisco" ]
175 | }
176 |
177 |
178 | }
179 | }
180 |
--------------------------------------------------------------------------------
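Note on the Cisco filter above: each grok match is tried in order against
[Alert][Analyzer][rawmessage] until one succeeds, and the syslog severity
carried in [Alert][Analyzer][Level][Origin] is then remapped onto the 0-13
normalized scale (with per-message-ID overrides). The Python sketch below is
not part of this repository; its regex and mapping table only approximate the
%ASA-4-106023 grok pattern and the if/else chain, using illustrative names.

    import re

    # Rough regex equivalent of the "Deny ... by access-group" grok pattern;
    # group names mirror the [Alert][...] paths used in the filter.
    DENY_RE = re.compile(
        r"(?P<action>\w+) (?P<protocol>\w+) "
        r"src .*?:(?P<src_addr>[\d.]+)/(?P<src_port>\S+) "
        r"dst .*?:(?P<dst_addr>[\d.]+)/(?P<dst_port>\S+) "
        r"by access-group (?P<process>.+)"
    )

    # Same table as the Origin if/else chain: syslog severity -> normalized.
    ORIGIN_TO_NORMALIZED = {"1": "13", "2": "12", "3": "7", "4": "5",
                            "5": "2", "6": "1", "7": "0"}

    msg = ('Deny udp src outside:13.2.22.4/46455 dst External:12.18.3.8/68000'
           ' by access-group "outside_access_in" [0x0, 0x0]')
    print(DENY_RE.search(msg).groupdict())  # extracted alert fields
    print(ORIGIN_TO_NORMALIZED["4"])        # %ASA-4-... -> "5" (the 106023
                                            # Deny override then raises it to 9)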
/roles/elk/files/Beats/filebeat.yml:
--------------------------------------------------------------------------------
1 | ################### Filebeat Configuration Example #########################
2 |
3 | ############################# Filebeat ######################################
4 | filebeat:
5 | # List of prospectors to fetch data.
6 | prospectors:
7 | # Each - is a prospector. Below are the prospector specific configurations
8 | -
9 | # Paths that should be crawled and fetched. Glob based paths.
10 | # To fetch all ".log" files from a specific level of subdirectories
11 | # /var/log/*/*.log can be used.
12 | # For each file found under this path, a harvester is started.
13 |       # Make sure no file is defined twice, as this can lead to unexpected behaviour.
14 | paths:
15 | - /var/log/alerts.log
16 | #document_type: ossec
17 | #- c:\programdata\elasticsearch\logs\*
18 |
19 | # Configure the file encoding for reading files with international characters
20 | # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
21 | # Some sample encodings:
22 | # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
23 | # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
24 | #encoding: plain
25 |
26 | # Type of the files. Based on this the way the file is read is decided.
27 | # The different types cannot be mixed in one prospector
28 | #
29 | # Possible options are:
30 | # * log: Reads every line of the log file (default)
31 | # * stdin: Reads the standard in
32 | input_type: log
33 |
34 | # Exclude lines. A list of regular expressions to match. It drops the lines that are
35 | # matching any regular expression from the list. The include_lines is called before
36 | # exclude_lines. By default, no lines are dropped.
37 | # exclude_lines: ["^DBG"]
38 |
39 | # Include lines. A list of regular expressions to match. It exports the lines that are
40 | # matching any regular expression from the list. The include_lines is called before
41 | # exclude_lines. By default, all the lines are exported.
42 | # include_lines: ["^ERR", "^WARN"]
43 |
44 | # Exclude files. A list of regular expressions to match. Filebeat drops the files that
45 | # are matching any regular expression from the list. By default, no files are dropped.
46 | # exclude_files: [".gz$"]
47 |
48 |       # Optional additional fields. These fields can be freely picked
49 | # to add additional information to the crawled log files for filtering
50 | # fields:
51 | # type: type
52 | # review: 1
53 |
54 | # Set to true to store the additional fields as top level fields instead
55 | # of under the "fields" sub-dictionary. In case of name conflicts with the
56 | # fields added by Filebeat itself, the custom fields overwrite the default
57 | # fields.
58 | #fields_under_root: false
59 |
60 |       # Ignore files which were modified more than the defined timespan in the past.
61 | # In case all files on your system must be read you can set this value very large.
62 | # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
63 | #ignore_older: 0
64 |
65 |       # Close older closes the file handler for files which were not modified
66 |       # for longer than close_older.
67 | # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
68 | #close_older: 1h
69 |
70 | # Type to be published in the 'type' field. For Elasticsearch output,
71 | # the type defines the document type these entries should be stored
72 | # in. Default: log
73 | document_type: ossec
74 |
75 | # Scan frequency in seconds.
76 | # How often these files should be checked for changes. In case it is set
77 | # to 0s, it is done as often as possible. Default: 10s
78 | #scan_frequency: 10s
79 |
80 | # Defines the buffer size every harvester uses when fetching the file
81 | #harvester_buffer_size: 16384
82 |
83 | # Maximum number of bytes a single log event can have
84 | # All bytes after max_bytes are discarded and not sent. The default is 10MB.
85 | # This is especially useful for multiline log messages which can get large.
86 | #max_bytes: 10485760
87 |
88 |       # Multiline can be used for log messages spanning multiple lines. This is common
89 | # for Java Stack Traces or C-Line Continuation
90 | multiline:
91 |
92 |         # The regexp pattern that has to be matched. The pattern below matches all lines starting with "**"
93 | pattern: ^\*\*
94 |
95 | # Defines if the pattern set under pattern should be negated or not. Default is false.
96 | negate: true
97 |
98 |         # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
99 |         # that was (not) matched before or after, or as long as a pattern is not matched, based on negate.
100 |         # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
101 | match: after
102 |
103 | # The maximum number of lines that are combined to one event.
104 |         # In case there are more than max_lines, the additional lines are discarded.
105 | # Default is 500
106 | #max_lines: 500
107 |
108 |         # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
109 | # Default is 5s.
110 | #timeout: 5s
111 |
112 |       # Setting tail_files to true means filebeat starts reading new files at the end
113 | # instead of the beginning. If this is used in combination with log rotation
114 | # this can mean that the first entries of a new file are skipped.
115 | #tail_files: false
116 |
117 |       # Backoff values define how aggressively filebeat crawls new files for updates.
118 |       # The default values can be used in most cases. Backoff defines how long filebeat
119 |       # waits before checking a file again after EOF is reached. Default is 1s, which means the file
120 |       # is checked every second for new lines. This leads to near real time crawling.
121 | # Every time a new line appears, backoff is reset to the initial value.
122 | #backoff: 1s
123 |
124 | # Max backoff defines what the maximum backoff time is. After having backed off multiple times
125 |       # from checking the files, the waiting time will never exceed max_backoff independent of the
126 |       # backoff factor. Having it set to 10s means that, in the worst case, a new line added to a log
127 |       # file after multiple backoffs takes at most 10s to be read.
128 | #max_backoff: 10s
129 |
130 | # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
131 | # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
132 | # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
133 | #backoff_factor: 2
134 |
135 | # This option closes a file, as soon as the file name changes.
136 |       # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause
137 |       # issues when the file is removed, as the file will not be fully removed until Filebeat also closes
138 |       # the reader. Filebeat closes the file handler after ignore_older. During this time no new file with the
139 |       # same name can be created. Turning this feature on, on the other hand, can lead to loss of data
140 |       # on rotated files. It can happen that after file rotation the beginning of the new
141 |       # file is skipped, as the reading starts at the end. We recommend leaving this option set to false
142 |       # but lowering the ignore_older value to release files faster.
143 | #force_close_files: false
144 |
145 | # Additional prospector
146 | #-
147 | # Configuration to use stdin input
148 | #input_type: stdin
149 |
150 | # General filebeat configuration options
151 | #
152 | # Event count spool threshold - forces network flush if exceeded
153 | #spool_size: 2048
154 |
155 | # Enable async publisher pipeline in filebeat (Experimental!)
156 | #publish_async: false
157 |
158 | # Defines how often the spooler is flushed. After idle_timeout the spooler is
159 |   # flushed even though spool_size is not reached.
160 | #idle_timeout: 5s
161 |
162 | # Name of the registry file. Per default it is put in the current working
163 |   # directory. If the working directory is changed before running
164 |   # filebeat again, indexing starts from the beginning again.
165 | registry_file: /var/lib/filebeat/registry
166 |
167 | # Full Path to directory with additional prospector configuration files. Each file must end with .yml
168 | # These config files must have the full filebeat config part inside, but only
169 | # the prospector part is processed. All global options like spool_size are ignored.
170 |   # The config_dir MUST point to a different directory than where the main filebeat config file is in.
171 | #config_dir:
172 |
173 | ###############################################################################
174 | ############################# Libbeat Config ##################################
175 | # Base config file used by all other beats for using libbeat features
176 |
177 | ############################# Output ##########################################
178 |
179 | # Configure what outputs to use when sending the data collected by the beat.
180 | # Multiple outputs may be used.
181 | output:
182 |
183 | ### Elasticsearch as output
184 | #elasticsearch:
185 | # Array of hosts to connect to.
186 | # Scheme and port can be left out and will be set to the default (http and 9200)
187 |     # In case you specify an additional path, the scheme is required: http://localhost:9200/path
188 | # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
189 | # hosts: ["1.2.3.4:9200"]
190 |
191 | # Optional protocol and basic auth credentials.
192 | #protocol: "https"
193 | #username: "user"
194 | #password: "pass"
195 |
196 | # Number of workers per Elasticsearch host.
197 | #worker: 1
198 |
199 | # Optional index name. The default is "filebeat" and generates
200 | # [filebeat-]YYYY.MM.DD keys.
201 | #index: "filebeat"
202 |
203 | # A template is used to set the mapping in Elasticsearch
204 | # By default template loading is disabled and no template is loaded.
205 | # These settings can be adjusted to load your own template or overwrite existing ones
206 | #template:
207 |
208 | # Template name. By default the template name is filebeat.
209 | #name: "filebeat"
210 |
211 | # Path to template file
212 | #path: /etc/filebeat/filebeat.template.json
213 |
214 | # Overwrite existing template
215 | #overwrite: false
216 |
217 | # Optional HTTP Path
218 | #path: "/elasticsearch"
219 |
220 | # Proxy server url
221 | #proxy_url: http://proxy:3128
222 |
223 | # The number of times a particular Elasticsearch index operation is attempted. If
224 | # the indexing operation doesn't succeed after this many retries, the events are
225 | # dropped. The default is 3.
226 | #max_retries: 3
227 |
228 | # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
229 | # The default is 50.
230 | #bulk_max_size: 50
231 |
232 |     # Configure http request timeout before failing a request to Elasticsearch.
233 | #timeout: 90
234 |
235 | # The number of seconds to wait for new events between two bulk API index requests.
236 |     # If `bulk_max_size` is reached before this interval expires, additional bulk index
237 | # requests are made.
238 | #flush_interval: 1
239 |
240 | # Boolean that sets if the topology is kept in Elasticsearch. The default is
241 | # false. This option makes sense only for Packetbeat.
242 | #save_topology: false
243 |
244 | # The time to live in seconds for the topology information that is stored in
245 | # Elasticsearch. The default is 15 seconds.
246 | #topology_expire: 15
247 |
248 | # tls configuration. By default is off.
249 | #tls:
250 | # List of root certificates for HTTPS server verifications
251 | #certificate_authorities: ["/etc/pki/root/ca.pem"]
252 |
253 | # Certificate for TLS client authentication
254 | #certificate: "/etc/pki/client/cert.pem"
255 |
256 | # Client Certificate Key
257 | #certificate_key: "/etc/pki/client/cert.key"
258 |
259 | # Controls whether the client verifies server certificates and host name.
260 | # If insecure is set to true, all server host names and certificates will be
261 | # accepted. In this mode TLS based connections are susceptible to
262 | # man-in-the-middle attacks. Use only for testing.
263 | #insecure: true
264 |
265 | # Configure cipher suites to be used for TLS connections
266 | #cipher_suites: []
267 |
268 | # Configure curve types for ECDHE based cipher suites
269 | #curve_types: []
270 |
271 | # Configure minimum TLS version allowed for connection to logstash
272 | #min_version: 1.0
273 |
274 | # Configure maximum TLS version allowed for connection to logstash
275 | #max_version: 1.2
276 |
277 |
278 | ### Logstash as output
279 | logstash:
280 | # The Logstash hosts
281 | hosts: ["1.2.3.4:9000"]
282 |
283 | # Number of workers per Logstash host.
284 | #worker: 1
285 |
286 | # Set gzip compression level.
287 | #compression_level: 3
288 |
289 | # Optional load balance the events between the Logstash hosts
290 | #loadbalance: true
291 |
292 | # Optional index name. The default index name depends on the each beat.
293 | # For Packetbeat, the default is set to packetbeat, for Topbeat
294 |     # to topbeat and for Filebeat to filebeat.
295 |     # index: filebeat
296 |
297 | # Optional TLS. By default is off.
298 | #tls:
299 | # List of root certificates for HTTPS server verifications
300 | #certificate_authorities: ["/etc/pki/root/ca.pem"]
301 |
302 | # Certificate for TLS client authentication
303 | #certificate: "/etc/pki/client/cert.pem"
304 |
305 | # Client Certificate Key
306 | #certificate_key: "/etc/pki/client/cert.key"
307 |
308 | # Controls whether the client verifies server certificates and host name.
309 | # If insecure is set to true, all server host names and certificates will be
310 | # accepted. In this mode TLS based connections are susceptible to
311 | # man-in-the-middle attacks. Use only for testing.
312 | #insecure: true
313 |
314 | # Configure cipher suites to be used for TLS connections
315 | #cipher_suites: []
316 |
317 | # Configure curve types for ECDHE based cipher suites
318 | #curve_types: []
319 |
320 |
321 | ### File as output
322 | #file:
323 | # Path to the directory where to save the generated files. The option is mandatory.
324 | #path: "/tmp/filebeat"
325 |
326 | # Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
327 | #filename: filebeat
328 |
329 | # Maximum size in kilobytes of each file. When this size is reached, the files are
330 | # rotated. The default value is 10 MB.
331 | #rotate_every_kb: 10000
332 |
333 | # Maximum number of files under path. When this number of files is reached, the
334 | # oldest file is deleted and the rest are shifted from last to first. The default
335 | # is 7 files.
336 | #number_of_files: 7
337 |
338 |
339 | ### Console output
340 | # console:
341 | # Pretty print json event
342 | #pretty: false
343 |
344 |
345 | ############################# Shipper #########################################
346 |
347 | shipper:
348 | # The name of the shipper that publishes the network data. It can be used to group
349 | # all the transactions sent by a single shipper in the web interface.
350 |   # If this option is not defined, the hostname is used.
351 | #name:
352 |
353 | # The tags of the shipper are included in their own field with each
354 | # transaction published. Tags make it easy to group servers by different
355 | # logical properties.
356 | #tags: ["service-X", "web-tier"]
357 |
358 | # Uncomment the following if you want to ignore transactions created
359 | # by the server on which the shipper is installed. This option is useful
360 | # to remove duplicates if shippers are installed on multiple servers.
361 | #ignore_outgoing: true
362 |
363 | # How often (in seconds) shippers are publishing their IPs to the topology map.
364 | # The default is 10 seconds.
365 | #refresh_topology_freq: 10
366 |
367 | # Expiration time (in seconds) of the IPs published by a shipper to the topology map.
368 |   # All the IPs will be deleted afterwards. Note that the value must be higher than
369 | # refresh_topology_freq. The default is 15 seconds.
370 | #topology_expire: 15
371 |
372 | # Internal queue size for single events in processing pipeline
373 | #queue_size: 1000
374 |
375 | # Configure local GeoIP database support.
376 |   # If no paths are configured, geoip is disabled.
377 | #geoip:
378 | #paths:
379 | # - "/usr/share/GeoIP/GeoLiteCity.dat"
380 | # - "/usr/local/var/GeoIP/GeoLiteCity.dat"
381 |
382 |
383 | ############################# Logging #########################################
384 |
385 | # There are three options for the log output: syslog, file, stderr.
386 | # Under Windows systems, the log files are per default sent to the file output,
387 | # under all other systems per default to syslog.
388 | logging:
389 |
390 | # Send all logging output to syslog. On Windows default is false, otherwise
391 | # default is true.
392 | #to_syslog: true
393 |
394 | # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
395 | # limit is reached.
396 | #to_files: false
397 |
398 | # To enable logging to files, to_files option has to be set to true
399 | files:
400 |     # The directory where the log files will be written to.
401 | path: /var/log/mybeat
402 |
403 | # The name of the files where the logs are written to.
404 | #name: mybeat
405 |
406 | # Configure log file size limit. If limit is reached, log file will be
407 | # automatically rotated
408 | rotateeverybytes: 10485760 # = 10MB
409 |
410 | # Number of rotated log files to keep. Oldest files will be deleted first.
411 | #keepfiles: 7
412 |
413 | # Enable debug output for selected components. To enable all selectors use ["*"]
414 | # Other available selectors are beat, publish, service
415 | # Multiple selectors can be chained.
416 | #selectors: [ ]
417 |
418 | # Sets log level. The default log level is error.
419 | # Available log levels are: critical, error, warning, info, debug
420 | #level: error
421 |
--------------------------------------------------------------------------------
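Note on the multiline settings above: with pattern ^\*\*, negate: true and
match: after, every line that does not start with "**" is appended to the
event opened by the last line that does, which is how multi-line OSSEC alerts
in /var/log/alerts.log become single Filebeat events. Below is a minimal
Python sketch of that grouping rule; it is not part of this repository, and
the sample lines are illustrative only.

    import re

    PATTERN = re.compile(r"^\*\*")  # multiline.pattern

    def group_multiline(lines):
        """negate: true + match: after -- a matching line opens a new event;
        non-matching lines are appended to the current one."""
        events, current = [], []
        for line in lines:
            if PATTERN.search(line) and current:
                events.append("\n".join(current))
                current = []
            current.append(line)
        if current:
            events.append("\n".join(current))
        return events

    sample = [
        "** Alert 1467281151.1167: mail - syslog,authentication_success",
        "2016 Jun 30 12:05:51 (agent1) 10.0.0.5->/var/log/secure",
        "Rule: 10100 (level 4) -> 'First time user logged in.'",
        "** Alert 1467281200.2211: - syslog,errors",
        "2016 Jun 30 12:06:40 (agent1) 10.0.0.5->/var/log/messages",
    ]
    for event in group_multiline(sample):
        print(event, "\n---")   # two events: three lines, then two lines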