├── .dockerignore ├── .gitignore ├── Dockerfile ├── LICENSE ├── QA-Monitoring-demo-seed-data.postman_collection.json ├── README.md ├── config ├── elastic │ ├── elasticsearch.yml │ ├── log4j2.properties │ └── logrotate ├── filebeat │ └── filebeat.yml ├── golang │ └── no-pic.patch ├── logstash │ ├── bro-ids │ │ ├── LICENSE │ │ ├── README.md │ │ ├── conf_files │ │ │ ├── bro │ │ │ │ ├── bro-conn_log.conf │ │ │ │ ├── bro-dhcp_log.conf │ │ │ │ ├── bro-dns_log.conf │ │ │ │ ├── bro-files_log.conf │ │ │ │ ├── bro-http_log.conf │ │ │ │ ├── bro-notice_log.conf │ │ │ │ └── bro-weird_log.conf │ │ │ ├── log2timeline │ │ │ │ └── logstash-log2timeline.conf │ │ │ └── web_logs │ │ │ │ ├── logstash-apache-combined.conf │ │ │ │ ├── logstash-apache-common.conf │ │ │ │ ├── logstash-iis6.conf │ │ │ │ ├── logstash-iis7.conf │ │ │ │ └── logstash-iis8.conf │ │ ├── dictionaries │ │ │ ├── logstash-bro-conn-log.dict │ │ │ ├── logstash-ftp-status-codes.dict │ │ │ └── logstash-http-status-codes.dict │ │ └── type_mappings │ │ │ ├── log2timeline.type │ │ │ └── mhn-hpfeed.type │ ├── conf.d │ │ ├── 02-beats-input.conf │ │ ├── 03-http-input.conf │ │ ├── 10-syslog-filter.conf │ │ ├── 11-nginx-filter.conf │ │ └── 30-elasticsearch-output.conf │ ├── logstash.yml │ └── patterns │ │ └── nginx ├── metricbeat │ └── metricbeat.yml ├── misc │ ├── elasticsearch.nse │ └── test_index.py ├── nginx │ ├── htpasswd │ ├── kibana.conf │ ├── nginx.conf │ └── ssl.kibana.conf └── supervisord │ └── supervisord.conf ├── entrypoints ├── elastic-entrypoint.sh ├── filebeat-entrypoint.sh ├── kibana-entrypoint.sh ├── logstash-entrypoint.sh ├── metricbeat-entrypoint.sh └── nginx-entrypoint.sh ├── hooks └── post_push ├── images ├── configure-setting.png ├── define-index-pattern.png └── selected-fields.png └── robot-tests ├── Resources ├── Keywords.robot └── Variables.robot └── example_test_results.robot /.dockerignore: -------------------------------------------------------------------------------- 1 | # Ignore .git folder 2 | .git* 3 | .gitignore 4 | .gitmodules 5 | 6 | build 7 | CHANGELOG.md 8 | LICENSE 9 | Makefile 10 | README.md 11 | README.md.bu 12 | VERSION 13 | circle.yml 14 | docker-compose.yml 15 | docs 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | log.html 2 | output.xml 3 | report.html 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.7 2 | 3 | LABEL maintainer "https://github.com/blacktop" 4 | 5 | RUN apk add --no-cache openjdk8-jre tini su-exec 6 | 7 | ENV STACK 6.2.3 8 | 9 | RUN apk add --no-cache libzmq bash nodejs nginx apache2-utils openssl 10 | RUN mkdir -p /usr/local/lib \ 11 | && ln -s /usr/lib/*/libzmq.so.3 /usr/local/lib/libzmq.so 12 | RUN apk add --no-cache -t .build-deps wget ca-certificates \ 13 | && set -x \ 14 | && cd /tmp \ 15 | && echo "Download Elastic Stack ======================================================" \ 16 | && echo "Download Elasticsearch..." \ 17 | && wget --progress=bar:force -O elasticsearch-$STACK.tar.gz https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-$STACK.tar.gz \ 18 | && tar -xzf elasticsearch-$STACK.tar.gz \ 19 | && mv elasticsearch-$STACK /usr/share/elasticsearch \ 20 | && echo "Download Logstash..." 
\ 21 | && wget --progress=bar:force -O logstash-$STACK.tar.gz \ 22 | https://artifacts.elastic.co/downloads/logstash/logstash-$STACK.tar.gz \ 23 | && tar -xzf logstash-$STACK.tar.gz \ 24 | && mv logstash-$STACK /usr/share/logstash \ 25 | && echo "Download Kibana..." \ 26 | && wget --progress=bar:force -O kibana-$STACK.tar.gz https://artifacts.elastic.co/downloads/kibana/kibana-$STACK-linux-x86_64.tar.gz \ 27 | && tar -xzf kibana-$STACK.tar.gz \ 28 | && mv kibana-$STACK-linux-x86_64 /usr/share/kibana \ 29 | && echo "Configure [Elasticsearch] ===================================================" \ 30 | && for path in \ 31 | /usr/share/elasticsearch/data \ 32 | /usr/share/elasticsearch/logs \ 33 | /usr/share/elasticsearch/config \ 34 | /usr/share/elasticsearch/config/scripts \ 35 | /usr/share/elasticsearch/plugins \ 36 | /usr/share/elasticsearch/tmp \ 37 | ; do \ 38 | mkdir -p "$path"; \ 39 | done \ 40 | && echo "Configure [Logstash] ========================================================" \ 41 | && if [ -f "$LS_SETTINGS_DIR/logstash.yml" ]; then \ 42 | sed -ri 's!^(path.log|path.config):!#&!g' "$LS_SETTINGS_DIR/logstash.yml"; \ 43 | fi \ 44 | && echo "Configure [Kibana] ==========================================================" \ 45 | # the default "server.host" is "localhost" in 5+ 46 | && sed -ri "s!^(\#\s*)?(server\.host:).*!\2 '0.0.0.0'!" /usr/share/kibana/config/kibana.yml \ 47 | && grep -q "^server\.host: '0.0.0.0'\$" /usr/share/kibana/config/kibana.yml \ 48 | # usr alpine nodejs and not bundled version 49 | && bundled='NODE="${DIR}/node/bin/node"' \ 50 | && apline_node='NODE="/usr/bin/node"' \ 51 | && sed -i "s|$bundled|$apline_node|g" /usr/share/kibana/bin/kibana-plugin \ 52 | && sed -i "s|$bundled|$apline_node|g" /usr/share/kibana/bin/kibana \ 53 | && rm -rf /usr/share/kibana/node \ 54 | && echo "Make Nginx SSL directory..." \ 55 | && mkdir -p /etc/nginx/ssl \ 56 | && rm /etc/nginx/conf.d/default.conf \ 57 | && echo "Create elstack user..." \ 58 | && adduser -DH -s /sbin/nologin elstack \ 59 | && chown -R elstack:elstack /usr/share/elasticsearch \ 60 | && chown -R elstack:elstack /usr/share/logstash \ 61 | && chown -R elstack:elstack /usr/share/kibana \ 62 | && echo "Clean Up..." 
\ 63 | && rm -rf /tmp/* \ 64 | && apk del --purge .build-deps 65 | 66 | RUN apk add --no-cache supervisor libc6-compat 67 | 68 | ENV PATH /usr/share/elasticsearch/bin:$PATH 69 | ENV PATH /usr/share/logstash/bin:$PATH 70 | ENV PATH /usr/share/kibana/bin:$PATH 71 | ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk 72 | 73 | # Add custom elasticsearch config 74 | COPY config/elastic /usr/share/elasticsearch/config 75 | COPY config/elastic/logrotate /etc/logrotate.d/elasticsearch 76 | 77 | # Add custom logstash config 78 | COPY config/logstash/conf.d/ /etc/logstash/conf.d/ 79 | COPY config/logstash/patterns/ /opt/logstash/patterns/ 80 | COPY config/logstash/logstash.yml /etc/logstash/ 81 | 82 | # necessary for 5.0+ (overriden via "--path.settings", ignored by < 5.0) 83 | ENV LS_SETTINGS_DIR /etc/logstash 84 | 85 | # fixes mktemp issue in alpine 86 | ENV ES_TMPDIR /usr/share/elasticsearch/tmp 87 | 88 | # Add custom nginx config 89 | COPY config/nginx/nginx.conf /etc/nginx/nginx.conf 90 | COPY config/nginx/kibana.conf /etc/nginx/conf.d/ 91 | COPY config/nginx/ssl.kibana.conf /etc/nginx/conf.d/ 92 | 93 | # Add custom supervisor config 94 | COPY config/supervisord/supervisord.conf /etc/supervisor/ 95 | 96 | # Add entrypoints 97 | COPY entrypoints/elastic-entrypoint.sh / 98 | COPY entrypoints/logstash-entrypoint.sh / 99 | COPY entrypoints/kibana-entrypoint.sh / 100 | COPY entrypoints/nginx-entrypoint.sh / 101 | 102 | # Install logstash-input-http plugin 103 | RUN logstash-plugin install logstash-input-http 104 | 105 | VOLUME ["/usr/share/elasticsearch/data"] 106 | VOLUME ["/etc/logstash/conf.d"] 107 | VOLUME ["/etc/nginx"] 108 | 109 | EXPOSE 80 443 5601 8060 9200 9300 110 | 111 | CMD ["/sbin/tini","--","/usr/bin/supervisord","-c", "/etc/supervisor/supervisord.conf"] 112 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Nils Balkow-Tychsen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /QA-Monitoring-demo-seed-data.postman_collection.json: -------------------------------------------------------------------------------- 1 | { 2 | "info": { 3 | "_postman_id": "9c8fef50-751e-4dc1-8d2c-6fc3826eefab", 4 | "name": "QA-Monitoring-demo-seed-data", 5 | "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" 6 | }, 7 | "item": [ 8 | { 9 | "name": "Report1", 10 | "request": { 11 | "method": "POST", 12 | "header": [ 13 | { 14 | "key": "Content-Type", 15 | "value": "application/json" 16 | } 17 | ], 18 | "body": { 19 | "mode": "raw", 20 | "raw": "{\n\t\"environment\" : \"Staging\", \n\t\"test_title\" : \"the-test-test\", \n\t\"result\" : \"PASS\", \n\t\"report\" : \"All test steps have passed.\"\n\t\n}" 21 | }, 22 | "url": { 23 | "raw": "http://localhost:8060/", 24 | "protocol": "http", 25 | "host": [ 26 | "localhost" 27 | ], 28 | "port": "8060", 29 | "path": [ 30 | "" 31 | ] 32 | }, 33 | "description": "." 34 | }, 35 | "response": [] 36 | }, 37 | { 38 | "name": "Report2", 39 | "request": { 40 | "method": "POST", 41 | "header": [ 42 | { 43 | "key": "Content-Type", 44 | "value": "application/json" 45 | } 46 | ], 47 | "body": { 48 | "mode": "raw", 49 | "raw": "{\n\t\"environment\" : \"Production\", \n\t\"test_title\" : \"the-test-test\", \n\t\"result\" : \"PASS\", \n\t\"report\" : \"All test steps have passed.\"\n\t\n}" 50 | }, 51 | "url": { 52 | "raw": "http://localhost:8060/", 53 | "protocol": "http", 54 | "host": [ 55 | "localhost" 56 | ], 57 | "port": "8060", 58 | "path": [ 59 | "" 60 | ] 61 | }, 62 | "description": "." 63 | }, 64 | "response": [] 65 | }, 66 | { 67 | "name": "Report3", 68 | "request": { 69 | "method": "POST", 70 | "header": [ 71 | { 72 | "key": "Content-Type", 73 | "value": "application/json" 74 | } 75 | ], 76 | "body": { 77 | "mode": "raw", 78 | "raw": "{\n\t\"environment\" : \"Staging\", \n\t\"test_title\" : \"the-test-test\", \n\t\"result\" : \"FAIL\", \n\t\"report\" : \"Test failed on step 23\\nCould not find UI object.\"\n\t\n}" 79 | }, 80 | "url": { 81 | "raw": "http://localhost:8060/", 82 | "protocol": "http", 83 | "host": [ 84 | "localhost" 85 | ], 86 | "port": "8060", 87 | "path": [ 88 | "" 89 | ] 90 | }, 91 | "description": "." 92 | }, 93 | "response": [] 94 | } 95 | ] 96 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Monitor your automation test results with logstash, elastic search and kibana 2 | 3 | ## Intro 4 | As QA you are always required to provide the best possible transparency for your test automation. To always know what was tested when and with what result will benefit your stakeholders as well as your team members. 5 | This demo project should show how the elastic stack can be used as a monitoring tool for automation test results. 6 | 7 | ## Requirements 8 | 9 | You need to have [docker](https://www.docker.com/) installed. 
10 | 
11 | To send the seed demo data (demo test results) later, you can use one of the following: 
12 | * [Postman](https://www.getpostman.com/), 
13 | * [Newman](https://github.com/postmanlabs/newman#getting-started), 
14 | * [cURL](https://curl.haxx.se/) 
15 | * or [Robot Framework](https://robotframework.org/) with the [RequestsLibrary](https://github.com/bulkan/robotframework-requests) 
16 | 
17 | ## How to run it 
18 | 
19 | Clone this repository with `git clone`. 
20 | 
21 | Build and run the Docker container: 
22 | ``` 
23 | docker build -t el-qa-mon . 
24 | docker run -d --name qa-mon-elastic-stack -p 80:80 -p 8060:8060 -p 9200:9200 el-qa-mon 
25 | ``` 
26 | This container brings up Logstash, Elasticsearch and Kibana with a default configuration. I used [this docker image](https://hub.docker.com/r/blacktop/elastic-stack/) as a base and only added the [Logstash http input plugin](https://www.elastic.co/blog/introducing-logstash-input-http-plugin). 
27 | 
28 | ## Explore Kibana and Logstash 
29 | 
30 | In your browser, go to http://localhost:80 to see the Kibana frontend. (Not much to see yet without data.) 
31 | 
32 | Go to http://localhost:8060 to check whether the Logstash http input plugin is listening (it takes about two minutes to come up after the container has started). 
33 | Once it is running, it responds with "ok". 
34 | 
35 | ## Send seed data - demo test reports 
36 | * Either open the collection QA-Monitoring-demo-seed-data.postman_collection.json in Postman and execute it. 
37 | * or use Newman 
38 | ``` 
39 | newman run QA-Monitoring-demo-seed-data.postman_collection.json 
40 | ``` 
41 | * or send the following cURL requests 
42 | ``` 
43 | curl -X POST \ 
44 | http://localhost:8060/ \ 
45 | -H 'Content-Type: application/json' \ 
46 | -d '{ 
47 | "environment" : "Staging", 
48 | "test_title" : "the-test-test", 
49 | "result" : "PASS", 
50 | "report" : "All test steps have passed." 
51 | 
52 | }' 
53 | curl -X POST \ 
54 | http://localhost:8060/ \ 
55 | -H 'Content-Type: application/json' \ 
56 | -d '{ 
57 | "environment" : "Production", 
58 | "test_title" : "the-test-test", 
59 | "result" : "PASS", 
60 | "report" : "All test steps have passed." 
61 | 
62 | }' 
63 | curl -X POST \ 
64 | http://localhost:8060/ \ 
65 | -H 'Content-Type: application/json' \ 
66 | -d '{ 
67 | "environment" : "Staging", 
68 | "test_title" : "the-test-test", 
69 | "result" : "FAIL", 
70 | "report" : "Test failed on step 23\nCould not find UI object." 
71 | 
72 | }' 
73 | ``` 
74 | * or run the included Robot Framework tests to populate some example test results: 
75 | `robot robot-tests` 
76 | 
77 | ## Find data in Kibana 
78 | 
79 | In your browser, go to http://localhost:80 to see the Kibana frontend. 
80 | 
81 | 1. Create an index pattern "*". 
82 | ![index pattern](/images/define-index-pattern.png) 
83 | 
84 | 2. In the second step of index creation, set the Time Filter field to @timestamp. 
85 | ![index pattern](/images/configure-setting.png) 
86 | 
87 | 3. Go to Discover and select the fields you are interested in for a less cluttered view. 
88 | ![index pattern](/images/selected-fields.png) 
89 | 
90 | 4. Build some nice visualisations in the Visualize section. 
91 | I highly recommend working through a [tutorial](https://www.digitalocean.com/community/tutorials/how-to-use-kibana-dashboards-and-visualizations) to get started with this. 
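
## Verify the data via the Elasticsearch API (optional)

If you prefer the command line, you can also check directly against Elasticsearch on port 9200 that the seed reports were indexed. This is a minimal sketch using the standard `_cat/indices` and `_search` APIs; the exact index name depends on the Logstash output configuration in this image, so list the indices first:

```
# List all indices so you can see which one Logstash created for the reports
curl 'http://localhost:9200/_cat/indices?v'

# Search across all indices for failed test reports
curl 'http://localhost:9200/_search?q=result:FAIL&pretty'
```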
92 | -------------------------------------------------------------------------------- /config/elastic/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | http.host: 0.0.0.0 2 | 3 | # Uncomment the following lines for a production cluster deployment 4 | #transport.host: 0.0.0.0 5 | #discovery.zen.minimum_master_nodes: 1 6 | -------------------------------------------------------------------------------- /config/elastic/log4j2.properties: -------------------------------------------------------------------------------- 1 | status = error 2 | 3 | appender.console.type = Console 4 | appender.console.name = console 5 | appender.console.layout.type = PatternLayout 6 | appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n 7 | 8 | rootLogger.level = info 9 | rootLogger.appenderRef.console.ref = console 10 | -------------------------------------------------------------------------------- /config/elastic/logrotate: -------------------------------------------------------------------------------- 1 | /var/log/elasticsearch/*.log { 2 | daily 3 | rotate 50 4 | size 50M 5 | copytruncate 6 | compress 7 | delaycompress 8 | missingok 9 | notifempty 10 | create 644 elasticsearch elasticsearch 11 | } 12 | -------------------------------------------------------------------------------- /config/filebeat/filebeat.yml: -------------------------------------------------------------------------------- 1 | filebeat: 2 | prospectors: 3 | - 4 | paths: 5 | - /var/log/auth.log 6 | - /var/log/nginx/access.log 7 | - /var/log/syslog 8 | - /var/log/*.log 9 | 10 | input_type: log 11 | 12 | document_type: syslog 13 | 14 | registry_file: /var/lib/filebeat/registry 15 | 16 | output: 17 | logstash: 18 | hosts: ["localhost:5044"] 19 | bulk_max_size: 1024 20 | 21 | shipper: 22 | 23 | logging: 24 | files: 25 | rotateeverybytes: 10485760 # = 10MB 26 | -------------------------------------------------------------------------------- /config/golang/no-pic.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go 2 | index 14f4fa9..5599307 100644 3 | --- a/src/cmd/link/internal/ld/lib.go 4 | +++ b/src/cmd/link/internal/ld/lib.go 5 | @@ -1272,6 +1272,11 @@ func hostlink() { 6 | argv = append(argv, peimporteddlls()...) 7 | } 8 | 9 | + // The Go linker does not currently support building PIE 10 | + // executables when using the external linker. 
See: 11 | + // https://github.com/golang/go/issues/6940 12 | + argv = append(argv, "-fno-PIC") 13 | + 14 | if Debug['v'] != 0 { 15 | fmt.Fprintf(Bso, "host link:") 16 | for _, v := range argv { 17 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 505Forensics 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/README.md: -------------------------------------------------------------------------------- 1 | logstash-dfir 2 | ============= 3 | 4 | Logstash configuration files for analyzing various types of logs. These configuration files are provided to analyze various types of log files using logstash, elasticsearch, and kibana. 5 | 6 | Whether you are running a full-blown setup of ElasticSearch, Kibana, and log shippers, or a single instance for rapid analysis, these configuration files will help you quickly parse various log files found on system images. 
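
As a rough sketch of how these files are meant to be used (assuming Logstash is installed and on your PATH, and that you have first edited the `path =>` entries in the chosen config to point at your own log files):

```
# Parse a set of Bro conn logs with the bundled config (adjust the input path first)
logstash -f conf_files/bro/bro-conn_log.conf
```

Note that, as the header comments in the Bro configs state, several of them rely on the logstash `translate` filter, which may need to be installed separately depending on your Logstash version.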
7 | 8 | Logstash 9 | ============= 10 | [Logstash Website](http://www.logstash.net) 11 | 12 | Other Resources 13 | ============= 14 | [sysforensics GitHub](https://github.com/sysforensics/LogstashConfigs) 15 | 16 | Related Posts 17 | ============= 18 | [I'll take some Elasticsearch/Kibana with my Plaso (Windows edition)](http://blog.kiddaland.net/2014/06/ill-take-some-elasticsearchkibana-with.html) 19 | 20 | [Finding the Needle in the Haystack with ELK](https://digital-forensics.sans.org/summit-archives/dfirprague14/Finding_the_Needle_in_the_Haystack_with_FLK_Christophe_Vandeplas.pdf) 21 | 22 | [Rapid Log Analysis](http://www.505forensics.com/rapid-log-analysis/) 23 | 24 | [Do you even Bro, bro?](http://www.505forensics.com/do-you-even-bro-bro/) 25 | 26 | [Utilizing Dictionaries with Logstash](http://www.505forensics.com/utilizing-dictionaries-with-logstash/) 27 | 28 | Changelog 29 | ============= 30 | 07 Jan 2015 - Uploaded logstash dictionaries for HTTP, FTP, and Bro IDS conn log status codes 31 | 32 | 04 Sep 2014 - Uploaded Bro IDS logs; thanks to team at http://www.appliednsm.com for laying the groundwork 33 | 34 | 02 Mar 2014 - Added log2timeline logstash config 35 | 36 | 01 Mar 2014 - Added apache-combined logstash config 37 | 38 | 22 Feb 2014 - Repository created; uploaded apache-common logstash config. 39 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/bro/bro-conn_log.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Bro IDS Logs 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse Bro conn logs 9 | # 10 | # Limitations: Standard Bro log delimiter is tab. 11 | # 12 | # Dependencies: Utilizing the logstash 'translate' filter requires having the logstash contrib plugins added, which are community supported and not part of the official release. Visit logstash.net to find out how to install these 13 | # 14 | ####################### 15 | 16 | input { 17 | file { 18 | type => "bro-conn_log" 19 | start_position => "beginning" 20 | sincedb_path => "/dev/null" 21 | 22 | #Edit the following path to reflect the location of your log files. You can also change the extension if you use something else 23 | path => "/path/to/your/bro/logs/conn*.log" 24 | } 25 | } 26 | 27 | filter { 28 | 29 | #Let's get rid of those header lines; they begin with a hash 30 | if [message] =~ /^#/ { 31 | drop { } 32 | } 33 | 34 | #Now, using the csv filter, we can define the Bro log fields 35 | if [type] == "bro-conn_log" { 36 | csv { 37 | columns => ["ts","uid","id.orig_h","id.orig_p","id.resp_h","id.resp_p","proto","service","duration","orig_bytes","resp_bytes","conn_state","local_orig","missed_bytes","history","orig_pkts","orig_ip_bytes","resp_pkts","resp_ip_bytes","tunnel_parents"] 38 | 39 | #If you use a custom delimiter, change the following value in between the quotes to your delimiter. 
Otherwise, insert a literal in between the two quotes on your logstash system 40 | separator => "" 41 | } 42 | 43 | #Let's convert our timestamp into the 'ts' field, so we can use Kibana features natively 44 | date { 45 | match => [ "ts", "UNIX" ] 46 | } 47 | 48 | #The following makes use of the translate filter (logstash contrib) to convert conn_state into human text. Saves having to look up values for packet introspection 49 | translate { 50 | field => "conn_state" 51 | 52 | destination => "conn_state_full" 53 | 54 | dictionary => [ 55 | "S0", "Connection attempt seen, no reply", 56 | "S1", "Connection established, not terminated", 57 | "S2", "Connection established and close attempt by originator seen (but no reply from responder)", 58 | "S3", "Connection established and close attempt by responder seen (but no reply from originator)", 59 | "SF", "Normal SYN/FIN completion", 60 | "REJ", "Connection attempt rejected", 61 | "RSTO", "Connection established, originator aborted (sent a RST)", 62 | "RSTR", "Established, responder aborted", 63 | "RSTOS0", "Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder", 64 | "RSTRH", "Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator", 65 | "SH", "Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was 'half' open)", 66 | "SHR", "Responder sent a SYN ACK followed by a FIN, we never saw a SYN from the originator", 67 | "OTH", "No SYN seen, just midstream traffic (a 'partial connection' that was not later closed)" 68 | ] 69 | } 70 | 71 | mutate { 72 | convert => [ "id.orig_p", "integer" ] 73 | convert => [ "id.resp_p", "integer" ] 74 | convert => [ "orig_bytes", "integer" ] 75 | convert => [ "resp_bytes", "integer" ] 76 | convert => [ "missed_bytes", "integer" ] 77 | convert => [ "orig_pkts", "integer" ] 78 | convert => [ "orig_ip_bytes", "integer" ] 79 | convert => [ "resp_pkts", "integer" ] 80 | convert => [ "resp_ip_bytes", "integer" ] 81 | } 82 | } 83 | } 84 | 85 | output { 86 | elasticsearch { 87 | embedded => true 88 | } 89 | } 90 | 91 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/bro/bro-dhcp_log.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Bro IDS Logs 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse Bro dhcp logs 9 | # 10 | # Limitations: Standard bro log delimiter is tab. 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "bro-dhcp_log" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | #Edit the following path to reflect the location of your log files. 
You can also change the extension if you use something else 21 | path => "/path/to/your/bro/logs/dhcp*.log" 22 | } 23 | } 24 | 25 | filter { 26 | 27 | #Let's get rid of those header lines; they begin with a hash 28 | if [message] =~ /^#/ { 29 | drop { } 30 | } 31 | 32 | #Now, using the csv filter, we can define the Bro log fields 33 | if [type] == "bro-dhcp_log" { 34 | csv { 35 | columns => ["ts","uid","id.orig_h","id.orig_p","id.resp_h","id.resp_p","mac","assigned_ip","lease_time","trans_id"] 36 | 37 | #If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, leave the next line alone. 38 | separator => "" 39 | } 40 | 41 | #Let's convert our timestamp into the 'ts' field, so we can use Kibana features natively 42 | date { 43 | match => [ "ts", "UNIX" ] 44 | } 45 | 46 | mutate { 47 | convert => [ "id.orig_p", "integer" ] 48 | convert => [ "id.resp_p", "integer" ] 49 | convert => [ "lease_time", "float" ] 50 | convert => [ "trans_id", "integer" ] 51 | } 52 | } 53 | } 54 | 55 | output { 56 | elasticsearch { 57 | embedded => true 58 | } 59 | } 60 | 61 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/bro/bro-dns_log.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Bro IDS Logs 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse Bro dns logs 9 | # 10 | # Limitations: Standard bro log delimiter is tab. 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "bro-dns_log" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | #Edit the following path to reflect the location of your log files. You can also change the extension if you use something else 21 | path => "/path/to/your/bro/logs/dns*.log" 22 | } 23 | } 24 | 25 | filter { 26 | 27 | #Let's get rid of those header lines; they begin with a hash 28 | if [message] =~ /^#/ { 29 | drop { } 30 | } 31 | 32 | #Now, using the csv filter, we can define the Bro log fields 33 | if [type] == "bro-dns_log" { 34 | csv { 35 | columns => ["ts","uid","id.orig_h","id.orig_p","id.resp_h","id.resp_p","proto","trans_id","query","qclass","qclass_name","qtype","qtype_name","rcode","rcode_name","AA","TC","RD","RA","Z","answers","TTLs","rejected"] 36 | 37 | #If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, leave the next line alone. 
38 | separator => "" 39 | } 40 | 41 | #Let's convert our timestamp into the 'ts' field, so we can use Kibana features natively 42 | date { 43 | match => [ "ts", "UNIX" ] 44 | } 45 | 46 | mutate { 47 | convert => [ "id.orig_p", "integer" ] 48 | convert => [ "id.resp_p", "integer" ] 49 | convert => [ "trans_id", "integer" ] 50 | convert => [ "qclass", "integer" ] 51 | convert => [ "qtype", "integer" ] 52 | convert => [ "rcode", "integer" ] 53 | } 54 | } 55 | } 56 | 57 | output { 58 | elasticsearch { 59 | embedded => true 60 | } 61 | } 62 | 63 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/bro/bro-files_log.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Bro IDS Logs 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse Bro files logs 9 | # 10 | # Limitations: Standard bro log delimiter is tab. 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "bro-files_log" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | #Edit the following path to reflect the location of your log files. You can also change the extension if you use something else 21 | path => "/path/to/your/bro/logs/files*.log" 22 | } 23 | } 24 | 25 | filter { 26 | 27 | #Let's get rid of those header lines; they begin with a hash 28 | if [message] =~ /^#/ { 29 | drop { } 30 | } 31 | 32 | #Now, using the csv filter, we can define the Bro log fields 33 | if [type] == "bro-files_log" { 34 | csv { 35 | columns => ["ts","fuid","tx_hosts","rx_hosts","conn_uids","source","depth","analyzers","mime_type","filename","duration","local_orig","is_orig","seen_bytes","total_bytes","missing_bytes","overflow_bytes","timedout","parent_fuid","md5","sha1","sha256","extracted"] 36 | 37 | #If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, leave the next line alone. 38 | separator => "" 39 | } 40 | 41 | #Let's convert our timestamp into the 'ts' field, so we can use Kibana features natively 42 | date { 43 | match => [ "ts", "UNIX" ] 44 | } 45 | 46 | mutate { 47 | convert => [ "depth", "integer" ] 48 | convert => [ "seen_bytes", "integer" ] 49 | convert => [ "total_bytes", "integer" ] 50 | convert => [ "missing_bytes", "integer" ] 51 | convert => [ "overflow_bytes", "integer" ] 52 | 53 | } 54 | } 55 | } # appears to be missing oner here 56 | output { 57 | elasticsearch { 58 | embedded => true 59 | } 60 | } 61 | 62 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/bro/bro-http_log.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Bro IDS Logs 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse Bro http logs 9 | # 10 | # Limitations: Standard bro log delimiter is tab. 
11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "bro-http_log" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | #Edit the following path to reflect the location of your log files. You can also change the extension if you use something else 21 | path => "/path/to/your/bro/logs/http*.log" 22 | } 23 | } 24 | 25 | filter { 26 | 27 | #Let's get rid of those header lines; they begin with a hash 28 | if [message] =~ /^#/ { 29 | drop { } 30 | } 31 | 32 | #Now, using the csv filter, we can define the Bro log fields 33 | if [type] == "bro-http_log" { 34 | csv { 35 | columns => ["ts","uid","id.orig_h","id.orig_p","id.resp_h","id.resp_p","trans_depth","method","host","uri","referrer","user_agent","request_body_len","response_body_len","status_code","status_msg","info_code","info_msg","filename","tags","username","password","proxied","orig_fuids","orig_mime_types","resp_fuids","resp_mime_types"] 36 | 37 | #If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, leave the next line alone. 38 | separator => "" 39 | } 40 | 41 | #Let's convert our timestamp into the 'ts' field, so we can use Kibana features natively 42 | date { 43 | match => [ "ts", "UNIX" ] 44 | } 45 | 46 | mutate { 47 | convert => [ "id.orig_p", "integer" ] 48 | convert => [ "id.resp_p", "integer" ] 49 | convert => [ "trans_depth", "integer" ] 50 | convert => [ "request_body_len", "integer" ] 51 | convert => [ "response_body_len", "integer" ] 52 | convert => [ "status_code", "integer" ] 53 | convert => [ "info_code", "integer" ] 54 | } 55 | } 56 | } 57 | 58 | output { 59 | elasticsearch { 60 | embedded => true 61 | } 62 | } 63 | 64 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/bro/bro-notice_log.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Bro IDS Logs 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse Bro notice logs 9 | # 10 | # Limitations: Standard bro log delimiter is tab. 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "bro-notice_log" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | #Edit the following path to reflect the location of your log files. You can also change the extension if you use something else 21 | path => "/path/to/your/bro/logs/notice*.log" 22 | } 23 | } 24 | 25 | filter { 26 | 27 | #Let's get rid of those header lines; they begin with a hash 28 | if [message] =~ /^#/ { 29 | drop { } 30 | } 31 | 32 | #Now, using the csv filter, we can define the Bro log fields 33 | if [type] == "bro-notice_log" { 34 | csv { 35 | columns => ["ts","uid","id.orig_h","id.orig_p","id.resp_h","id.resp_p","fuid","file_mime_type","file_desc","proto","note","msg","sub","src","dst","p","n","peer_descr","actions","suppress_for","dropped","remote_location.country_code","remote_location.region","remote_location.city","remote_location.latitude","remote_location.longitude"] 36 | 37 | #If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, leave the next line alone. 
38 | separator => "" 39 | } 40 | 41 | #Let's convert our timestamp into the 'ts' field, so we can use Kibana features natively 42 | date { 43 | match => [ "ts", "UNIX" ] 44 | } 45 | 46 | mutate { 47 | convert => [ "id.orig_p", "integer" ] 48 | convert => [ "id.resp_p", "integer" ] 49 | convert => [ "p", "integer" ] 50 | convert => [ "n", "integer" ] 51 | convert => [ "suppress_for", "float" ] 52 | } 53 | } 54 | } 55 | 56 | output { 57 | elasticsearch { 58 | embedded => true 59 | } 60 | } 61 | 62 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/bro/bro-weird_log.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Bro IDS Logs 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse Bro weird logs 9 | # 10 | # Limitations: Standard bro log delimiter is tab. 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "bro-weird_log" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | #Edit the following path to reflect the location of your log files. You can also change the extension if you use something else 21 | path => "/path/to/your/bro/logs/weird*.log" 22 | } 23 | } 24 | 25 | filter { 26 | 27 | #Let's get rid of those header lines; they begin with a hash 28 | if [message] =~ /^#/ { 29 | drop { } 30 | } 31 | 32 | #Now, using the csv filter, we can define the Bro log fields 33 | if [type] == "bro-weird_log" { 34 | csv { 35 | columns => ["ts","uid","id.orig_h","id.orig_p","id.resp_h","id.resp_p","name","addl","notice","peer"] 36 | 37 | #If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, leave the next line alone. 38 | separator => "" 39 | } 40 | 41 | #Let's convert our timestamp into the 'ts' field, so we can use Kibana features natively 42 | date { 43 | match => [ "ts", "UNIX" ] 44 | } 45 | 46 | mutate { 47 | convert => [ "id.orig_p", "integer" ] 48 | convert => [ "id.resp_p", "integer" ] 49 | } 50 | } 51 | } 52 | 53 | output { 54 | elasticsearch { 55 | embedded => true 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/log2timeline/logstash-log2timeline.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Log2timeline Output Files 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse already-created log2timeline supertimelines 9 | # 10 | # Limitations: This file will parse raw text, and there must be a delimiter provided if not the default comma 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "log2timeline-perl" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | #Edit the following path to reflect the location of your timeline files. 
You can also change the extension if you use something else 21 | path => "/path/to/your/timelines/*.csv" 22 | } 23 | } 24 | 25 | filter { 26 | if [type] == "log2timeline-perl" { 27 | csv { 28 | columns => ["date","time","timezone","MACB","source","sourcetype","type","user","host","short","desc","version","filename","inode","notes","format","extra"] 29 | 30 | #If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, leave the next line alone. 31 | separator => "," 32 | } 33 | 34 | mutate { 35 | replace => [ "date" , "%{date} %{time}" ] 36 | } 37 | 38 | if [timezone] == "timezone" { 39 | drop { } 40 | } 41 | } 42 | } 43 | 44 | output { 45 | elasticsearch { 46 | embedded => true 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/web_logs/logstash-apache-combined.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Combined Apache Files 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse combined Apache log files 9 | # 10 | # Limitations: This file will parse raw text, not .gz log files. For .gz files, utilize a 'tcp' input, and zcat the files to netcat 11 | # 12 | ####################### 13 | 14 | input { 15 | 16 | # Sitting file input. Comment out or delete to remove this feature 17 | file { 18 | type => "apache-combined" 19 | start_position => "beginning" 20 | sincedb_path => "/dev/null" 21 | 22 | #Edit the following line to reflect the location of your .log files 23 | path => "/path/to/*.log_files" 24 | } 25 | 26 | # Receive files via local tcp port, either via netcat or other transfer methods. Comment out or delete to remove this feature 27 | tcp { 28 | type => "apache-common" 29 | 30 | #Edit the following line to reflect your port of choice. Note that you should ignore ports 9200-9300 for ElasticSearch 31 | port => 54321 32 | } 33 | } 34 | 35 | filter { 36 | if [type] == "apache-combined" { 37 | grok { 38 | match => { "message" => "%{COMBINEDAPACHELOG}" } 39 | } 40 | 41 | date { 42 | match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ] 43 | } 44 | } 45 | } 46 | 47 | output { 48 | elasticsearch { 49 | embedded => true 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/web_logs/logstash-apache-common.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - Common Apache Files 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse common Apache log files 9 | # 10 | # Limitations: This file will parse raw text, not .gz log files. For .gz files, utilize a 'tcp' input, and zcat the files to netcat 11 | # 12 | # Thanks to @hiddenillusion for his help with the 'file' input, which allows parsing of sitting log files 13 | ####################### 14 | 15 | input { 16 | 17 | # Sitting file input. 
Comment out or delete to remove this feature 18 | file { 19 | type => "apache-common" 20 | start_position => "beginning" 21 | sincedb_path => "/dev/null" 22 | 23 | #Edit the following line to reflect the location of your .log files 24 | path => "/path/to/*.log_files" 25 | } 26 | 27 | # Receive files via local tcp port, either via netcat or other transfer methods. Comment out or delete to remove this feature 28 | tcp { 29 | type => "apache-common" 30 | 31 | #Edit the following line to reflect your port of choice. Note that you should ignore ports 9200-9300 for ElasticSearch 32 | port => 54321 33 | } 34 | } 35 | 36 | filter { 37 | if [type] == "apache-common" { 38 | grok { 39 | match => { "message" => "%{COMMONAPACHELOG}" } 40 | } 41 | 42 | date { 43 | match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ] 44 | } 45 | } 46 | } 47 | 48 | output { 49 | elasticsearch { 50 | embedded => true 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/web_logs/logstash-iis6.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - IIS6 Extended Log Files (Internet and Intranet) 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse IIS6 web log files. Note that IIS6 has different log formats based on intranet or internet; comment or remove what you don't need below 9 | # 10 | # Limitations: This file will parse raw text, and there must be a delimiter provided if not the default space 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "iis6-intranet" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | path => "/path/to/iis6-intranet-logs/*.log" 21 | } 22 | 23 | file { 24 | type => "iis6-internet" 25 | start_position => "beginning" 26 | sincedb_path => "/dev/null" 27 | 28 | path => "/path/to/iis6-internet-logs/*.log" 29 | } 30 | } 31 | 32 | filter { 33 | if ([message] =~ /^#/) { 34 | drop { } 35 | } 36 | 37 | if [type] == "iis6-intranet" { 38 | grok { 39 | match => [ "message", "%{TIMESTAMP_ISO8601:timestamp} %{IP:client_IP} %{WORD:username} %{IP:source_IP} %{NUMBER:port} %{WORD:method} %{URI:URI_Stem} %{GREEDYDATA:URL_Query} %{NUMBER:Status} %{GREEDYDATA.User-Agent}" ] 40 | } 41 | } 42 | 43 | if [type] == "iis6-internet" { 44 | grok { 45 | match => [ "message", "%{TIMESTAMP_ISO8601:timestamp} %{IP:client_IP} %{WORD:username} %{IP:source_IP} %{NUMBER:port} %{WORD:method} %{URI:URI_Stem} %{GREEDYDATA:URI_Query} %{NUMBER:Status} %{NUMBER:Server_Bytes} %{NUMBER:Client_Bytes} %{NUMBER:Time_Taken} %{GREEDYDATA.User-Agent} %{GREEDYDATA:Referrer" ] 46 | } 47 | } 48 | } 49 | 50 | output { 51 | elasticsearch { 52 | embedded => true 53 | } 54 | } -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/web_logs/logstash-iis7.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - IIS 7 Log Files 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 
5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse IIS 7 web log files. 9 | # 10 | # Limitations: This file will parse raw text. 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "iis7" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | path => "/path/to/iis7-logs/*.log" 21 | } 22 | } 23 | 24 | filter { 25 | # The following lines remove any commented fields 26 | if ([message] =~ /^#/) { 27 | drop { } 28 | } 29 | 30 | if [type] == "iis7" { 31 | grok { 32 | match => [ "message", "%{TIMESTAMP_ISO8601:timestamp} %{IP:source_IP} %{WORD:method} %{URI:URI_Stem} %{GREEDYDATA:URI_Query} %{NUMBER:port} %{WORD:username} %{IP:client_IP} %{GREEDYDATA.User-Agent} %{NUMBER:Status} % %{NUMBER:Sub-Status} %{NUMBER:Win32_Status} %{NUMBER:Time_Taken}" ] 33 | } 34 | } 35 | } 36 | 37 | output { 38 | elasticsearch { 39 | embedded => true 40 | } 41 | } -------------------------------------------------------------------------------- /config/logstash/bro-ids/conf_files/web_logs/logstash-iis8.conf: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Configuration Files - IIS 8 Log Files 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash, elasticsearch, and kibana to analyze logs 7 | # 8 | # Usage: Reference this config file for your instance of logstash to parse IIS 8 web log files. 9 | # 10 | # Limitations: This file will parse raw text. 11 | # 12 | ####################### 13 | 14 | input { 15 | file { 16 | type => "iis8" 17 | start_position => "beginning" 18 | sincedb_path => "/dev/null" 19 | 20 | path => "/path/to/iis8-logs/*.log" 21 | } 22 | } 23 | 24 | filter { 25 | # The following lines remove any commented fields 26 | if ([message] =~ /^#/) { 27 | drop { } 28 | } 29 | 30 | if [type] == "iis8" { 31 | grok { 32 | match => [ "message", "%{TIMESTAMP_ISO8601:timestamp} %{WORD:Server_SiteName} %{WORD:ComputerName} %{IP:Server_IP} %{WORD:method} %{URI:URI_Stem} %{GREEDYDATA:URI_Query} %{NUMBER:port} %{WORD:username} %{IP:Client_IP} %{WORD:Version} %{GREEDYDATA.User-Agent} %{GREEDYDATA:Cookie} %{WORD:Referer} %{WORD:Host} %{NUMBER:Status} % %{NUMBER:Sub-Status} %{NUMBER:Win32_Status} %{NUMBER:Bytes_Received} %{NUMBER:Bytes_Sent} %{NUMBER:Time_Taken}" ] 33 | } 34 | } 35 | } 36 | 37 | output { 38 | elasticsearch { 39 | embedded => true 40 | } 41 | } -------------------------------------------------------------------------------- /config/logstash/bro-ids/dictionaries/logstash-bro-conn-log.dict: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Lookup Dictionaries - Bro IDS Conn Log Status Codes 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash to translate Bro IDS conn log codes into text for analysis 7 | # 8 | # Usage: Insert this dictionary into your logstash configuration file; make sure to insert source and destination fields in the correct places 9 | # 10 | # Dependencies: Utilizing the logstash 'translate' filter requires having the logstash contrib plugins added, which are community supported and not part of the official release. Visit logstash.net to find out how to install these. 
11 | # 12 | ####################### 13 | 14 | translate { 15 | #Insert the source field below between the quotes 16 | field => "" 17 | 18 | #Insert the destination field below between the quotes. This is a new field, so it can be any name you want 19 | destination => "" 20 | 21 | dictionary => [ 22 | "S0", "Connection attempt seen, no reply", 23 | "S1", "Connection established, not terminated", 24 | "S2", "Connection established and close attempt by originator seen (but no reply from responder)", 25 | "S3", "Connection established and close attempt by responder seen (but no reply from originator)", 26 | "SF", "Normal SYN/FIN completion", 27 | "REJ", "Connection attempt rejected", 28 | "RSTO", "Connection established, originator aborted (sent a RST)", 29 | "RSTR", "Established, responder aborted", 30 | "RSTOS0", "Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder", 31 | "RSTRH", "Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator", 32 | "SH", "Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was 'half' open)", 33 | "SHR", "Responder sent a SYN ACK followed by a FIN, we never saw a SYN from the originator", 34 | "OTH", "No SYN seen, just midstream traffic (a 'partial connection' that was not later closed)" 35 | ] 36 | } -------------------------------------------------------------------------------- /config/logstash/bro-ids/dictionaries/logstash-ftp-status-codes.dict: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Lookup Dictionaries - FTP Status Codes 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 5 | # 6 | # For use with logstash to translate FTP status codes into text for analysis 7 | # 8 | # Usage: Insert this dictionary into your logstash configuration file; make sure to insert source and destination fields in the correct places 9 | # 10 | # Dependencies: Utilizing the logstash 'translate' filter requires having the logstash contrib plugins added, which are community supported and not part of the official release. Visit logstash.net to find out how to install these. 11 | # 12 | # Also note that the dictionary entries below are integers (numbers are not encapsulated with quotes). Therefore, the type must be converted to integer within Logstash/Elasticsearch. 13 | # 14 | ####################### 15 | 16 | translate { 17 | #Insert the source field below between the quotes 18 | field => "" 19 | 20 | #Insert the destination field below between the quotes. 
This is a new field, so it can be any name you want 21 | destination => "" 22 | 23 | dictionary => [ 24 | #1xx Codes are Preliminary Positive Replies 25 | 110, "Restart marker replay", 26 | 120, "Service ready in n minutes", 27 | 125, "Data connection is already open", 28 | 150, "File status is OK; opening data connection" 29 | 30 | #2xx Codes are Positive Completion Replies 31 | 202, "OK", 32 | 211, "System status or system help reply", 33 | 212, "Directory status", 34 | 213, "File status", 35 | 214, "Help message", 36 | 215, "NAME system type", 37 | 220, "Service is ready for new user", 38 | 221, "Service closing control connection", 39 | 225, "Data connection open; no transfer in progress", 40 | 226, "Closing data connection; request successful", 41 | 227, "Entering passive mode", 42 | 228, "Entering Long Passive Mode", 43 | 229, "Entering Extended Passive Mode", 44 | 230, "User is logged in", 45 | 231, "User is logged out", 46 | 232, "Logout will occur when transfer is complete", 47 | 250, "Requested file action OK, completed", 48 | 257, "Path created", 49 | 50 | #3xx Codes are Positive Intermediate Replies 51 | 331, "User name OK; need password", 52 | 332, "Login account required", 53 | 350, "Requested action pending further information", 54 | 55 | #4xx Codes are Transient Negative Completion Replies 56 | 421, "Service not available, closing connection", 57 | 425, "Can't open connection", 58 | 426, "Connection closed; transfer aborted", 59 | 430, "Invalid username or password", 60 | 434, "Requested host is unavailable", 61 | 450, "Requested file action not performed", 62 | 451, "Requested action aborted; local processing error", 63 | 452, "Requested action aborted; insufficient storage space", 64 | 65 | #5xx Codes are Permanent Negative Completion Replies 66 | 501, "Syntax error in parameters or arguments", 67 | 502, "Command not implemented", 68 | 503, "Bad sequence of commands", 69 | 504, "Command not implemented for that parameter", 70 | 530, "User not logged in", 71 | 532, "Need account for storing files", 72 | 550, "Requested action aborted; file unavailable", 73 | 551, "Requested action aborted; page type unknown", 74 | 552, "Requested action aborted; exceeded storage allocation", 75 | 553, "Requested action aborted; file name not allowed", 76 | 77 | #6xx Codes are Protected Replies. These are typically base64 encoded 78 | 631, "Integrity protected reply", 79 | 632, "Confidentiality and integrity protected reply", 80 | 633, "Confidentiality protected reply", 81 | 82 | #100xx Codes are Winsock Error Codes 83 | 10054, "Connection reset by peer; forcibly closed by remote host", 84 | 10060, "Cannot connect to remote server", 85 | 10061, "Cannot connect to remote server; connection is actively refused", 86 | 10066, "Directory not empty", 87 | 10068, "Too many users; server is full" 88 | ] 89 | } 90 | -------------------------------------------------------------------------------- /config/logstash/bro-ids/dictionaries/logstash-http-status-codes.dict: -------------------------------------------------------------------------------- 1 | ######################## 2 | # logstash Lookup Dictionaries - HTTP Status Codes 3 | # Created by 505Forensics (http://www.505forensics.com) 4 | # MIT License, so do what you want with it! 
5 | # 6 | # For use with logstash to translate HTTP status codes into text for analysis 7 | # 8 | # Usage: Insert this dictionary into your logstash configuration file; make sure to insert source and destination fields in the correct places 9 | # 10 | # Dependencies: Utilizing the logstash 'translate' filter requires having the logstash contrib plugins added, which are community supported and not part of the official release. Visit logstash.net to find out how to install these. 11 | # 12 | # Also note that the dictionary entries below are integers (numbers are not encapsulated with quotes). Therefore, the type must be converted to integer within Logstash/Elasticsearch. 13 | # 14 | ####################### 15 | 16 | translate { 17 | #Insert the source field below between the quotes 18 | field => "" 19 | 20 | #Insert the destination field below between the quotes. This is a new field, so it can be any name you want 21 | destination => "" 22 | 23 | dictionary => [ 24 | #1xx Codes Are Informational 25 | 100, "Continue", 26 | 101, "Switching Protocols", 27 | 102, "Processing", 28 | 29 | #2xx Codes Are Successful; We Like These 30 | 200, "OK", 31 | 201, "Created", 32 | 202, "Accepted", 33 | 203, "Non-Authoritative Information", 34 | 204, "No Content", 35 | 205, "Reset Content", 36 | 206, "Partial Content", 37 | 207, "Multi-Status", 38 | 208, "Already Reported", 39 | 226, "Instance Manipulation Used", 40 | 41 | #3xx Codes Indicate that Further User Agent Action May be Needed 42 | 300, "Multiple Choices", 43 | 301, "Moved Permanently", 44 | 302, "Not Found", 45 | 303, "See Other", 46 | 304, "Not Modified", 47 | 305, "Use Proxy", 48 | #306 is no longer used, however is still reserved 49 | 307, "Temporary Redirect", 50 | 308, "Permanent Redirect", 51 | 52 | #4xx Codes Refer to Client Errors 53 | 400, "Bad Request", 54 | 401, "Unauthorized", 55 | 402, "Payment Required", 56 | 403, "Forbidden", 57 | 404, "Not Found", 58 | 405, "Method Not Allowed", 59 | 406, "Not Acceptable", 60 | 407, "Proxy Authentication Required", 61 | 408, "Request Timeout", 62 | 409, "Conflict", 63 | 410, "Gone", 64 | 411, "Length Required", 65 | 412, "Precondition Failed", 66 | 413, "Request Entity Too Large", 67 | 414, "Request URI Too Long", 68 | 415, "Unsupported Media Type", 69 | 416, "Requested Range Not Satisfiable, 70 | 417, "Expectation Failed", 71 | 422, "Unprocessable Entity", 72 | 423, "Locked", 73 | 424, "Failed Dependency", 74 | 426, "Upgrade Required", 75 | 428, "Precondition Required", 76 | 429, "Too Many Requests", 77 | 431, "Request Header Fields Too Large" 78 | 440, "Login Timeout", 79 | 444, "No Response (This is specific to Nginx)", 80 | 450, "Blocked my Microsoft Windows Parental Controls", 81 | 82 | #5xx Codes Refer to Server Errors 83 | 500, "Internal Server Error", 84 | 501, "Not Implemented", 85 | 502, "Bad Gateway", 86 | 503, "Service Unavailable", 87 | 504, "Gateway Timeout", 88 | 505, "HTTP Version Not Supported", 89 | 506, "Variant Also Negotiates", 90 | 507, "Insufficient Storage", 91 | 508, "Loop Detected", 92 | 510, "Not Extended", 93 | 511, "Network Authentication Required" 94 | ] 95 | } -------------------------------------------------------------------------------- /config/logstash/bro-ids/type_mappings/log2timeline.type: -------------------------------------------------------------------------------- 1 | { 2 | "log2timeline": { 3 | "_timestamp": { 4 | "enabled": "true", 5 | "path": "timestamp", 6 | "default": "null" 7 | }, 8 | "_source": { 9 | "enabled": true 10 | }, 11 | "properties": { 12 | 
"timestamp": { 13 | "type": "date", 14 | "format" : "dateOptionalTime" 15 | }, 16 | "timezone": { 17 | "type": "string" 18 | }, 19 | "MACB": { 20 | "type": "string" 21 | }, 22 | "source": { 23 | "type": "string", 24 | "index": "not_analyzed" 25 | }, 26 | "sourcetype": { 27 | "type": "string", 28 | "index": "not_analyzed" 29 | }, 30 | "type": { 31 | "type": "string" 32 | }, 33 | "user": { 34 | "type": "string", 35 | "index": "not_analyzed" 36 | }, 37 | "host": { 38 | "type": "string", 39 | "index": "not_analyzed" 40 | }, 41 | "short": { 42 | "type": "string", 43 | "index": "not_analyzed" 44 | }, 45 | "desc": { 46 | "type": "string", 47 | "index": "not_analyzed" 48 | }, 49 | "version": { 50 | "type": "string" 51 | }, 52 | "filename": { 53 | "type": "string", 54 | "index": "not_analyzed" 55 | }, 56 | "inode": { 57 | "type": "long" 58 | }, 59 | "notes": { 60 | "type": "string", 61 | "store" : "no" 62 | }, 63 | "format": { 64 | "type": "string" 65 | }, 66 | "extra": { 67 | "type": "string", 68 | "store" : "no" 69 | } 70 | } 71 | } 72 | } -------------------------------------------------------------------------------- /config/logstash/bro-ids/type_mappings/mhn-hpfeed.type: -------------------------------------------------------------------------------- 1 | { 2 | "mhnfeed": { 3 | "properties": { 4 | "channel": { 5 | "type": "string" 6 | }, 7 | "ident": { 8 | "type": "string" 9 | }, 10 | "last_error": { 11 | "type": "string" 12 | }, 13 | "last_error_timestamp": { 14 | "type": "date", 15 | "format": "dateOptionalTime" 16 | }, 17 | "payload": { 18 | "properties": { 19 | "commands": { 20 | "type": "string" 21 | }, 22 | "endTime": { 23 | "type": "date", 24 | "format": "dateOptionalTime" 25 | }, 26 | "hostIP": { 27 | "type": "ip" 28 | }, 29 | "hostPort": { 30 | "type": "long" 31 | }, 32 | "loggedin": { 33 | "type": "string" 34 | }, 35 | "peerIP": { 36 | "type": "ip" 37 | }, 38 | "peerPort": { 39 | "type": "long" 40 | }, 41 | "session": { 42 | "type": "string" 43 | }, 44 | "startTime": { 45 | "type": "date", 46 | "format": "dateOptionalTime" 47 | }, 48 | "ttylog": { 49 | "type": "string" 50 | }, 51 | "unknownCommands": { 52 | "type": "string" 53 | }, 54 | "version": { 55 | "type": "string" 56 | } 57 | } 58 | }, 59 | "peer_area_code": { 60 | "type": "long" 61 | }, 62 | "peer_city_name": { 63 | "type": "string" 64 | }, 65 | "peer_continent_code": { 66 | "type": "string" 67 | }, 68 | "peer_country_code2": { 69 | "type": "string" 70 | }, 71 | "peer_country_code3": { 72 | "type": "string" 73 | }, 74 | "peer_country_name": { 75 | "type": "string" 76 | }, 77 | "peer_dma_code": { 78 | "type": "long" 79 | }, 80 | "peer_geopoint": { 81 | "type": "geo_point" 82 | }, 83 | "peer_latitude": { 84 | "type": "double" 85 | }, 86 | "peer_longitude": { 87 | "type": "double" 88 | }, 89 | "peer_postal_code": { 90 | "type": "string" 91 | }, 92 | "peer_real_region_name": { 93 | "type": "string" 94 | }, 95 | "peer_region_name": { 96 | "type": "string" 97 | }, 98 | "peer_timezone": { 99 | "type": "string" 100 | }, 101 | "timestamp": { 102 | "type": "date", 103 | "format": "dateOptionalTime" 104 | } 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /config/logstash/conf.d/02-beats-input.conf: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => 5044 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /config/logstash/conf.d/03-http-input.conf: 
-------------------------------------------------------------------------------- 1 | input { 2 | http { 3 | port => 8060 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /config/logstash/conf.d/10-syslog-filter.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [type] == "syslog" { 3 | grok { 4 | match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" } 5 | add_field => [ "received_at", "%{@timestamp}" ] 6 | add_field => [ "received_from", "%{host}" ] 7 | } 8 | syslog_pri { } 9 | date { 10 | match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /config/logstash/conf.d/11-nginx-filter.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [type] == "nginx-access" { 3 | grok { 4 | patterns_dir => ["/opt/logstash/patterns"] 5 | match => { "message" => "%{NGINXACCESS}" } 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /config/logstash/conf.d/30-elasticsearch-output.conf: -------------------------------------------------------------------------------- 1 | output { 2 | elasticsearch { 3 | hosts => ["localhost:9200"] 4 | sniffing => true 5 | manage_template => false 6 | index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" 7 | document_type => "%{[@metadata][type]}" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /config/logstash/logstash.yml: -------------------------------------------------------------------------------- 1 | pipeline.batch.size: 125 2 | pipeline.batch.delay: 5 3 | -------------------------------------------------------------------------------- /config/logstash/patterns/nginx: -------------------------------------------------------------------------------- 1 | NGUSERNAME [a-zA-Z\.\@\-\+_%]+ 2 | NGUSER %{NGUSERNAME} 3 | NGINXACCESS %{IPORHOST:clientip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:timestamp}\] "%{WORD:verb} %{URIPATHPARAM:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:response} (?:%{NUMBER:bytes}|-) (?:"(?:%{URI:referrer}|-)"|%{QS:referrer}) %{QS:agent} 4 | -------------------------------------------------------------------------------- /config/metricbeat/metricbeat.yml: -------------------------------------------------------------------------------- 1 | metricbeat.modules: 2 | - module: system 3 | metricsets: 4 | - cpu 5 | - filesystem 6 | - memory 7 | - network 8 | - process 9 | enabled: true 10 | period: 10s 11 | processes: ['.*'] 12 | cpu_ticks: false 13 | 14 | output.logstash: 15 | hosts: ["127.0.0.1:5044"] 16 | -------------------------------------------------------------------------------- /config/misc/elasticsearch.nse: -------------------------------------------------------------------------------- 1 | --[[ 2 | nmap --script ./elasticsearch.nse 192.168.0.0/24 -p 9200,80,8080 3 | 4 | PORT STATE SERVICE 5 | 9200/tcp open wap-wsp 6 | |_elasticsearch: looks like elasticsearch 7 | 8 | --]] 9 | 10 | local http = require "http" 11 | local string = require "string" 12 | 13 | portrule = function(host, port) 14 | return port.protocol == "tcp" and port.state == "open" 15 | end 16 | 17 | action = function(host, port) 18 | local uri = "/" 19 | local response = http.get(host, port, uri) 
20 | if ( response.status == 200 ) then 21 | if ( string.find(response.body, "You Know, for Search") ) then 22 | return "looks like elasticsearch" 23 | end 24 | end 25 | end 26 | -------------------------------------------------------------------------------- /config/misc/test_index.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from elasticsearch import Elasticsearch 4 | 5 | es = Elasticsearch() 6 | 7 | for i in range(10000): 8 | doc = {'author': 'kimchy', 'text': 'Elasticsearch: cool. bonsai cool.', 'timestamp': datetime.now()} 9 | res = es.index(index="test-index", doc_type='tweet', id=i, body=doc) 10 | # print(res['created']) 11 | 12 | res = es.get(index="test-index", doc_type='tweet', id=1) 13 | print(res['_source']) 14 | 15 | es.indices.refresh(index="test-index") 16 | 17 | res = es.search(index="test-index", body={"query": {"match_all": {}}}) 18 | print("Got %d Hits:" % res['hits']['total']) 19 | for hit in res['hits']['hits']: 20 | print("%(timestamp)s %(author)s: %(text)s" % hit["_source"]) 21 | -------------------------------------------------------------------------------- /config/nginx/htpasswd: -------------------------------------------------------------------------------- 1 | admin:$apr1$e6LaMI2c$LIiwzpQyFAGtZ4dZdmuwv0 2 | -------------------------------------------------------------------------------- /config/nginx/kibana.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | 4 | server_name elstack; 5 | 6 | location / { 7 | proxy_pass http://127.0.0.1:5601; 8 | proxy_http_version 1.1; 9 | proxy_set_header Upgrade $http_upgrade; 10 | proxy_set_header Connection 'upgrade'; 11 | proxy_set_header Host $host; 12 | proxy_cache_bypass $http_upgrade; 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /config/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | 2 | user nginx; 3 | worker_processes 1; 4 | 5 | error_log /var/log/nginx/error.log warn; 6 | pid /var/run/nginx.pid; 7 | 8 | 9 | events { 10 | worker_connections 1024; 11 | } 12 | 13 | 14 | http { 15 | include /etc/nginx/mime.types; 16 | default_type application/octet-stream; 17 | 18 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 19 | '$status $body_bytes_sent "$http_referer" ' 20 | '"$http_user_agent" "$http_x_forwarded_for"'; 21 | 22 | access_log /var/log/nginx/access.log main; 23 | 24 | sendfile on; 25 | #tcp_nopush on; 26 | 27 | keepalive_timeout 65; 28 | 29 | #gzip on; 30 | 31 | include /etc/nginx/conf.d/*.conf; 32 | } 33 | -------------------------------------------------------------------------------- /config/nginx/ssl.kibana.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen [::]:80 default_server; 3 | listen 80 default_server; 4 | 5 | server_name _; 6 | 7 | return 301 https://$host$request_uri; 8 | } 9 | 10 | server { 11 | listen [::]:443 ssl http2; 12 | listen 443 ssl http2; 13 | 14 | server_name _; 15 | 16 | auth_basic "Restricted Access"; 17 | auth_basic_user_file /etc/nginx/htpasswd.users; 18 | 19 | ssl on; 20 | ssl_certificate /etc/nginx/ssl/kibana.crt; 21 | ssl_certificate_key /etc/nginx/ssl/kibana.key; 22 | 23 | location / { 24 | proxy_pass http://127.0.0.1:5601; 25 | proxy_http_version 1.1; 26 | proxy_set_header Upgrade $http_upgrade; 27 | proxy_set_header Connection 'upgrade'; 28 | 
proxy_set_header Host $host; 29 | proxy_cache_bypass $http_upgrade; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /config/supervisord/supervisord.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | 4 | [program:logstash] 5 | command = /logstash-entrypoint.sh logstash -f /etc/logstash/conf.d/ 6 | autostart=true 7 | autorestart=true 8 | stdout_logfile=/var/log/logstash.stdout.log 9 | stderr_logfile=/var/log/logstash.stderr.log 10 | # priority=2 11 | 12 | [program:elasticsearch] 13 | command = /elastic-entrypoint.sh elasticsearch 14 | autostart=true 15 | autorestart=true 16 | stdout_logfile=/var/log/elasticsearch.stdout.log 17 | stderr_logfile=/var/log/elasticsearch.stderr.log 18 | # priority=1 19 | 20 | [program:kibana] 21 | command = /kibana-entrypoint.sh kibana 22 | startsecs=5 23 | autostart=true 24 | autorestart=true 25 | stdout_logfile=/var/log/kibana.stdout.log 26 | stderr_logfile=/var/log/kibana.stderr.log 27 | 28 | [program:nginx] 29 | command = /nginx-entrypoint.sh 30 | autostart=true 31 | autorestart=true 32 | stdout_logfile=/var/log/nginx.stdout.log 33 | stderr_logfile=/var/log/nginx.stderr.log 34 | # priority=4 35 | -------------------------------------------------------------------------------- /entrypoints/elastic-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | es_opts='' 6 | 7 | while IFS='=' read -r envvar_key envvar_value 8 | do 9 | # Elasticsearch env vars need to have at least two dot separated lowercase words, e.g. `cluster.name` 10 | if [[ "$envvar_key" =~ ^[a-z]+\.[a-z]+ ]] 11 | then 12 | if [[ ! -z $envvar_value ]]; then 13 | es_opt="-E${envvar_key}=${envvar_value}" 14 | es_opts+=" ${es_opt}" 15 | fi 16 | fi 17 | done < <(env) 18 | 19 | export ES_JAVA_OPTS="-Des.cgroups.hierarchy.override=/ $ES_JAVA_OPTS" 20 | 21 | # Add elasticsearch as command if needed 22 | if [ "${1:0:1}" = '-' ]; then 23 | set -- elasticsearch "$@" ${es_opts} 24 | fi 25 | 26 | # Drop root privileges if we are running elasticsearch 27 | # allow the container to be started with `--user` 28 | if [ "$1" = 'elasticsearch' -a "$(id -u)" = '0' ]; then 29 | 30 | chown -R elstack:elstack /usr/share/elasticsearch/data 31 | chown -R elstack:elstack /usr/share/elasticsearch/logs 32 | 33 | set -- su-exec elstack /sbin/tini -s -- "$@" ${es_opts} 34 | #exec su-exec elstack "$BASH_SOURCE" "$@" 35 | fi 36 | 37 | if [ "$1" = 'master' -a "$(id -u)" = '0' ]; then 38 | # Change node into a master node 39 | echo "node.master: true" >> /usr/share/elasticsearch/config/elasticsearch.yml 40 | echo "node.ingest: false" >> /usr/share/elasticsearch/config/elasticsearch.yml 41 | echo "node.data: false" >> /usr/share/elasticsearch/config/elasticsearch.yml 42 | 43 | chown -R elstack:elstack /usr/share/elasticsearch/data 44 | chown -R elstack:elstack /usr/share/elasticsearch/logs 45 | 46 | set -- su-exec elstack /sbin/tini -- elasticsearch ${es_opts} 47 | #exec su-exec elstack "$BASH_SOURCE" "$@" 48 | fi 49 | 50 | if [ "$1" = 'ingest' -a "$(id -u)" = '0' ]; then 51 | # Change node into a client node 52 | echo "node.master: false" >> /usr/share/elasticsearch/config/elasticsearch.yml 53 | echo "node.ingest: true" >> /usr/share/elasticsearch/config/elasticsearch.yml 54 | echo "node.data: false" >> /usr/share/elasticsearch/config/elasticsearch.yml 55 | echo "discovery.zen.ping.unicast.hosts: [\"elastic-master\"]" >> 
/usr/share/elasticsearch/config/elasticsearch.yml 56 | 57 | chown -R elstack:elstack /usr/share/elasticsearch/data 58 | chown -R elstack:elstack /usr/share/elasticsearch/logs 59 | 60 | set -- su-exec elstack /sbin/tini -- elasticsearch ${es_opts} 61 | #exec su-exec elstack "$BASH_SOURCE" "$@" 62 | fi 63 | 64 | if [ "$1" = 'data' -a "$(id -u)" = '0' ]; then 65 | # Change node into a data node 66 | echo "node.master: false" >> /usr/share/elasticsearch/config/elasticsearch.yml 67 | echo "node.ingest: false" >> /usr/share/elasticsearch/config/elasticsearch.yml 68 | echo "node.data: true" >> /usr/share/elasticsearch/config/elasticsearch.yml 69 | echo "discovery.zen.ping.unicast.hosts: [\"elastic-master\"]" >> /usr/share/elasticsearch/config/elasticsearch.yml 70 | 71 | chown -R elstack:elstack /usr/share/elasticsearch/data 72 | chown -R elstack:elstack /usr/share/elasticsearch/logs 73 | 74 | set -- su-exec elstack /sbin/tini -- elasticsearch ${es_opts} 75 | #exec su-exec elstack "$BASH_SOURCE" "$@" 76 | fi 77 | 78 | # As argument is not related to elasticsearch, 79 | # then assume that user wants to run his own process, 80 | # for example a `bash` shell to explore this image 81 | exec "$@" 82 | -------------------------------------------------------------------------------- /entrypoints/filebeat-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Add filebeat as command if needed 6 | if [ "${1:0:1}" = '-' ]; then 7 | # wait for elasticsearch 8 | echo "Importing Dashboards..." 9 | sleep 10;/usr/share/filebeat/import_dashboards 10 | 11 | set -- filebeat "$@" 12 | fi 13 | 14 | exec "$@" 15 | -------------------------------------------------------------------------------- /entrypoints/kibana-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Add kibana as command if needed 5 | if [[ "$1" == -* ]]; then 6 | set -- kibana "$@" 7 | fi 8 | 9 | # Run as user "elstack" if the command is "kibana" 10 | if [ "$1" = 'kibana' ]; then 11 | if [ "$ELASTICSEARCH_URL" ]; then 12 | sed -ri "s!^(\#\s*)?(elasticsearch\.url:).*!\2 '$ELASTICSEARCH_URL'!" 
/etc/kibana/kibana.yml 13 | fi 14 | 15 | set -- su-exec elstack tini -- "$@" 16 | fi 17 | 18 | exec "$@" 19 | -------------------------------------------------------------------------------- /entrypoints/logstash-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | # Add logstash as command if needed 6 | if [ "${1:0:1}" = '-' ]; then 7 | set -- logstash "$@" 8 | fi 9 | 10 | # Run as user "elstack" if the command is "logstash" 11 | if [ "$1" = 'logstash' ]; then 12 | chown -R elstack:elstack /usr/share/logstash 13 | chown -R elstack:elstack /etc/logstash/conf.d/ 14 | chown -R elstack:elstack /opt/logstash/patterns 15 | 16 | set -- su-exec elstack tini -- "$@" 17 | fi 18 | 19 | exec "$@" 20 | -------------------------------------------------------------------------------- /entrypoints/metricbeat-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Add metricbeat as command if needed 6 | if [ "${1:0:1}" = '-' ]; then 7 | 8 | set -- metricbeat "$@" 9 | fi 10 | 11 | exec "$@" 12 | -------------------------------------------------------------------------------- /entrypoints/nginx-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | : ${ELSK_USER:="admin"} 4 | : ${ELSK_PASS:="admin"} 5 | : ${ELSK_DOMAIN:="localhost"} 6 | 7 | if [ -z "$SSL" ]; then 8 | echo ">> using non-ssl nginx conf" 9 | rm /etc/nginx/conf.d/ssl.kibana.conf 10 | exec nginx -g 'daemon off;' 11 | else 12 | echo ">> generating basic auth" 13 | htpasswd -b -c /etc/nginx/htpasswd.users "$ELSK_USER" "$ELSK_PASS" 14 | 15 | if [ ! -e "/etc/nginx/ssl/kibana.key" ]; then 16 | echo ">> generating self signed cert" 17 | openssl req -x509 -newkey rsa:4096 \ 18 | -subj "/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=$ELSK_DOMAIN" \ 19 | -keyout "/etc/nginx/ssl/kibana.key" \ 20 | -out "/etc/nginx/ssl/kibana.crt" \ 21 | -days 3650 -nodes -sha256 22 | fi 23 | 24 | echo ">> using ssl nginx conf" 25 | rm /etc/nginx/conf.d/kibana.conf 26 | exec nginx -g 'daemon off;' 27 | fi 28 | -------------------------------------------------------------------------------- /hooks/post_push: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | VERSION=$(cat Dockerfile | grep '^ENV STACK' | cut -d" " -f3) 6 | TAGS=($VERSION 6) 7 | 8 | for TAG in "${TAGS[@]}"; do 9 | echo "===> Tagging $IMAGE_NAME as $DOCKER_REPO:$TAG" 10 | docker tag $IMAGE_NAME $DOCKER_REPO:$TAG 11 | echo "===> Pushing $DOCKER_REPO:$TAG" 12 | docker push $DOCKER_REPO:$TAG 13 | done 14 | -------------------------------------------------------------------------------- /images/configure-setting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nilsty/elstack-qa-monitoring/c443523937ee7031803b5c829c790341d2605f49/images/configure-setting.png -------------------------------------------------------------------------------- /images/define-index-pattern.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nilsty/elstack-qa-monitoring/c443523937ee7031803b5c829c790341d2605f49/images/define-index-pattern.png -------------------------------------------------------------------------------- /images/selected-fields.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nilsty/elstack-qa-monitoring/c443523937ee7031803b5c829c790341d2605f49/images/selected-fields.png -------------------------------------------------------------------------------- /robot-tests/Resources/Keywords.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library RequestsLibrary 3 | Resource Variables.robot 4 | 5 | 6 | *** Keywords *** 7 | Create API session 8 | Create Session logstash ${URL} 9 | 10 | Post example test results Staging passed 11 | &{headers}= Create Dictionary Content-type=application/json 12 | &{data}= Create Dictionary environment=Staging test_title=Example 1 result=PASS report=All test steps have passed. 13 | ${resp}= Post Request logstash / json=${data} headers=${headers} 14 | Should Be Equal As Strings ${resp.status_code} 200 15 | Log ${resp.content} 16 | 17 | Post example test results Production failed 18 | &{headers}= Create Dictionary Content-type=application/json 19 | &{data}= Create Dictionary environment=Production test_title=Example 2 result=FAIL report=Test step 1 failed with an error. 20 | ${resp}= Post Request logstash / json=${data} headers=${headers} 21 | Should Be Equal As Strings ${resp.status_code} 200 22 | Log ${resp.content} 23 | 24 | Post example test results Production passed 25 | &{headers}= Create Dictionary Content-type=application/json 26 | &{data}= Create Dictionary environment=Production test_title=Example 3 result=PASS report=All test steps have passed. 27 | ${resp}= Post Request logstash / json=${data} headers=${headers} 28 | Should Be Equal As Strings ${resp.status_code} 200 29 | Log ${resp.content} 30 | -------------------------------------------------------------------------------- /robot-tests/Resources/Variables.robot: -------------------------------------------------------------------------------- 1 | *** Variables *** 2 | ${URL} http://localhost:8060 3 | -------------------------------------------------------------------------------- /robot-tests/example_test_results.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Documentation Creation of example test results that populate Kibana via 3 | ... the Logstash HTTP input. 4 | 5 | Resource Resources/Keywords.robot 6 | 7 | Suite Setup Create API session 8 | 9 | *** Test Cases *** 10 | Example 1 11 | Post example test results Staging passed 12 | 13 | Example 2 14 | Post example test results Production failed 15 | 16 | Example 3 17 | Post example test results Production passed 18 | --------------------------------------------------------------------------------
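Usage sketch: supervisord.conf above runs Elasticsearch, Logstash, Kibana and Nginx together in one container, and nginx-entrypoint.sh switches between the plain and SSL Kibana vhosts. The commands below are only a hedged example of building and starting the image locally with TLS and basic auth enabled; the tag elstack-qa-monitoring is a placeholder for whatever you name your build, and the example assumes the image's default command starts supervisord as the bundled config suggests.

# Build the image from the Dockerfile above (tag name is arbitrary).
docker build -t elstack-qa-monitoring .

# Run with the SSL vhost; the ELSK_* variables override the admin/admin defaults in
# nginx-entrypoint.sh. 8060 is the Logstash HTTP input (03-http-input.conf) and 9200
# the Elasticsearch REST API.
docker run -d --name elstack \
  -p 443:443 -p 8060:8060 -p 9200:9200 \
  -e SSL=true -e ELSK_USER=qa -e ELSK_PASS='changeme' -e ELSK_DOMAIN=localhost \
  elstack-qa-monitoring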
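Once the stack is up, QA results can be posted straight to the Logstash HTTP input, which is exactly what the Robot Framework keywords in Keywords.robot do via RequestsLibrary. A minimal curl equivalent, assuming the default port 8060 from 03-http-input.conf and the same field names the Robot tests use:

# Post one test result as JSON; the http input parses application/json bodies into
# event fields, which 30-elasticsearch-output.conf then forwards to Elasticsearch.
curl -s -X POST http://localhost:8060/ \
  -H 'Content-Type: application/json' \
  -d '{"environment": "Staging", "test_title": "Example 1", "result": "PASS", "report": "All test steps have passed."}'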
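To verify that an event arrived, Elasticsearch can be queried directly on port 9200. Events posted through the HTTP input carry no beat metadata, so the index name template in 30-elasticsearch-output.conf may not resolve to a beat-style name; listing the indices first shows where the data actually landed. A small sketch:

# List all indices to see which one Logstash wrote the events into.
curl -s 'http://localhost:9200/_cat/indices?v'

# Search across all indices for the posted result, no exact index name needed.
curl -s -G 'http://localhost:9200/_search?pretty' --data-urlencode 'q=environment:Staging'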