├── .gitignore ├── README.md ├── cron ├── custom-cron.example ├── elastic-cron └── logs │ └── readme.txt ├── curator └── example │ ├── allocation_logstash.yaml │ ├── allocation_winlogbeat.yaml │ ├── curator.yaml.yaml │ ├── delete_indices.yaml │ ├── delete_indices_bro.yaml │ ├── delete_indices_ids.yaml │ ├── delete_indices_winlogbeat.yaml │ └── forcemerge.yaml ├── docker-compose.yml.example ├── docker-swarm.yml ├── elastalert ├── config.yaml.example └── example_rules │ ├── dns_baby_domains.yaml │ ├── dns_fuzzy_domain_match.yaml │ ├── dns_newly_observerd.yaml │ ├── flow_high_number_of_connections.yaml │ ├── host_not_sending_logs.yaml │ ├── password_fail_high_count.yaml │ ├── windows_brute_force_logins.yaml │ ├── windows_log_cleared.yaml │ ├── windows_new_service_creation.yaml │ └── windows_password_spraying.yaml ├── elasticsearch ├── es_data │ └── readme.txt ├── footer.template.json ├── header.template.json ├── indexes │ ├── elastalert_status.json │ ├── import.sh │ ├── logstash-bro.json │ ├── logstash-ids.json │ ├── logstash-ossec.json │ ├── logstash-so.json │ ├── logstash.json │ └── winlogbeat-6.json ├── logstash-suricata.template.json ├── logstash-zeek.template.json └── snapshots │ └── readme.txt ├── generate_template.sh ├── logstash ├── logstash_configs │ ├── bro-tsv │ │ ├── 0006_input_beats.conf │ │ ├── 1006_preprocess_beats.conf │ │ ├── 1100_preprocess_bro_conn.conf │ │ ├── 1101_preprocess_bro_dhcp.conf │ │ ├── 1102_preprocess_bro_dns.conf │ │ ├── 1103_preprocess_bro_dpd.conf │ │ ├── 1104_preprocess_bro_files.conf │ │ ├── 1105_preprocess_bro_ftp.conf │ │ ├── 1106_preprocess_bro_http.conf │ │ ├── 1107_preprocess_bro_irc.conf │ │ ├── 1108_preprocess_bro_kereberos.conf │ │ ├── 1109_preprocess_bro_notice.conf │ │ ├── 1110_preprocess_bro_rdp.conf │ │ ├── 1111_preprocess_bro_signatures.conf │ │ ├── 1112_preprocess_bro_smtp.conf │ │ ├── 1113_preprocess_bro_snmp.conf │ │ ├── 1114_preprocess_bro_software.conf │ │ ├── 1115_preprocess_bro_ssh.conf │ │ ├── 1116_preprocess_bro_ssl.conf │ │ ├── 1117_preprocess_bro_syslog.conf │ │ ├── 1118_preprocess_bro_tunnel.conf │ │ ├── 1119_preprocess_bro_weird.conf │ │ ├── 1121_preprocess_bro_mysql.conf │ │ ├── 1122_preprocess_bro_socks.conf │ │ ├── 1123_preprocess_bro_x509.conf │ │ ├── 1124_preprocess_bro_intel.conf │ │ ├── 6000_bro.conf │ │ ├── 6400_suricata.conf │ │ ├── 8001_postprocess_common_ip_augmentation.conf │ │ ├── 8006_postprocess_dns.conf │ │ ├── 8007_postprocess_http.conf │ │ ├── 8009_postprocess_dns_top1m_tagging.conf │ │ ├── 8010_postprocess_dns_creation_date.conf │ │ ├── 8200_postprocess_tagging.conf │ │ └── 9000_output.conf │ ├── firewall │ │ ├── 0001_input_fortinet.conf │ │ ├── 6200_firewall_fortinet.conf │ │ ├── 8000_postprocess_tagging.conf │ │ ├── 8001_postprocess_common_ip_augmentation.conf │ │ └── 9000_output_fortinet.conf │ ├── monitoring │ │ ├── 0000_input.conf │ │ └── 9000_output.conf │ ├── security_onion-json │ │ ├── 0001_input_json.conf │ │ ├── 0002_input_beats.conf │ │ ├── 1000_syslogng_preprocess.conf │ │ ├── 1001_bro_preprocess.conf │ │ ├── 1002_suricata_preprocess.conf │ │ ├── 1003_ossec_preprocess.conf │ │ ├── 6000_bro.conf │ │ ├── 6400_suricata.conf │ │ ├── 8000_postprocess_tagging.conf │ │ ├── 8001_postprocess_common_ip_augmentation.conf │ │ ├── 8006_postprocess_dns.conf │ │ ├── 8007_postprocess_http.conf │ │ ├── 8009_postprocess_dns_top1m_tagging.conf │ │ ├── 8010_postprocess_dns_creation_date.conf │ │ ├── 8502_postprocess_freq_analysis_bro_dns.conf │ │ ├── 8503_postprocess_freq_analysis_bro_http.conf │ │ ├── 
8504_postprocess_freq_analysis_bro_ssl.conf │ │ ├── 8505_postprocess_freq_analysis_bro_x509.conf │ │ └── 9000_output.conf │ └── windows │ │ ├── 0006_input_windows_beats.conf │ │ ├── 6300_windows.conf │ │ ├── 6302_windows_translations.conf │ │ ├── 8200_postprocess_tagging.conf │ │ ├── 8999_postprocess_windows_filter.conf │ │ └── 9000_output.conf ├── monitor_pipeline.yml ├── persistent_data │ └── readme.txt ├── pipelines.yml.example └── rules │ ├── app-layer-events.rules │ ├── black_list.rules │ ├── decoder-events.rules │ ├── dnp3-events.rules │ ├── dns-events.rules │ ├── downloaded.rules │ ├── files.rules │ ├── http-events.rules │ ├── local.rules │ ├── modbus-events.rules │ ├── nfs-events.rules │ ├── ntp-events.rules │ ├── smtp-events.rules │ ├── so_rules.rules │ ├── stream-events.rules │ ├── tls-events.rules │ └── white_list.rules ├── scripts ├── initialize.sh └── prereq.sh └── winlogbeat_configs └── winlogbeat_recommended_default.yml /.gitignore: -------------------------------------------------------------------------------- 1 | cron/logs/*.log 2 | cron/custom-* 3 | curator/*.yaml 4 | elastalert/config.yaml 5 | elastalert/rules/* 6 | elasticsearch/snapshots/* 7 | elasticsearch/es_data/* 8 | logstash/persistent_data/* 9 | logstash/pipelines.yml 10 | logstash/pipelines*.yml 11 | logstash/logstash_configs/**/*custom*.conf 12 | logstash/logstash_configs/*custom*/*custom* 13 | docker-compose.yml -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # elastic_stack 2 | 3 | Deploying the Elastic Stack can be difficult. This project hopes to simplify that. 4 | 5 | ### Initial Goal 6 | 7 | Make it simple to deploy a full-fledged Elastic Stack with advanced capabilities on a single physical box using Docker. 8 | 9 | ### Long Term Goal 10 | 11 | Provide scripts for easy deployment to production systems. 12 | 13 | ## Prerequisites 14 | Docker and Docker Compose must be installed.
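If Docker is already present, you can confirm the tooling before proceeding (a quick sanity check, assuming the standard `docker` and `docker-compose` CLIs are on your PATH):

```bash
# Both commands should print a version string; if either fails,
# install the prerequisites as shown below.
docker --version
docker-compose --version
```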
If they are not installed, an example of how to set them up on an Ubuntu 16.04 system is shown below: 15 | 16 | ```bash 17 | sudo apt-get install -y wget 18 | wget https://github.com/HASecuritySolutions/elastic_stack/raw/master/scripts/prereq.sh 19 | sudo bash prereq.sh 20 | sudo bash /opt/elastic_stack/scripts/initialize.sh 21 | docker-compose up -d 22 | # Wait until Elasticsearch is running (curl -s localhost:9200 returns JSON), then run this: 23 | bash elasticsearch/indexes/import.sh 24 | ``` 25 | -------------------------------------------------------------------------------- /cron/custom-cron.example: -------------------------------------------------------------------------------- 1 | #* * * * * user-job-runs-as command-being-executed 2 | #| | | | | 3 | #| | | | | 4 | #| | | | +---- Day of the Week (range: 0-7, 0 and 7 both standing for Sunday) 5 | #| | | +------ Month of the Year (range: 1-12) 6 | #| | +-------- Day of the Month (range: 1-31) 7 | #| +---------- Hour (range: 0-23) 8 | #+------------ Minute (range: 0-59) 9 | 0 0 * * * elastic-cron /usr/local/bin/curator --config /etc/curator/curator.yaml /etc/curator/delete_indices.yaml >> /home/elastic-cron/logs/delete_indices.log 2>&1 10 | 5 0 * * * elastic-cron /usr/local/bin/curator --config /etc/curator/curator.yaml /etc/curator/delete_indices_bro.yaml >> /home/elastic-cron/logs/delete_indices_bro.log 2>&1 11 | 10 0 * * * elastic-cron /usr/local/bin/curator --config /etc/curator/curator.yaml /etc/curator/delete_indices_ids.yaml >> /home/elastic-cron/logs/delete_indices_ids.log 2>&1 12 | 15 0 * * * elastic-cron /usr/local/bin/curator --config /etc/curator/curator.yaml /etc/curator/delete_indices_winlogbeat.yaml >> /home/elastic-cron/logs/delete_indices_winlogbeat.log 2>&1 13 | 0 1 * * * elastic-cron /usr/local/bin/curator --config /etc/curator/curator.yaml /etc/curator/allocation_logstash.yaml >> /home/elastic-cron/logs/allocation_logstash.log 2>&1 14 | 30 1 * * * elastic-cron /usr/local/bin/curator --config /etc/curator/curator.yaml /etc/curator/allocation_winlogbeat.yaml >> /home/elastic-cron/logs/allocation_winlogbeat.log 2>&1 15 | -------------------------------------------------------------------------------- /cron/elastic-cron: -------------------------------------------------------------------------------- 1 | #* * * * * user-job-runs-as command-being-executed 2 | #| | | | | 3 | #| | | | | 4 | #| | | | +---- Day of the Week (range: 0-7, 0 and 7 both standing for Sunday) 5 | #| | | +------ Month of the Year (range: 1-12) 6 | #| | +-------- Day of the Month (range: 1-31) 7 | #| +---------- Hour (range: 0-23) 8 | #+------------ Minute (range: 0-59) 9 | 0 2 * * * elastic-cron /usr/local/bin/curator --config /etc/curator/curator.yaml /etc/curator/forcemerge.yaml >> /home/elastic-cron/logs/forcemerge.log 2>&1 10 | -------------------------------------------------------------------------------- /cron/logs/readme.txt: -------------------------------------------------------------------------------- 1 | empty file -------------------------------------------------------------------------------- /curator/example/allocation_logstash.yaml: -------------------------------------------------------------------------------- 1 | actions: 2 | 1: 3 | action: allocation 4 | description: "Apply shard allocation filtering rules to the specified indices" 5 | options: 6 | key: box_type 7 | value: warm 8 | allocation_type: require 9 | wait_for_completion: true 10 | timeout_override: 11 | continue_if_exception: false 12 | disable_action: false 13 | filters: 14 | - filtertype: pattern 15 | kind: prefix 16 | value: logstash- 17 | - filtertype: age
18 | source: name 19 | direction: older 20 | timestring: '%Y.%m.%d' 21 | unit: days 22 | unit_count: 7 23 | -------------------------------------------------------------------------------- /curator/example/allocation_winlogbeat.yaml: -------------------------------------------------------------------------------- 1 | actions: 2 | 1: 3 | action: allocation 4 | description: "Apply shard allocation filtering rules to the specified indices" 5 | options: 6 | key: box_type 7 | value: warm 8 | allocation_type: require 9 | wait_for_completion: true 10 | timeout_override: 11 | continue_if_exception: false 12 | disable_action: false 13 | filters: 14 | - filtertype: pattern 15 | kind: prefix 16 | value: winlogbeat- 17 | - filtertype: age 18 | source: name 19 | direction: older 20 | timestring: '%Y.%m.%d' 21 | unit: days 22 | unit_count: 7 23 | -------------------------------------------------------------------------------- /curator/example/curator.yaml.yaml: -------------------------------------------------------------------------------- 1 | client: 2 | hosts: 3 | - elasticsearch 4 | port: 9200 5 | url_prefix: 6 | use_ssl: False 7 | certificate: 8 | client_cert: 9 | client_key: 10 | ssl_no_validate: False 11 | http_auth: 12 | timeout: 30 13 | master_only: False 14 | 15 | logging: 16 | loglevel: INFO 17 | logfile: 18 | logformat: default 19 | blacklist: ['elasticsearch', 'urllib3'] -------------------------------------------------------------------------------- /curator/example/delete_indices.yaml: -------------------------------------------------------------------------------- 1 | actions: 2 | 1: 3 | action: delete_indices 4 | description: >- 5 | Delete indices older than 60 days (based on index name), for logstash- 6 | prefixed indices. Ignore the error if the filter does not result in an 7 | actionable list of indices (ignore_empty_list) and exit cleanly. 8 | options: 9 | ignore_empty_list: True 10 | disable_action: False 11 | filters: 12 | - filtertype: pattern 13 | kind: prefix 14 | value: logstash- 15 | - filtertype: age 16 | source: name 17 | direction: older 18 | timestring: '%Y.%m.%d' 19 | unit: days 20 | unit_count: 60 21 | -------------------------------------------------------------------------------- /curator/example/delete_indices_bro.yaml: -------------------------------------------------------------------------------- 1 | actions: 2 | 1: 3 | action: delete_indices 4 | description: >- 5 | Delete indices older than 30 days (based on index name), for logstash-bro- 6 | prefixed indices. Ignore the error if the filter does not result in an 7 | actionable list of indices (ignore_empty_list) and exit cleanly. 8 | options: 9 | ignore_empty_list: True 10 | disable_action: False 11 | filters: 12 | - filtertype: pattern 13 | kind: prefix 14 | value: logstash-bro- 15 | - filtertype: age 16 | source: name 17 | direction: older 18 | timestring: '%Y.%m.%d' 19 | unit: days 20 | unit_count: 30 21 | -------------------------------------------------------------------------------- /curator/example/delete_indices_ids.yaml: -------------------------------------------------------------------------------- 1 | actions: 2 | 1: 3 | action: delete_indices 4 | description: >- 5 | Delete indices older than 30 days (based on index name), for logstash-ids- 6 | prefixed indices. Ignore the error if the filter does not result in an 7 | actionable list of indices (ignore_empty_list) and exit cleanly.
8 | options: 9 | ignore_empty_list: True 10 | disable_action: False 11 | filters: 12 | - filtertype: pattern 13 | kind: prefix 14 | value: logstash-ids- 15 | - filtertype: age 16 | source: name 17 | direction: older 18 | timestring: '%Y.%m.%d' 19 | unit: days 20 | unit_count: 30 21 | -------------------------------------------------------------------------------- /curator/example/delete_indices_winlogbeat.yaml: -------------------------------------------------------------------------------- 1 | actions: 2 | 1: 3 | action: delete_indices 4 | description: >- 5 | Delete indices older than 30 days (based on index name), for winlogbeat- 6 | prefixed indices. Ignore the error if the filter does not result in an 7 | actionable list of indices (ignore_empty_list) and exit cleanly. 8 | options: 9 | ignore_empty_list: True 10 | disable_action: False 11 | filters: 12 | - filtertype: pattern 13 | kind: prefix 14 | value: winlogbeat- 15 | - filtertype: age 16 | source: name 17 | direction: older 18 | timestring: '%Y.%m.%d' 19 | unit: days 20 | unit_count: 30 21 | -------------------------------------------------------------------------------- /curator/example/forcemerge.yaml: -------------------------------------------------------------------------------- 1 | actions: 2 | 1: 3 | action: forcemerge 4 | description: "Perform a forceMerge on selected indices to 'max_num_segments' per shard" 5 | options: 6 | max_num_segments: 1 7 | delay: 8 | timeout_override: 21600 9 | continue_if_exception: false 10 | disable_action: false 11 | filters: 12 | - filtertype: pattern 13 | kind: prefix 14 | value: logstash- 15 | - filtertype: age 16 | source: name 17 | direction: older 18 | timestring: '%Y.%m.%d' 19 | unit: days 20 | unit_count: 3 21 | -------------------------------------------------------------------------------- /docker-compose.yml.example: -------------------------------------------------------------------------------- 1 | version: '2.2' 2 | services: 3 | elasticsearch: 4 | image: docker.elastic.co/elasticsearch/elasticsearch:6.5.2 5 | container_name: elasticsearch 6 | restart: always 7 | environment: 8 | - cluster.name=labmeinc 9 | - node.name=elasticsearch 10 | - bootstrap.memory_lock=true 11 | - discovery.type=single-node 12 | - "ES_JAVA_OPTS=-Xms2g -Xmx2g" 13 | - path.repo=/snapshots 14 | ulimits: 15 | memlock: 16 | soft: -1 17 | hard: -1 18 | mem_limit: 4g 19 | volumes: 20 | - ./elasticsearch/es_data:/usr/share/elasticsearch/data 21 | - ./elasticsearch/snapshots:/snapshots 22 | ports: 23 | - 9200:9200 24 | networks: 25 | - esnet 26 | logging: 27 | driver: "json-file" 28 | options: 29 | max-size: "200k" 30 | max-file: "10" 31 | kibana: 32 | image: docker.elastic.co/kibana/kibana:6.5.2 33 | container_name: kibana 34 | restart: always 35 | depends_on: 36 | - elasticsearch 37 | environment: 38 | - SERVER_NAME=kibana 39 | - ELASTICSEARCH_URL=http://elasticsearch:9200 40 | ports: 41 | - 5601:5601 42 | networks: 43 | - esnet 44 | logging: 45 | driver: "json-file" 46 | options: 47 | max-size: "200k" 48 | max-file: "10" 49 | logstash: 50 | image: hasecuritysolutions/logstash:v6.5.2 51 | container_name: logstash 52 | restart: always 53 | environment: 54 | - ELASTICSEARCH_HOST=elasticsearch 55 | - pipeline.batch.size=125 56 | - config.reload.automatic=true 57 | - config.reload.interval=30s 58 | - queue.type=persisted 59 | - "LS_JAVA_OPTS=-Xmx2g -Xms1g" 60 | mem_limit: 4g 61 | ports: 62 | - 5044:5044 63 | - 5045:5045 64 | - 6050:6050 65 | volumes: 66 | -
./logstash/pipelines.yml:/usr/share/logstash/config/pipelines.yml:ro 67 | - ./logstash/logstash_configs:/opt/logstash_configs:ro 68 | - ./logstash/translate:/opt/elastic_stack/logstash/translate:ro 69 | - ./logstash/rules:/etc/nsm/rules:ro 70 | - ./logstash/persistent_data:/usr/share/logstash/data 71 | networks: 72 | - esnet 73 | logging: 74 | driver: "json-file" 75 | options: 76 | max-size: "200k" 77 | max-file: "10" 78 | freq_server: 79 | image: hasecuritysolutions/freq_server:v1.0 80 | container_name: freq_server 81 | restart: always 82 | ports: 83 | - 10004:10004 84 | networks: 85 | - esnet 86 | logging: 87 | driver: "json-file" 88 | options: 89 | max-size: "200k" 90 | max-file: "10" 91 | domain_stats: 92 | image: hasecuritysolutions/domain_stats:v1.0 93 | container_name: domain_stats 94 | restart: always 95 | ports: 96 | - 20000:20000 97 | networks: 98 | - esnet 99 | logging: 100 | driver: "json-file" 101 | options: 102 | max-size: "200k" 103 | max-file: "10" 104 | cerebro: 105 | image: hasecuritysolutions/cerebro:v1.0 106 | container_name: cerebro 107 | depends_on: 108 | - elasticsearch 109 | ports: 110 | - 9000:9000 111 | networks: 112 | - esnet 113 | logging: 114 | driver: "json-file" 115 | options: 116 | max-size: "200k" 117 | max-file: "10" 118 | elastalert: 119 | image: hasecuritysolutions/elastalert:v1.0 120 | container_name: elastalert 121 | restart: always 122 | depends_on: 123 | - elasticsearch 124 | volumes: 125 | - ./elastalert:/etc/elastalert:ro 126 | networks: 127 | - esnet 128 | logging: 129 | driver: "json-file" 130 | options: 131 | max-size: "200k" 132 | max-file: "10" 133 | 134 | networks: 135 | esnet: 136 | driver: bridge 137 | -------------------------------------------------------------------------------- /docker-swarm.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | services: 3 | master: 4 | image: hasecuritysolutions/elasticsearch-readonlyrest:6.7.0-gcs 5 | env_file: env/es_master.env 6 | volumes: 7 | - /cloud/cloud_configs/client/HA/elasticsearch/readonlyrest.yml:/usr/share/elasticsearch/config/readonlyrest.yml 8 | networks: 9 | - customer 10 | deploy: 11 | replicas: 3 12 | update_config: 13 | parallelism: 1 14 | delay: 10s 15 | order: stop-first 16 | restart_policy: 17 | condition: on-failure 18 | delay: 5s 19 | max_attempts: 3 20 | window: 120s 21 | placement: 22 | constraints: [node.role == manager] 23 | preferences: 24 | - spread: node.role.manager 25 | ssd1: 26 | image: hasecuritysolutions/elasticsearch-readonlyrest:6.7.0-gcs 27 | env_file: env/es_hot.env 28 | volumes: 29 | - /cloud/cloud_configs/client/HA/elasticsearch/readonlyrest.yml:/usr/share/elasticsearch/config/readonlyrest.yml 30 | - /ssd/HA_ssd:/usr/share/elasticsearch/data 31 | networks: 32 | - customer 33 | deploy: 34 | replicas: 1 35 | restart_policy: 36 | condition: on-failure 37 | delay: 5s 38 | max_attempts: 3 39 | window: 120s 40 | placement: 41 | constraints: [node.hostname == cloud01a] 42 | ssd2: 43 | image: hasecuritysolutions/elasticsearch-readonlyrest:6.7.0-gcs 44 | env_file: env/es_hot.env 45 | volumes: 46 | - /cloud/cloud_configs/client/HA/elasticsearch/readonlyrest.yml:/usr/share/elasticsearch/config/readonlyrest.yml 47 | - /ssd/HA_ssd:/usr/share/elasticsearch/data 48 | networks: 49 | - customer 50 | deploy: 51 | replicas: 1 52 | restart_policy: 53 | condition: on-failure 54 | delay: 5s 55 | max_attempts: 3 56 | window: 120s 57 | placement: 58 | constraints: [node.hostname == cloud01b] 59 | sata1: 60 | image: 
hasecuritysolutions/elasticsearch-readonlyrest:6.7.0-gcs 61 | env_file: env/es_warm.env 62 | volumes: 63 | - /cloud/cloud_configs/client/HA/elasticsearch/readonlyrest.yml:/usr/share/elasticsearch/config/readonlyrest.yml 64 | - /sata/HA_sata:/usr/share/elasticsearch/data 65 | networks: 66 | - customer 67 | deploy: 68 | replicas: 1 69 | restart_policy: 70 | condition: on-failure 71 | delay: 5s 72 | max_attempts: 3 73 | window: 120s 74 | placement: 75 | constraints: [node.hostname == cloud01a] 76 | sata2: 77 | image: hasecuritysolutions/elasticsearch-readonlyrest:6.7.0-gcs 78 | env_file: env/es_warm.env 79 | volumes: 80 | - /cloud/cloud_configs/client/HA/elasticsearch/readonlyrest.yml:/usr/share/elasticsearch/config/readonlyrest.yml 81 | - /sata/HA_sata:/usr/share/elasticsearch/data 82 | networks: 83 | - customer 84 | deploy: 85 | replicas: 1 86 | restart_policy: 87 | condition: on-failure 88 | delay: 5s 89 | max_attempts: 3 90 | window: 120s 91 | placement: 92 | constraints: [node.hostname == cloud01b] 93 | kibana: 94 | image: hasecuritysolutions/kibana-readonlyrest:6.7.0 95 | ports: 96 | - 5601:5601 97 | volumes: 98 | - /cloud/cloud_configs/client/HA/kibana:/opt/kibana/config:ro 99 | networks: 100 | - customer 101 | deploy: 102 | replicas: 2 103 | restart_policy: 104 | condition: on-failure 105 | delay: 5s 106 | max_attempts: 3 107 | window: 120s 108 | placement: 109 | constraints: [node.role == manager] 110 | preferences: 111 | - spread: node.role.manager 112 | cerebro: 113 | image: hasecuritysolutions/cerebro:0.8.3 114 | volumes: 115 | - /cloud/cloud_configs/client/HA/cerebro/application.conf:/opt/cerebro/conf/application.conf:ro 116 | ports: 117 | - 9000:9000 118 | networks: 119 | - customer 120 | deploy: 121 | replicas: 2 122 | restart_policy: 123 | condition: on-failure 124 | delay: 5s 125 | max_attempts: 3 126 | window: 120s 127 | placement: 128 | constraints: [node.role == manager] 129 | preferences: 130 | - spread: node.role.manager 131 | freq_server: 132 | image: hasecuritysolutions/freq_server:2.0 133 | networks: 134 | - customer 135 | deploy: 136 | replicas: 2 137 | restart_policy: 138 | condition: on-failure 139 | delay: 5s 140 | max_attempts: 3 141 | window: 120s 142 | placement: 143 | constraints: [node.role == worker] 144 | preferences: 145 | - spread: node.role.worker 146 | domain_stats: 147 | image: hasecuritysolutions/domain_stats:2.0 148 | ports: 149 | - 20000:20000 150 | networks: 151 | - customer 152 | deploy: 153 | replicas: 2 154 | restart_policy: 155 | condition: on-failure 156 | delay: 5s 157 | max_attempts: 3 158 | window: 120s 159 | placement: 160 | constraints: [node.role == worker] 161 | preferences: 162 | - spread: node.role.worker 163 | 164 | networks: 165 | customer: 166 | driver: overlay 167 | -------------------------------------------------------------------------------- /elastalert/config.yaml.example: -------------------------------------------------------------------------------- 1 | # This is the folder that contains the rule yaml files 2 | # Any .yaml file will be loaded as a rule 3 | rules_folder: /etc/elastalert/rules 4 | 5 | # How often ElastAlert will query Elasticsearch 6 | # The unit can be anything from weeks to seconds 7 | run_every: 8 | minutes: 1 9 | 10 | # ElastAlert will buffer results from the most recent 11 | # period of time, in case some log sources are not in real time 12 | buffer_time: 13 | minutes: 15 14 | 15 | # The Elasticsearch hostname for metadata writeback 16 | # Note that every rule can have its own Elasticsearch host 17 | 
es_host: elasticsearch 18 | 19 | # The Elasticsearch port 20 | es_port: 9200 21 | 22 | # The AWS region to use. Set this when using AWS-managed elasticsearch 23 | #aws_region: us-east-1 24 | 25 | # The AWS profile to use. Use this if you are using an aws-cli profile. 26 | # See http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html 27 | # for details 28 | #profile: test 29 | 30 | # Optional URL prefix for Elasticsearch 31 | #es_url_prefix: elasticsearch 32 | 33 | # Connect with TLS to Elasticsearch 34 | #use_ssl: True 35 | 36 | # Verify TLS certificates 37 | #verify_certs: True 38 | 39 | # GET request with body is the default option for Elasticsearch. 40 | # If it fails for some reason, you can pass 'GET', 'POST' or 'source'. 41 | # See http://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport 42 | # for details 43 | #es_send_get_body_as: GET 44 | 45 | # Optional basic-auth username and password for Elasticsearch 46 | #es_username: someusername 47 | #es_password: somepassword 48 | 49 | # Use SSL authentication with client certificates. client_cert must be 50 | # a PEM file containing both cert and key for the client 51 | #verify_certs: True 52 | #ca_certs: /path/to/cacert.pem 53 | #client_cert: /path/to/client_cert.pem 54 | #client_key: /path/to/client_key.key 55 | 56 | # The index on es_host which is used for metadata storage 57 | # This can be an unmapped index, but it is recommended that you run 58 | # elastalert-create-index to set a mapping 59 | writeback_index: elastalert_status 60 | 61 | # If an alert fails for some reason, ElastAlert will retry 62 | # sending the alert until this time period has elapsed 63 | alert_time_limit: 64 | days: 2 65 | -------------------------------------------------------------------------------- /elastalert/example_rules/dns_baby_domains.yaml: -------------------------------------------------------------------------------- 1 | name: DNS Newly Registered Domain Request 2 | type: frequency 3 | index: logstash-bro-* 4 | 5 | realert: 6 | minutes: 0 7 | 8 | num_events: 1 9 | timeframe: 10 | hours: 1 11 | 12 | filter: 13 | - query: 14 | query_string: 15 | query: "event_type:dns AND creation_date:[now-30d TO now] -tags:top-1m" 16 | 17 | alert: debug 18 | #email: justin@hasecuritysolutions.com 19 | #from_addr: "elastalert@hasecuritysolutions.com" 20 | #smtp_host: email_gateway_goes_here 21 | -------------------------------------------------------------------------------- /elastalert/example_rules/dns_fuzzy_domain_match.yaml: -------------------------------------------------------------------------------- 1 | name: DNS Fuzzy Domain 2 | type: frequency 3 | index: logstash-bro-* 4 | 5 | realert: 6 | minutes: 0 7 | 8 | num_events: 1 9 | timeframe: 10 | hours: 1 11 | 12 | filter: 13 | - query: 14 | query_string: 15 | query: "event_type:dns AND ((highest_registered_domain:labmeinc.com~ -highest_registered_domain:labmeinc.com) OR (highest_registered_domain:labmeinc.internal~ -highest_registered_domain:labmeinc.internal))" 16 | 17 | alert: debug 18 | #email: justin@hasecuritysolutions.com 19 | #from_addr: "elastalert@hasecuritysolutions.com" 20 | #smtp_host: email_gateway_goes_here 21 | -------------------------------------------------------------------------------- /elastalert/example_rules/dns_newly_observerd.yaml: -------------------------------------------------------------------------------- 1 | name: Newly Accessed Domains 2 | type: new_term 3 | index: logstash-bro-* 4 | doc_type: "dns" 5 | 6 | realert: 7
| minutes: 0 8 | 9 | fields: 10 | - highest_registered_domain 11 | 12 | terms_window_size: 13 | days: 30 14 | 15 | use_terms_query: true 16 | query_key: highest_registered_domain 17 | filter: 18 | - query: 19 | query_string: 20 | query: "-tags:top-1m" 21 | 22 | alert: debug 23 | #email: justin@hasecuritysolutions.com 24 | #from_addr: "elastalert@hasecuritysolutions.com" 25 | #smtp_host: email_gateway_goes_here 26 | -------------------------------------------------------------------------------- /elastalert/example_rules/flow_high_number_of_connections.yaml: -------------------------------------------------------------------------------- 1 | name: High Number of Connections 2 | type: frequency 3 | index: logstash-ids-* 4 | 5 | realert: 6 | minutes: 5 7 | 8 | num_events: 100 9 | timeframe: 10 | hours: 1 11 | query_key: source_ip 12 | 13 | filter: 14 | - query: 15 | query_string: 16 | query: "event_type:flow AND (((state:new OR state:established) AND _exists_:tcp.ack) OR (state:closed AND tcp.fin:true)) -source_ip:192.168.2.106 -source_ip:192.168.2.78 -source_ip:10.0.0.51" 17 | 18 | alert: debug 19 | #email: justin@hasecuritysolutions.com 20 | #from_addr: "elastalert@hasecuritysolutions.com" 21 | #smtp_host: email_gateway_goes_here 22 | -------------------------------------------------------------------------------- /elastalert/example_rules/host_not_sending_logs.yaml: -------------------------------------------------------------------------------- 1 | name: Host Not Sending Logs 2 | type: flatline 3 | index: logstash-bro-* 4 | 5 | realert: 6 | minutes: 60 7 | 8 | threshold: 1 9 | timeframe: 10 | minutes: 15 11 | 12 | use_count_query: true 13 | doc_type: bro_conn 14 | 15 | filter: 16 | - query: 17 | query_string: 18 | query: 'source_ip:192.168.2.101' 19 | 20 | alert: debug 21 | #email: justin@hasecuritysolutions.com 22 | #from_addr: "elastalert@hasecuritysolutions.com" 23 | #smtp_host: email_gateway_goes_here 24 | -------------------------------------------------------------------------------- /elastalert/example_rules/password_fail_high_count.yaml: -------------------------------------------------------------------------------- 1 | name: High Login Failures 2 | type: frequency 3 | index: winlogbeat-* 4 | 5 | realert: 6 | minutes: 5 7 | 8 | num_events: 1000 9 | timeframe: 10 | hours: 1 11 | 12 | filter: 13 | - query: 14 | query_string: 15 | query: 'tags:logon_failure -tags:machine' 16 | 17 | alert: debug 18 | #email: justin@hasecuritysolutions.com 19 | #from_addr: "elastalert@hasecuritysolutions.com" 20 | #smtp_host: email_gateway_goes_here 21 | -------------------------------------------------------------------------------- /elastalert/example_rules/windows_brute_force_logins.yaml: -------------------------------------------------------------------------------- 1 | name: Brute Force Logins 2 | type: frequency 3 | index: winlogbeat-* 4 | 5 | realert: 6 | minutes: 5 7 | 8 | num_events: 50 9 | timeframe: 10 | hours: 1 11 | query_key: event_data.TargetUserName 12 | 13 | filter: 14 | - term: 15 | event_id: 4625 16 | 17 | alert: debug 18 | #email: justin@hasecuritysolutions.com 19 | #from_addr: "elastalert@hasecuritysolutions.com" 20 | #smtp_host: email_gateway_goes_here 21 | -------------------------------------------------------------------------------- /elastalert/example_rules/windows_log_cleared.yaml: -------------------------------------------------------------------------------- 1 | name: Log cleared 2 | type: frequency 3 | index: winlogbeat-* 4 | 5 | realert: 6 | minutes: 0 7 | 8 | 
num_events: 1 9 | timeframe: 10 | hours: 1 11 | 12 | filter: 13 | - term: 14 | event_id: 1102 15 | 16 | alert: debug 17 | #email: justin@hasecuritysolutions.com 18 | #from_addr: "elastalert@hasecuritysolutions.com" 19 | #smtp_host: email_gateway_goes_here 20 | -------------------------------------------------------------------------------- /elastalert/example_rules/windows_new_service_creation.yaml: -------------------------------------------------------------------------------- 1 | name: New Windows Service Detected 2 | type: new_term 3 | index: winlogbeat-* 4 | doc_type: doc 5 | 6 | realert: 7 | minutes: 0 8 | 9 | fields: 10 | - event_data.ServiceName 11 | 12 | terms_window_size: 13 | days: 3 14 | 15 | use_terms_query: true 16 | query_key: event_data.ServiceName 17 | filter: 18 | - query: 19 | query_string: 20 | query: "event_id:7045" 21 | 22 | alert: debug 23 | #email: justin@hasecuritysolutions.com 24 | #from_addr: "elastalert@hasecuritysolutions.com" 25 | #smtp_host: email_gateway_goes_here 26 | -------------------------------------------------------------------------------- /elastalert/example_rules/windows_password_spraying.yaml: -------------------------------------------------------------------------------- 1 | name: Password Spraying Windows 2 | type: frequency 3 | index: winlogbeat-* 4 | 5 | realert: 6 | minutes: 5 7 | 8 | num_events: 250 9 | timeframe: 10 | hours: 1 11 | query_key: event_data.WorkstationName 12 | 13 | filter: 14 | - query: 15 | query_string: 16 | query: 'event_id:4625 AND -event_data.WorkstationName:"-"' 17 | 18 | alert: debug 19 | #email: justin@hasecuritysolutions.com 20 | #from_addr: "elastalert@hasecuritysolutions.com" 21 | #smtp_host: email_gateway_goes_here 22 | -------------------------------------------------------------------------------- /elasticsearch/es_data/readme.txt: -------------------------------------------------------------------------------- 1 | empty file -------------------------------------------------------------------------------- /elasticsearch/footer.template.json: -------------------------------------------------------------------------------- 1 | } 2 | }, 3 | "aliases": {} 4 | } 5 | -------------------------------------------------------------------------------- /elasticsearch/header.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "order": 5, 3 | "version": 60001, 4 | "index_patterns": [ 5 | "INDEXNAME-*" 6 | ], 7 | "settings": { 8 | "index": { 9 | "routing": { 10 | "allocation": { 11 | "require": { 12 | "box_type": "hot" 13 | } 14 | } 15 | }, 16 | "mapping": { 17 | "total_fields": { 18 | "limit": "15000" 19 | } 20 | }, 21 | "refresh_interval": "30s", 22 | "number_of_shards": "NUMBERSHARDS", 23 | "number_of_replicas": "NUMBERREPLICAS" 24 | } 25 | }, 26 | "mappings": { 27 | "dynamic_templates": [ 28 | { 29 | "message_field": { 30 | "path_match": "message", 31 | "mapping": { 32 | "norms": false, 33 | "type": "text" 34 | }, 35 | "match_mapping_type": "string" 36 | } 37 | }, 38 | { 39 | "string_fields": { 40 | "mapping": { 41 | "norms": false, 42 | "type": "text", 43 | "fields": { 44 | "keyword": { 45 | "ignore_above": 256, 46 | "type": "keyword" 47 | } 48 | } 49 | }, 50 | "match_mapping_type": "string", 51 | "match": "*" 52 | } 53 | } 54 | ], 55 | "properties": 56 | -------------------------------------------------------------------------------- /elasticsearch/indexes/elastalert_status.json: -------------------------------------------------------------------------------- 1 | { 2 
| "order": 0, 3 | "version": 60001, 4 | "index_patterns": [ 5 | "elastalert_status" 6 | ], 7 | "settings": { 8 | "index": { 9 | "refresh_interval": "30s" 10 | } 11 | }, 12 | "mappings": { 13 | "_default_": { 14 | "dynamic_templates": [ 15 | { 16 | "message_field": { 17 | "path_match": "message", 18 | "match_mapping_type": "string", 19 | "mapping": { 20 | "type": "text", 21 | "norms": false 22 | } 23 | } 24 | }, 25 | { 26 | "string_fields": { 27 | "match": "*", 28 | "match_mapping_type": "string", 29 | "mapping": { 30 | "type": "text", 31 | "norms": false, 32 | "fields": { 33 | "keyword": { 34 | "type": "keyword", 35 | "ignore_above": 256 36 | } 37 | } 38 | } 39 | } 40 | } 41 | ], 42 | "properties": { 43 | "@timestamp": { 44 | "type": "date" 45 | }, 46 | "@version": { 47 | "type": "keyword" 48 | }, 49 | "geoip": { 50 | "dynamic": true, 51 | "properties": { 52 | "ip": { 53 | "type": "ip" 54 | }, 55 | "location": { 56 | "type": "geo_point" 57 | }, 58 | "latitude": { 59 | "type": "half_float" 60 | }, 61 | "longitude": { 62 | "type": "half_float" 63 | } 64 | } 65 | } 66 | } 67 | } 68 | }, 69 | "aliases": {} 70 | } -------------------------------------------------------------------------------- /elasticsearch/indexes/import.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd /opt/elastic_stack/elasticsearch/indexes 3 | for filename in *.json; do 4 | name=${filename:0:-5}; 5 | echo "localhost:9200/_template/$name"; 6 | curl -XPUT "localhost:9200/_template/$name" -H 'Content-Type: application/json' --data "@/opt/elastic_stack/elasticsearch/indexes/$filename"; 7 | done -------------------------------------------------------------------------------- /elasticsearch/indexes/logstash-bro.json: -------------------------------------------------------------------------------- 1 | { 2 | "order": 0, 3 | "version": 60001, 4 | "index_patterns": [ 5 | "logstash-bro-*" 6 | ], 7 | "settings": { 8 | "index": { 9 | "number_of_shards": "1", 10 | "number_of_replicas": "0", 11 | "refresh_interval": "30s" 12 | } 13 | }, 14 | "mappings": { 15 | "_default_": { 16 | "dynamic_templates": [ 17 | { 18 | "message_field": { 19 | "path_match": "message", 20 | "match_mapping_type": "string", 21 | "mapping": { 22 | "type": "text", 23 | "norms": false 24 | } 25 | } 26 | }, 27 | { 28 | "string_fields": { 29 | "match": "*", 30 | "match_mapping_type": "string", 31 | "mapping": { 32 | "type": "text", 33 | "norms": false, 34 | "fields": { 35 | "keyword": { 36 | "type": "keyword", 37 | "ignore_above": 256 38 | } 39 | } 40 | } 41 | } 42 | } 43 | ], 44 | "properties": { 45 | "@timestamp": { 46 | "type": "date" 47 | }, 48 | "@version": { 49 | "type": "keyword" 50 | }, 51 | "source_ip": { 52 | "type": "ip" 53 | }, 54 | "destination_ip": { 55 | "type": "ip" 56 | }, 57 | "geoip": { 58 | "dynamic": true, 59 | "properties": { 60 | "ip": { 61 | "type": "ip" 62 | }, 63 | "location": { 64 | "type": "geo_point" 65 | }, 66 | "latitude": { 67 | "type": "half_float" 68 | }, 69 | "longitude": { 70 | "type": "half_float" 71 | } 72 | } 73 | }, 74 | "source_geo": { 75 | "dynamic": true, 76 | "properties": { 77 | "ip": { 78 | "type": "ip" 79 | }, 80 | "location": { 81 | "type": "geo_point" 82 | }, 83 | "latitude": { 84 | "type": "half_float" 85 | }, 86 | "longitude": { 87 | "type": "half_float" 88 | } 89 | } 90 | }, 91 | "destination_geo": { 92 | "dynamic": true, 93 | "properties": { 94 | "ip": { 95 | "type": "ip" 96 | }, 97 | "location": { 98 | "type": "geo_point" 99 | }, 100 | "latitude": { 101 | 
"type": "half_float" 102 | }, 103 | "longitude": { 104 | "type": "half_float" 105 | } 106 | } 107 | } 108 | } 109 | } 110 | }, 111 | "aliases": {} 112 | } 113 | -------------------------------------------------------------------------------- /elasticsearch/indexes/logstash-ids.json: -------------------------------------------------------------------------------- 1 | { 2 | "order": 0, 3 | "version": 60001, 4 | "index_patterns": [ 5 | "logstash-ids-*" 6 | ], 7 | "settings": { 8 | "index": { 9 | "number_of_shards": "1", 10 | "number_of_replicas": "0", 11 | "refresh_interval": "30s" 12 | } 13 | }, 14 | "mappings": { 15 | "_default_": { 16 | "dynamic_templates": [ 17 | { 18 | "message_field": { 19 | "path_match": "message", 20 | "match_mapping_type": "string", 21 | "mapping": { 22 | "type": "text", 23 | "norms": false 24 | } 25 | } 26 | }, 27 | { 28 | "string_fields": { 29 | "match": "*", 30 | "match_mapping_type": "string", 31 | "mapping": { 32 | "type": "text", 33 | "norms": false, 34 | "fields": { 35 | "keyword": { 36 | "type": "keyword", 37 | "ignore_above": 256 38 | } 39 | } 40 | } 41 | } 42 | } 43 | ], 44 | "properties": { 45 | "@timestamp": { 46 | "type": "date" 47 | }, 48 | "@version": { 49 | "type": "keyword" 50 | }, 51 | "source_ip": { 52 | "type": "ip" 53 | }, 54 | "destination_ip": { 55 | "type": "ip" 56 | }, 57 | "geoip": { 58 | "dynamic": true, 59 | "properties": { 60 | "ip": { 61 | "type": "ip" 62 | }, 63 | "location": { 64 | "type": "geo_point" 65 | }, 66 | "latitude": { 67 | "type": "half_float" 68 | }, 69 | "longitude": { 70 | "type": "half_float" 71 | } 72 | } 73 | }, 74 | "source_geo": { 75 | "dynamic": true, 76 | "properties": { 77 | "ip": { 78 | "type": "ip" 79 | }, 80 | "location": { 81 | "type": "geo_point" 82 | }, 83 | "latitude": { 84 | "type": "half_float" 85 | }, 86 | "longitude": { 87 | "type": "half_float" 88 | } 89 | } 90 | }, 91 | "destination_geo": { 92 | "dynamic": true, 93 | "properties": { 94 | "ip": { 95 | "type": "ip" 96 | }, 97 | "location": { 98 | "type": "geo_point" 99 | }, 100 | "latitude": { 101 | "type": "half_float" 102 | }, 103 | "longitude": { 104 | "type": "half_float" 105 | } 106 | } 107 | } 108 | } 109 | } 110 | }, 111 | "aliases": {} 112 | } 113 | -------------------------------------------------------------------------------- /elasticsearch/indexes/logstash-ossec.json: -------------------------------------------------------------------------------- 1 | { 2 | "order": 0, 3 | "version": 60001, 4 | "index_patterns": [ 5 | "logstash-ossec-*" 6 | ], 7 | "settings": { 8 | "index": { 9 | "number_of_shards": "1", 10 | "number_of_replicas": "0", 11 | "refresh_interval": "30s" 12 | } 13 | }, 14 | "mappings": { 15 | "_default_": { 16 | "dynamic_templates": [ 17 | { 18 | "message_field": { 19 | "path_match": "message", 20 | "match_mapping_type": "string", 21 | "mapping": { 22 | "type": "text", 23 | "norms": false 24 | } 25 | } 26 | }, 27 | { 28 | "string_fields": { 29 | "match": "*", 30 | "match_mapping_type": "string", 31 | "mapping": { 32 | "type": "text", 33 | "norms": false, 34 | "fields": { 35 | "keyword": { 36 | "type": "keyword", 37 | "ignore_above": 256 38 | } 39 | } 40 | } 41 | } 42 | } 43 | ], 44 | "properties": { 45 | "@timestamp": { 46 | "type": "date" 47 | }, 48 | "@version": { 49 | "type": "keyword" 50 | }, 51 | "source_ip": { 52 | "type": "ip" 53 | }, 54 | "destination_ip": { 55 | "type": "ip" 56 | }, 57 | "geoip": { 58 | "dynamic": true, 59 | "properties": { 60 | "ip": { 61 | "type": "ip" 62 | }, 63 | "location": { 64 | "type": "geo_point" 
65 | }, 66 | "latitude": { 67 | "type": "half_float" 68 | }, 69 | "longitude": { 70 | "type": "half_float" 71 | } 72 | } 73 | }, 74 | "source_geo": { 75 | "dynamic": true, 76 | "properties": { 77 | "ip": { 78 | "type": "ip" 79 | }, 80 | "location": { 81 | "type": "geo_point" 82 | }, 83 | "latitude": { 84 | "type": "half_float" 85 | }, 86 | "longitude": { 87 | "type": "half_float" 88 | } 89 | } 90 | }, 91 | "destination_geo": { 92 | "dynamic": true, 93 | "properties": { 94 | "ip": { 95 | "type": "ip" 96 | }, 97 | "location": { 98 | "type": "geo_point" 99 | }, 100 | "latitude": { 101 | "type": "half_float" 102 | }, 103 | "longitude": { 104 | "type": "half_float" 105 | } 106 | } 107 | } 108 | } 109 | } 110 | }, 111 | "aliases": {} 112 | } 113 | -------------------------------------------------------------------------------- /elasticsearch/indexes/logstash-so.json: -------------------------------------------------------------------------------- 1 | { 2 | "order": 0, 3 | "version": 60001, 4 | "index_patterns": [ 5 | "logstash-so-*" 6 | ], 7 | "settings": { 8 | "index": { 9 | "number_of_shards": "1", 10 | "number_of_replicas": "0", 11 | "refresh_interval": "30s" 12 | } 13 | }, 14 | "mappings": { 15 | "_default_": { 16 | "dynamic_templates": [ 17 | { 18 | "message_field": { 19 | "path_match": "message", 20 | "match_mapping_type": "string", 21 | "mapping": { 22 | "type": "text", 23 | "norms": false 24 | } 25 | } 26 | }, 27 | { 28 | "string_fields": { 29 | "match": "*", 30 | "match_mapping_type": "string", 31 | "mapping": { 32 | "type": "text", 33 | "norms": false, 34 | "fields": { 35 | "keyword": { 36 | "type": "keyword", 37 | "ignore_above": 256 38 | } 39 | } 40 | } 41 | } 42 | } 43 | ], 44 | "properties": { 45 | "@timestamp": { 46 | "type": "date" 47 | }, 48 | "@version": { 49 | "type": "keyword" 50 | }, 51 | "source_ip": { 52 | "type": "ip" 53 | }, 54 | "destination_ip": { 55 | "type": "ip" 56 | }, 57 | "geoip": { 58 | "dynamic": true, 59 | "properties": { 60 | "ip": { 61 | "type": "ip" 62 | }, 63 | "location": { 64 | "type": "geo_point" 65 | }, 66 | "latitude": { 67 | "type": "half_float" 68 | }, 69 | "longitude": { 70 | "type": "half_float" 71 | } 72 | } 73 | }, 74 | "source_geo": { 75 | "dynamic": true, 76 | "properties": { 77 | "ip": { 78 | "type": "ip" 79 | }, 80 | "location": { 81 | "type": "geo_point" 82 | }, 83 | "latitude": { 84 | "type": "half_float" 85 | }, 86 | "longitude": { 87 | "type": "half_float" 88 | } 89 | } 90 | }, 91 | "destination_geo": { 92 | "dynamic": true, 93 | "properties": { 94 | "ip": { 95 | "type": "ip" 96 | }, 97 | "location": { 98 | "type": "geo_point" 99 | }, 100 | "latitude": { 101 | "type": "half_float" 102 | }, 103 | "longitude": { 104 | "type": "half_float" 105 | } 106 | } 107 | } 108 | } 109 | } 110 | }, 111 | "aliases": {} 112 | } 113 | -------------------------------------------------------------------------------- /elasticsearch/indexes/logstash.json: -------------------------------------------------------------------------------- 1 | { 2 | "order": 0, 3 | "version": 60001, 4 | "index_patterns": [ 5 | "logstash-*" 6 | ], 7 | "settings": { 8 | "index": { 9 | "refresh_interval": "30s" 10 | } 11 | }, 12 | "mappings": { 13 | "_default_": { 14 | "dynamic_templates": [ 15 | { 16 | "message_field": { 17 | "path_match": "message", 18 | "match_mapping_type": "string", 19 | "mapping": { 20 | "type": "text", 21 | "norms": false 22 | } 23 | } 24 | }, 25 | { 26 | "string_fields": { 27 | "match": "*", 28 | "match_mapping_type": "string", 29 | "mapping": { 30 | "type": 
"text", 31 | "norms": false, 32 | "fields": { 33 | "keyword": { 34 | "type": "keyword", 35 | "ignore_above": 256 36 | } 37 | } 38 | } 39 | } 40 | } 41 | ], 42 | "properties": { 43 | "@timestamp": { 44 | "type": "date" 45 | }, 46 | "@version": { 47 | "type": "keyword" 48 | }, 49 | "geoip": { 50 | "dynamic": true, 51 | "properties": { 52 | "ip": { 53 | "type": "ip" 54 | }, 55 | "location": { 56 | "type": "geo_point" 57 | }, 58 | "latitude": { 59 | "type": "half_float" 60 | }, 61 | "longitude": { 62 | "type": "half_float" 63 | } 64 | } 65 | } 66 | } 67 | } 68 | }, 69 | "aliases": {} 70 | } -------------------------------------------------------------------------------- /elasticsearch/indexes/winlogbeat-6.json: -------------------------------------------------------------------------------- 1 | { 2 | "order": 1, 3 | "index_patterns": [ 4 | "winlogbeat-*" 5 | ], 6 | "settings": { 7 | "index": { 8 | "mapping": { 9 | "total_fields": { 10 | "limit": "10000" 11 | } 12 | }, 13 | "refresh_interval": "30s", 14 | "number_of_shards": "1", 15 | "number_of_replicas": "0" 16 | } 17 | }, 18 | "mappings": { 19 | "doc": { 20 | "_meta": { 21 | "version": "6.0.0" 22 | }, 23 | "date_detection": false, 24 | "dynamic_templates": [ 25 | { 26 | "fields": { 27 | "mapping": { 28 | "type": "keyword" 29 | }, 30 | "match_mapping_type": "string", 31 | "path_match": "fields.*" 32 | } 33 | }, 34 | { 35 | "docker.container.labels": { 36 | "mapping": { 37 | "type": "keyword" 38 | }, 39 | "match_mapping_type": "string", 40 | "path_match": "docker.container.labels.*" 41 | } 42 | }, 43 | { 44 | "event_data": { 45 | "mapping": { 46 | "type": "keyword" 47 | }, 48 | "match_mapping_type": "string", 49 | "path_match": "event_data.*" 50 | } 51 | }, 52 | { 53 | "user_data": { 54 | "match_mapping_type": "string", 55 | "path_match": "user_data.*", 56 | "mapping": { 57 | "type": "keyword" 58 | } 59 | } 60 | }, 61 | { 62 | "strings_as_keyword": { 63 | "mapping": { 64 | "ignore_above": 1024, 65 | "type": "keyword" 66 | }, 67 | "match_mapping_type": "string" 68 | } 69 | } 70 | ], 71 | "properties": { 72 | "fields": { 73 | "type": "object" 74 | }, 75 | "kubernetes": { 76 | "properties": { 77 | "namespace": { 78 | "type": "keyword", 79 | "ignore_above": 1024 80 | }, 81 | "labels": { 82 | "type": "object" 83 | }, 84 | "annotations": { 85 | "type": "object" 86 | }, 87 | "container": { 88 | "properties": { 89 | "name": { 90 | "type": "keyword", 91 | "ignore_above": 1024 92 | }, 93 | "image": { 94 | "type": "keyword", 95 | "ignore_above": 1024 96 | } 97 | } 98 | }, 99 | "pod": { 100 | "properties": { 101 | "name": { 102 | "type": "keyword", 103 | "ignore_above": 1024 104 | } 105 | } 106 | } 107 | } 108 | }, 109 | "activity_id": { 110 | "type": "keyword", 111 | "ignore_above": 1024 112 | }, 113 | "user": { 114 | "properties": { 115 | "domain": { 116 | "type": "keyword", 117 | "ignore_above": 1024 118 | }, 119 | "type": { 120 | "ignore_above": 1024, 121 | "type": "keyword" 122 | }, 123 | "identifier": { 124 | "type": "keyword", 125 | "ignore_above": 1024 126 | }, 127 | "name": { 128 | "type": "keyword", 129 | "ignore_above": 1024 130 | } 131 | } 132 | }, 133 | "tags": { 134 | "type": "keyword", 135 | "ignore_above": 1024 136 | }, 137 | "error": { 138 | "properties": { 139 | "message": { 140 | "type": "text", 141 | "norms": false 142 | }, 143 | "code": { 144 | "type": "long" 145 | }, 146 | "type": { 147 | "type": "keyword", 148 | "ignore_above": 1024 149 | } 150 | } 151 | }, 152 | "message": { 153 | "type": "text", 154 | "norms": false 155 | }, 156 | 
"opcode": { 157 | "type": "keyword", 158 | "ignore_above": 1024 159 | }, 160 | "version": { 161 | "type": "long" 162 | }, 163 | "source_name": { 164 | "type": "keyword", 165 | "ignore_above": 1024 166 | }, 167 | "user_data": { 168 | "type": "object" 169 | }, 170 | "meta": { 171 | "properties": { 172 | "cloud": { 173 | "properties": { 174 | "project_id": { 175 | "type": "keyword", 176 | "ignore_above": 1024 177 | }, 178 | "region": { 179 | "type": "keyword", 180 | "ignore_above": 1024 181 | }, 182 | "provider": { 183 | "type": "keyword", 184 | "ignore_above": 1024 185 | }, 186 | "instance_id": { 187 | "type": "keyword", 188 | "ignore_above": 1024 189 | }, 190 | "instance_name": { 191 | "type": "keyword", 192 | "ignore_above": 1024 193 | }, 194 | "machine_type": { 195 | "type": "keyword", 196 | "ignore_above": 1024 197 | }, 198 | "availability_zone": { 199 | "type": "keyword", 200 | "ignore_above": 1024 201 | } 202 | } 203 | } 204 | } 205 | }, 206 | "docker": { 207 | "properties": { 208 | "container": { 209 | "properties": { 210 | "id": { 211 | "type": "keyword", 212 | "ignore_above": 1024 213 | }, 214 | "image": { 215 | "type": "keyword", 216 | "ignore_above": 1024 217 | }, 218 | "name": { 219 | "type": "keyword", 220 | "ignore_above": 1024 221 | }, 222 | "labels": { 223 | "type": "object" 224 | } 225 | } 226 | } 227 | } 228 | }, 229 | "event_id": { 230 | "type": "long" 231 | }, 232 | "keywords": { 233 | "type": "keyword", 234 | "ignore_above": 1024 235 | }, 236 | "record_number": { 237 | "type": "keyword", 238 | "ignore_above": 1024 239 | }, 240 | "provider_guid": { 241 | "type": "keyword", 242 | "ignore_above": 1024 243 | }, 244 | "xml": { 245 | "type": "text", 246 | "norms": false 247 | }, 248 | "type": { 249 | "type": "keyword", 250 | "ignore_above": 1024 251 | }, 252 | "task": { 253 | "ignore_above": 1024, 254 | "type": "keyword" 255 | }, 256 | "thread_id": { 257 | "type": "long" 258 | }, 259 | "beat": { 260 | "properties": { 261 | "name": { 262 | "type": "keyword", 263 | "ignore_above": 1024 264 | }, 265 | "hostname": { 266 | "type": "keyword", 267 | "ignore_above": 1024 268 | }, 269 | "timezone": { 270 | "ignore_above": 1024, 271 | "type": "keyword" 272 | }, 273 | "version": { 274 | "type": "keyword", 275 | "ignore_above": 1024 276 | } 277 | } 278 | }, 279 | "computer_name": { 280 | "type": "keyword", 281 | "ignore_above": 1024 282 | }, 283 | "event_data": { 284 | "type": "object" 285 | }, 286 | "log_name": { 287 | "type": "keyword", 288 | "ignore_above": 1024 289 | }, 290 | "level": { 291 | "type": "keyword", 292 | "ignore_above": 1024 293 | }, 294 | "message_error": { 295 | "type": "keyword", 296 | "ignore_above": 1024 297 | }, 298 | "related_activity_id": { 299 | "type": "keyword", 300 | "ignore_above": 1024 301 | }, 302 | "@timestamp": { 303 | "type": "date" 304 | }, 305 | "process_id": { 306 | "type": "long" 307 | } 308 | } 309 | } 310 | }, 311 | "aliases": {} 312 | } -------------------------------------------------------------------------------- /elasticsearch/snapshots/readme.txt: -------------------------------------------------------------------------------- 1 | empty file -------------------------------------------------------------------------------- /generate_template.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | USER=YOURUSERHERE 3 | PASSWORD=YOURPASSWORDHERE 4 | SERVER=SERVERGOESHERE 5 | PORT=PASSWORDGOESHERE 6 | SHARDPERXGB=50 7 | REPLICAENABLED=1 8 | CACERT=/opt/elastic_stack/ca/ca.crt 9 | 
HEADERFILE=/opt/elastic_stack/elasticsearch/header.template.json 10 | FOOTERFILE=/opt/elastic_stack/elasticsearch/footer.template.json 11 | PURGEOLDTEMPLATE=1 12 | ceil() { 13 | echo "define ceil (x) {if (x<0) {return x/1} \ 14 | else {if (scale(x)==0) {return x} \ 15 | else {return x/1 + 1 }}} ; ceil($1)" | bc 16 | } 17 | echo "Enter a specific name for an index to pull properties from - Example: logstash-zeek-2019.11.22" 18 | read INDEXNAME 19 | echo "Enter the index pattern name. logstash-zeek-2019.11.22 would have a pattern name of logstash-zeek" 20 | read INDEXPATTERNNAME 21 | curl -s --cacert ${CACERT} -u ${USER}:${PASSWORD} https://${SERVER}:$PORT/${INDEXNAME}/_mappings?pretty | jq .[]."mappings"."properties" > properties.json 22 | INDEXPRIMARIESSIZE=$(curl -s --cacert ${CACERT} -u ${USER}:${PASSWORD} https://${SERVER}:$PORT/${INDEXNAME}/_stats/store | jq ."_all"."primaries"."store"."size_in_bytes" | awk '{ byte =$1 /1024/1024/1024; print byte }') 23 | SHARDS=1 24 | if [[ $PURGEOLDTEMPLATE -eq 1 ]] 25 | then 26 | rm -f ${INDEXPATTERNNAME}.template.json 27 | fi 28 | 29 | cat $HEADERFILE > ${INDEXPATTERNNAME}.template.json 30 | cat properties.json >> ${INDEXPATTERNNAME}.template.json 31 | cat $FOOTERFILE >> ${INDEXPATTERNNAME}.template.json 32 | sed -i "s/INDEXNAME/${INDEXPATTERNNAME}/" ${INDEXPATTERNNAME}.template.json 33 | rm -f properties.json 34 | sed -i "s/NUMBERSHARDS/${SHARDS}/" ${INDEXPATTERNNAME}.template.json 35 | 36 | if [[ $REPLICAENABLED -eq 1 ]] 37 | then 38 | sed -i "s/NUMBERREPLICAS/1/" ${INDEXPATTERNNAME}.template.json 39 | else 40 | sed -i "s/NUMBERREPLICAS/0/" ${INDEXPATTERNNAME}.template.json 41 | fi 42 | echo "New Template created at ${INDEXPATTERNNAME}.template.json" 43 | 44 | echo "Would you like to import the index template? 
Enter 1 for yes or 2 for no" 45 | read ASKTOIMPORT 46 | 47 | if [[ $ASKTOIMPORT -eq 1 ]] 48 | then 49 | curl -s --cacert ${CACERT} -u ${USER}:${PASSWORD} -X PUT "https://${SERVER}:$PORT/_template/${INDEXPATTERNNAME}?pretty" -H 'Content-Type: application/json' -d @${INDEXPATTERNNAME}.template.json 50 | fi 51 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/0006_input_beats.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/15/2017 5 | 6 | input { 7 | # Assumes filebeat 8 | beats { 9 | port => "5044" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1006_preprocess_beats.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [source] =~ "^\/nsm\/bro" { 3 | grok { 4 | match => { "source" => "\/%{WORD:file_name}\.log$" } 5 | } 6 | mutate { 7 | add_field => { "event_type" => "bro_%{file_name}" } 8 | remove_field => "file_name" 9 | add_tag => "bro" 10 | } 11 | if [event_type] =~ "^bro_http" { 12 | mutate { 13 | replace => { "event_type" => "bro_http"} 14 | } 15 | } 16 | } 17 | if [source] =~ ".eve.json$" { 18 | json { 19 | source => "message" 20 | remove_field => "message" 21 | add_tag => "suricata" 22 | } 23 | } 24 | mutate { 25 | replace => { "type" => "doc" } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1100_preprocess_bro_conn.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for conn.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_conn" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","protocol","service","duration","original_bytes","respond_bytes","connection_state","local_orig","local_respond","missed_bytes","history","original_packets","original_ipbytes","respond_packets","respond_ipbytes","tunnel_parents","original_country_code","respond_country_code","sensor_name"] 12 | 13 | # If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, insert a literal in between the two quotes on your logstash system, use a text editor like nano that doesn't convert tabs to spaces. 
14 | separator => "	" 15 | } 16 | translate { 17 | field => "connection_state" 18 | 19 | destination => "ConnectionStateDescription" 20 | 21 | dictionary => [ 22 | "S0", "Connection attempt seen, no reply", 23 | "S1", "Connection established, not terminated", 24 | "S2", "Connection established and close attempt by originator seen (but no reply from responder)", 25 | "S3", "Connection established and close attempt by responder seen (but no reply from originator)", 26 | "SF", "Normal SYN/FIN completion", 27 | "REJ", "Connection attempt rejected", 28 | "RSTO", "Connection established, originator aborted (sent a RST)", 29 | "RSTR", "Established, responder aborted", 30 | "RSTOS0", "Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder", 31 | "RSTRH", "Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator", 32 | "SH", "Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was 'half' open)", 33 | "SHR", "Responder sent a SYN ACK followed by a FIN, we never saw a SYN from the originator", 34 | "OTH", "No SYN seen, just midstream traffic (a 'partial connection' that was not later closed)" 35 | ] 36 | } 37 | mutate { 38 | #add_tag => [ "conf_file_1100"] 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1101_preprocess_bro_dhcp.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for dhcp.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_dhcp" { 9 | # This is the initial parsing of the log 10 | grok { 11 | match => [ "message", "(?<timestamp>(.*?))\t(?<uid>(.*?))\t(?<source_ip>(.*?))\t(?<source_port>(.*?))\t(?<destination_ip>(.*?))\t(?<destination_port>(.*?))\t(?<mac>(.*?))\t(?<assigned_ip>(.*?))\t(?<lease_time>(.*?))\t(?<transaction_id>(.*))" ] 12 | } 13 | mutate { 14 | #add_tag => [ "conf_file_1101"] 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1102_preprocess_bro_dns.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for dns.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_dns" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","protocol","transaction_id","rtt","query","query_class","query_class_name","query_type","query_type_name","rcode","rcode_name","aa","tc","rd","ra","z","answers","ttls","rejected"] 12 | 13 | # If you use a custom delimiter, change the following value in between the quotes to your delimiter. Otherwise, insert a literal tab in between the two quotes. On your logstash system, use a text editor like nano that doesn't convert tabs to spaces.
14 | separator => "	" 15 | } 16 | if [ttls] == "-" { 17 | mutate { 18 | remove_field => [ "ttls" ] 19 | } 20 | } else { 21 | } 22 | mutate { 23 | #add_tag => [ "conf_file_1102"] 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1103_preprocess_bro_dpd.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for dpd.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_dpd" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","protocol","analyzer","failure_reason"] 12 | separator => "	" 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1103"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1104_preprocess_bro_files.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for files.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_files" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","fuid","file_ip","destination_ip","connection_uids","source","depth","analyzer","mime_type","file_name","duration","local_orig","is_orig","seen_bytes","total_bytes","missing_bytes","overflow_bytes","timed_out","parent_fuid","md5","sha1","sha256","extracted"] 12 | separator => "	" 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1104"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1105_preprocess_bro_ftp.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for ftp.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_ftp" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","username","password","command","argument","mime_type","file_size","reply_code","reply_message","data_channel_passive","data_channel_source_ip","data_channel_destination_ip","data_channel_destination_port","fuid"] 12 | separator => "	" 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1105"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1106_preprocess_bro_http.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for http.log from Bro systems 7 | filter { 8 | if
[event_type] == "bro_http" { 9 | grok { 10 | match => [ "message", "(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*?))\t(?(.*))\t(?(.*))\t(?(.*))\t(?(.*))\t(?(.*))\t(?(.*))\t(?(.*))\t(?(.*))\t(?(.*))" ] 11 | } 12 | if [useragent] == "-" { 13 | mutate { 14 | remove_field => [ "useragent" ] 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1107_preprocess_bro_irc.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for irc.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_irc" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","nick","username","command","value","additional_info","dcc_file_name","dcc_file_size","dcc_mime_type","fuid"] 12 | separator => " " 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1107"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1108_preprocess_bro_kereberos.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # This conf file is based on accepting logs for kerberos.log from Bro systems 6 | filter { 7 | if [event_type] == "bro_kerberos" { 8 | csv { 9 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","request_type","client","service","success","error_message","email_from","till","cipher","forwardable","renewable","client_certificate_subject","client_certificate_uid","server_certificate_subject","server_certificate_fuid"] 10 | separator => " " 11 | } 12 | mutate { 13 | #add_tag => [ "conf_file_1108"] 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1109_preprocess_bro_notice.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for notice.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_notice" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","fuid","file_mime_type","file_description","protocol","note","message","sub_message","source_ip","destination_ip","p","n","peer_description","action","suppress_for","dropped","destination_country_code","destination_region","destination_city","destination_latitude","destination_longitude"] 12 | separator => " " 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1109"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- 
/logstash/logstash_configs/bro-tsv/1110_preprocess_bro_rdp.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for rdp.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_rdp" { 9 | csv { 10 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","cookie","result","security_protocol","keyboard_layout","client_build","client_name","client_digital_product_id","desktop_width","desktop_height","requested_color_depth","certificate_type","certificate_count","certificate_permanent","encryption_level","encryption_method"] 11 | separator => "	" 12 | } 13 | mutate { 14 | #add_tag => [ "conf_file_1110"] 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1111_preprocess_bro_signatures.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for signatures.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_signatures" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","note","signature_id","event_message","sub_message","signature_count","host_count"] 12 | separator => "	" 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1111"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1112_preprocess_bro_smtp.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for smtp.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_smtp" { 9 | grok { 10 | match => [ "message", "(?<timestamp>(.*?))\t(?<uid>(.*?))\t(?<source_ip>(.*?))\t(?<source_port>(.*?))\t(?<destination_ip>(.*?))\t(?<destination_port>(.*?))\t(?<trans_depth>(.*?))\t(?<helo>(.*?))\t(?<mail_from>(.*?))\t(?<recipient_to>(.*?))\t(?<date>(.*?))\t(?<from>(.*?))\t(?<to>(.*?))\t(?<reply_to>(.*?))\t(?<message_id>(.*?))\t(?<in_reply_to>(.*?))\t(?<subject>(.*?))\t(?<x_originating_ip>(.*?))\t(?<first_received>(.*))\t(?<second_received>(.*))\t(?<last_reply>(.*))\t(?<path>(.*))\t(?<useragent>(.*))\t(?<tls>(.*))\t(?<fuids>(.*))\t(?<is_webmail>(.*))" ] 11 | } 12 | if [useragent] == "-" { 13 | mutate { 14 | remove_field => [ "useragent" ] 15 | } 16 | } 17 | mutate { 18 | #add_tag => [ "conf_file_1112"] 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1113_preprocess_bro_snmp.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for snmp.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_snmp" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns =>
["timestamp","uid","source_ip","source_port","destination_ip","destination_port","duration","version","community","get_requests","get_bulk_requests","get_responses","set_responses","display_string","up_since"] 12 | separator => " " 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1113"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1114_preprocess_bro_software.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for software.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_software" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","source_ip","source_port","software_type","name","version_major","version_minor","version_minor2","version_minor3","version_additional_info","unparsed_version"] 12 | separator => " " 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1114"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1115_preprocess_bro_ssh.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for ssh.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_ssh" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","version","authentication_success","direction","client","server","cipher_algorithm","mac_algorithm","compression_algorithm","kex_algorithm","host_key_algorithm","host_key","destination_country_code","destination_region","destination_city","destination_latitude","destination_longitude"] 12 | separator => " " 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1115"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1116_preprocess_bro_ssl.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for ssl.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_ssl" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","ssl_version","cipher","curve","server_name","resumed","last_alert","next_protocol","established","certificate_chain_fuids","client_certificate_chain_fuids","certificate_subject","certificate_issuer","client_subject","client_issuer","validation_status"] 12 | separator => " " 13 | } 14 | mutate { 15 | gsub => [ "subject", "\\\\,", "|" ] 16 | } 17 | kv { 18 | include_keys => [ "CN", "C", "O", "OU", "ST", "SN", "L", "DC", "GN", "pseudonym", "serialNumber", "title", "initials" ] 19 | field_split => "," 20 | source => "certificate_issuer" 21 | } 22 | 
mutate { 23 | rename => { "CN" => "issuer_common_name"} 24 | rename => { "C" => "issuer_country_code"} 25 | rename => { "O" => "issuer_organization"} 26 | rename => { "OU" => "issuer_organization_unit"} 27 | rename => { "ST" => "issuer_state"} 28 | rename => { "SN" => "issuer_surname"} 29 | rename => { "L" => "issuer_locality"} 30 | rename => { "DC" => "issuer_distinguished_name"} 31 | rename => { "GN" => "issuer_given_name"} 32 | rename => { "pseudonym" => "issuer_pseudonym"} 33 | rename => { "serialNumber" => "issuer_serial_number"} 34 | rename => { "title" => "issuer_title"} 35 | rename => { "initials" => "issuer_initials"} 36 | } 37 | kv { 38 | include_keys => [ "CN", "C", "O", "OU", "ST", "SN", "L", "GN", "pseudonym", "serialNumber", "title", "initials" ] 39 | field_split => "," 40 | source => "certificate_subject" 41 | } 42 | mutate { 43 | rename => { "CN" => "certificate_common_name"} 44 | rename => { "C" => "certificate_country_code"} 45 | rename => { "O" => "certificate_organization"} 46 | rename => { "OU" => "certificate_organization_unit"} 47 | rename => { "ST" => "certificate_state"} 48 | rename => { "SN" => "certificate_surname"} 49 | rename => { "L" => "certificate_locality"} 50 | rename => { "GN" => "certificate_given_name"} 51 | rename => { "pseudonym" => "certificate_pseudonym"} 52 | rename => { "serialNumber" => "certificate_serial_number"} 53 | rename => { "title" => "certificate_title"} 54 | rename => { "initials" => "certificate_initials"} 55 | } 56 | if [certificate_subject] == "-" { 57 | mutate { 58 | remove_field => [ "certificate_subject" ] 59 | } 60 | } 61 | if [certificate_issuer] == "-" { 62 | mutate { 63 | remove_field => [ "certificate_issuer" ] 64 | } 65 | } 66 | if [certificate_common_name] { 67 | ruby { 68 | code => "event.set('certificate_common_name_length', event.get('certificate_common_name').length)" 69 | } 70 | } 71 | if [issuer_common_name] { 72 | ruby { 73 | code => "event.set('issuer_common_name_length', event.get('issuer_common_name').length)" 74 | } 75 | } 76 | if [server_name] == "-" { 77 | mutate { 78 | remove_field => [ "server_name" ] 79 | } 80 | } else { 81 | ruby { 82 | code => "event.set('server_name_length', event.get('server_name').length)" 83 | } 84 | } 85 | if [certificate_chain_fuids] == "-" { 86 | mutate { 87 | remove_field => [ "certificate_chain_fuids" ] 88 | } 89 | } else { 90 | ruby { 91 | code => "event.set('certificate_chain_count', event.get('certificate_chain_fuids').count(',') + 1)" 92 | } 93 | } 94 | if [client_certificate_chain_fuids] == "-" { 95 | mutate { 96 | remove_field => [ "client_certificate_chain_fuids" ] 97 | } 98 | } 99 | if [client_issuer] == "-" { 100 | mutate { 101 | remove_field => [ "client_issuer" ] 102 | } 103 | } 104 | if [client_subject] == "-" { 105 | mutate { 106 | remove_field => [ "client_subject" ] 107 | } 108 | } 109 | if [curve] == "-" { 110 | mutate { 111 | remove_field => [ "curve" ] 112 | } 113 | } 114 | if [issuer] == "-" { 115 | mutate { 116 | remove_field => [ "issuer" ] 117 | } 118 | } 119 | if [query] == "-" { 120 | mutate { 121 | remove_field => [ "query" ] 122 | } 123 | } 124 | if [subject] == "-" { 125 | mutate { 126 | remove_field => [ "subject" ] 127 | } 128 | } 129 | if [validation_status] == "-" { 130 | mutate { 131 | remove_field => [ "validation_status" ] 132 | } 133 | } 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1117_preprocess_bro_syslog.conf: 
-------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for syslog.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_syslog" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","protocol","facility","severity","message"] 12 | separator => "	" 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1117"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1118_preprocess_bro_tunnel.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for tunnel.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_tunnel" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","tunnel_type","action"] 12 | separator => "	" 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1118"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1119_preprocess_bro_weird.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for weird.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_weird" { 9 | grok { 10 | match => [ "message", "(?<timestamp>(.*?))\t(?<uid>(.*?))\t(?<source_ip>(.*?))\t(?<source_port>(.*?))\t(?<destination_ip>(.*?))\t(?<destination_port>(.*?))\t(?<name>(.*?))\t(?<additional_info>(.*?))\t(?<notice>(.*?))\t(?<peer>(.*))" ] 11 | } 12 | mutate { 13 | #add_tag => [ "conf_file_1119"] 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1121_preprocess_bro_mysql.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for mysql.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_mysql" { 9 | # This is the initial parsing of the log 10 | grok { 11 | match => [ "message", "(?<timestamp>(.*?))\t(?<uid>(.*?))\t(?<source_ip>(.*?))\t(?<source_port>(.*?))\t(?<destination_ip>(.*?))\t(?<destination_port>(.*?))\t(?<command>(.*?))\t(?<argument>(.*?))\t(?<success>(.*?))\t(?<rows>(.*))" ] 12 | } 13 | mutate { 14 | #add_tag => [ "conf_file_1121"] 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1122_preprocess_bro_socks.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for socks.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_socks" { 9 | # This is the initial parsing of the log 10 | grok { 11 | match => [ "message", "(?<timestamp>(.*?))\t(?<uid>(.*?))\t(?<source_ip>(.*?))\t(?<source_port>(.*?))\t(?<destination_ip>(.*?))\t(?<destination_port>(.*?))\t(?<version>(.*?))\t(?<username>(.*?))\t(?<password>(.*?))\t(?<status>(.*))\t(?<request_host>(.*))\t(?<request_name>(.*))\t(?<request_port>(.*))\t(?<bound_host>(.*))\t(?<bound_name>(.*))\t(?<bound_port>(.*))" ] 12 | } 13 | mutate { 14 | #add_tag => [ "conf_file_1122"] 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1123_preprocess_bro_x509.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for x509.log from Bro systems 7 | 8 | filter { 9 | if [event_type] == "bro_x509" { 10 | grok { 11 | match => [ "message", "(?<timestamp>(.*?))\t(?<fuid>(.*?))\t(?<certificate_version>(.*?))\t(?<certificate_serial>(.*?))\t(?<certificate_subject>(.*?))\t(?<certificate_issuer>(.*?))\t(?<certificate_not_valid_before>(.*?))\t(?<certificate_not_valid_after>(.*?))\t(?<certificate_key_algorithm>(.*?))\t(?<certificate_signing_algorithm>(.*))\t(?<certificate_key_type>(.*))\t(?<certificate_key_length>(.*))\t(?<certificate_exponent>(.*))\t(?<certificate_curve>(.*))\t(?<san_dns>(.*))\t(?<san_uri>(.*))\t(?<san_email>(.*))\t(?<san_ip>(.*))\t(?<basic_constraints_ca>(.*))\t(?<basic_constraints_path_length>(.*))" ] 12 | } 13 | 14 | mutate { 15 | gsub => [ "certificate_issuer", "\\\\,", "|" ] 16 | gsub => [ "certificate_subject", "\\\\,", "|" ] 17 | } 18 | 19 | kv { 20 | include_keys => [ "CN", "C", "O", "OU", "ST", "SN", "L", "DC", "GN", "pseudonym", "serialNumber", "title", "initials" ] 21 | field_split => "," 22 | source => "certificate_issuer" 23 | } 24 | mutate { 25 | rename => { "CN" => "issuer_common_name"} 26 | rename => { "C" => "issuer_country_code"} 27 | rename => { "O" => "issuer_organization"} 28 | rename => { "OU" => "issuer_organization_unit"} 29 | rename => { "ST" => "issuer_state"} 30 | rename => { "SN" => "issuer_surname"} 31 | rename => { "L" => "issuer_locality"} 32 | rename => { "DC" => "issuer_distinguished_name"} 33 | rename => { "GN" => "issuer_given_name"} 34 | rename => { "pseudonym" => "issuer_pseudonym"} 35 | rename => { "serialNumber" => "issuer_serial_number"} 36 | rename => { "title" => "issuer_title"} 37 | rename => { "initials" => "issuer_initials"} 38 | } 39 | kv { 40 | include_keys => [ "CN", "C", "O", "OU", "ST", "SN", "L", "GN", "pseudonym", "serialNumber", "title", "initials" ] 41 | field_split => "," 42 | source => "certificate_subject" 43 | } 44 | mutate { 45 | rename => { "CN" => "certificate_common_name"} 46 | rename => { "C" => "certificate_country_code"} 47 | rename => { "O" => "certificate_organization"} 48 | rename => { "OU" => "certificate_organization_unit"} 49 | rename => { "ST" => "certificate_state"} 50 | rename => { "SN" => "certificate_surname"} 51 | rename => { "L" => "certificate_locality"} 52 | rename => { "GN" => "certificate_given_name"} 53 | rename => { "pseudonym" => "certificate_pseudonym"} 54 | rename => { "serialNumber" => "certificate_serial_number"} 55 | rename => { "title" => "certificate_title"} 56 | rename => { "initials" => "certificate_initials"} 57 | } 58 | if [query] == "-" { 59 | mutate { 60 | remove_field => [ "query" ] 61 | } 62 | } 63 | if [san_dns] == "-" { 64 | mutate { 65 | remove_field => [ "san_dns" ] 66 | } 67 | } 68 | if [san_email] == "-" { 69 | mutate { 70 | remove_field => [ "san_email" ] 71 | } 72 | } 73 | if [san_uri] == "-" { 74 | mutate { 75 | remove_field => [ "san_uri" ] 76 | } 77 | } 78 | if [san_ip] == "-" { 79 | mutate { 80 | remove_field => [ "san_ip" ] 81 | } 82 | } 83 | if [certificate_common_name] { 84 |
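# Hypothetical example: a certificate_common_name of "example.com" yields certificate_common_name_length 11;
# unusually long common names can be a useful hunting signal.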
ruby { 85 | code => "event.set('certificate_common_name_length', event.get('certificate_common_name').length)" 86 | } 87 | } 88 | if [issuer_common_name] { 89 | ruby { 90 | code => "event.set('issuer_common_name_length', event.get('issuer_common_name').length)" 91 | } 92 | } 93 | if [certificate_not_valid_after] == "-" { 94 | mutate { 95 | remove_field => [ "certificate_not_valid_after" ] 96 | } 97 | } 98 | if [certificate_not_valid_before] == "-" { 99 | mutate { 100 | remove_field => [ "certificate_not_valid_before" ] 101 | } 102 | } 103 | if [certificate_not_valid_after] and [certificate_not_valid_before] { 104 | ruby { 105 | code => "event.set('certificate_number_days_valid', ((event.get('certificate_not_valid_after').to_f - event.get('certificate_not_valid_before').to_f) / 86400).ceil)" 106 | } 107 | date { 108 | match => [ "certificate_not_valid_after", "UNIX" ] 109 | target => "certificate_not_valid_after" 110 | } 111 | date { 112 | match => [ "certificate_not_valid_before", "UNIX" ] 113 | target => "certificate_not_valid_before" 114 | } 115 | } 116 | mutate { 117 | #add_tag => [ "conf_file_1123"] 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/1124_preprocess_bro_intel.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | # 6 | # This conf file is based on accepting logs for intel.log from Bro systems 7 | filter { 8 | if [event_type] == "bro_intel" { 9 | # This is the initial parsing of the log 10 | csv { 11 | columns => ["timestamp","uid","source_ip","source_port","destination_ip","destination_port","fuid","mime_type","file_description","indicator","indicator_event_type","seen_where","seen_node","sources"] 12 | separator => "	" 13 | } 14 | mutate { 15 | #add_tag => [ "conf_file_1124"] 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/6000_bro.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | # 6 | # This conf file is based on accepting logs for conn.log from Bro systems 7 | filter { 8 | if "bro" in [tags] { 9 | date { 10 | match => [ "timestamp", "UNIX" ] 11 | } 12 | if [duration] == "-" { 13 | mutate { 14 | replace => [ "duration", "0" ] 15 | } 16 | } 17 | if [original_bytes] == "-" { 18 | mutate { 19 | replace => [ "original_bytes", "0" ] 20 | } 21 | } 22 | # If MissedBytes is unspecified set it to zero so it is an integer 23 | if [missed_bytes] == "-" { 24 | mutate { 25 | replace => [ "missed_bytes", "0" ] 26 | } 27 | } 28 | # If OriginalIPBytes is unspecified set it to zero so it is an integer 29 | if [original_ip_bytes] == "-" { 30 | mutate { 31 | replace => [ "original_ip_bytes", "0" ] 32 | } 33 | } 34 | # If RespondBytes is unspecified set it to zero so it is an integer 35 | if [respond_bytes] == "-" { 36 | mutate { 37 | replace => [ "respond_bytes", "0" ] 38 | } 39 | } 40 | # If RespondIPBytes is unspecified set it to zero so it is an integer 41 | if [respond_ip_bytes] == "-" { 42 | mutate { 43 | replace => [ "respond_ip_bytes", "0" ] 44 | } 45 | } 46 | if
[source_port] == "-" { 47 | mutate { 48 | remove_field => ["source_port"] 49 | } 50 | } 51 | if [destination_port] == "-" { 52 | mutate { 53 | remove_field => ["destination_port"] 54 | } 55 | } 56 | if [virtual_host] == "-" { 57 | mutate { 58 | remove_field => ["virtual_host"] 59 | } 60 | } 61 | if [user] == "-" { 62 | mutate { 63 | remove_field => ["user"] 64 | } 65 | } 66 | 67 | # I renamed conn_uids to uid so that it is easy to pivot to all things tied to a connection 68 | mutate { 69 | rename => [ "connection_uids", "uid" ] 70 | } 71 | # If total_bytes is set to "-" change it to 0 so it is an integer 72 | if [total_bytes] == "-" { 73 | mutate { 74 | replace => [ "total_bytes", "0" ] 75 | } 76 | } 77 | # If seen_bytes is set to "-" change it to 0 so it is an integer 78 | if [seen_bytes] == "-" { 79 | mutate { 80 | replace => [ "seen_bytes", "0" ] 81 | } 82 | } 83 | # If missing_bytes is set to "-" change it to 0 so it is an integer 84 | if [missing_bytes] == "-" { 85 | mutate { 86 | replace => [ "missing_bytes", "0" ] 87 | } 88 | } 89 | # If pverflow_bytes is set to "-" change it to 0 so it is an integer 90 | if [overflow_bytes] == "-" { 91 | mutate { 92 | replace => [ "overflow_bytes", "0" ] 93 | } 94 | } 95 | # I recommend changing the field types below to integer or floats so searches can do greater than or less than 96 | # and also so math functions can be ran against them 97 | mutate { 98 | lowercase => [ "query" ] 99 | remove_field => [ "timestamp" ] 100 | } 101 | 102 | # Combine OriginalBytes and RespondBytes and save the value to total_bytes 103 | if [original_bytes] { 104 | if [respond_bytes] { 105 | ruby { 106 | code => "event.set('total_bytes',event.get('original_bytes') + event.get('respond_bytes'))" 107 | } 108 | } 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/6400_suricata.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # Email: jhenderson@tekrefresh.comes 3 | # Last Update: 11/16/2017 4 | # 5 | # This conf file is based on accepting logs for suricata json events 6 | filter { 7 | if "suricata" in [tags] { 8 | if "test_data" not in [tags] { 9 | date { 10 | match => [ "timestamp", "ISO8601" ] 11 | } 12 | } else { 13 | mutate { 14 | remove_field => [ "timestamp" ] 15 | } 16 | } 17 | # I recommend renaming the fields below to be consistent with other log sources. 
This makes it easy to "pivot" between logs 18 | mutate { 19 | rename => [ "src_ip", "source_ip" ] 20 | rename => [ "dest_ip", "destination_ip" ] 21 | rename => [ "src_port", "source_port" ] 22 | rename => [ "dest_port", "destination_port" ] 23 | rename => [ "[flow][age]", "duration" ] 24 | rename => [ "[flow][alerted]", "flow_alerted" ] 25 | rename => [ "[flow][bytes_toclient]", "bytes_to_client" ] 26 | rename => [ "[flow][bytes_toserver]", "bytes_to_server" ] 27 | rename => [ "[flow][end]", "flow_end" ] 28 | rename => [ "[flow][pkts_toclient]", "packets_to_client" ] 29 | rename => [ "[flow][pkts_toserver]", "packets_to_server" ] 30 | rename => [ "[flow][reason]", "reason" ] 31 | rename => [ "[flow][start]", "flow_start" ] 32 | rename => [ "[flow][state]", "state" ] 33 | rename => [ "[netflow][age]", "duration" ] 34 | rename => [ "[netflow][bytes]", "bytes" ] 35 | rename => [ "[netflow][end]", "netflow_end" ] 36 | rename => [ "[netflow][start]", "netflow_start" ] 37 | rename => [ "[netflow][pkts]", "packets" ] 38 | rename => [ "[alert][action]", "action" ] 39 | rename => [ "[alert][category]", "category" ] 40 | rename => [ "[alert][gid]", "gid" ] 41 | rename => [ "[alert][rev]", "rev" ] 42 | rename => [ "[alert][severity]", "severity" ] 43 | rename => [ "[alert][signature]", "signature" ] 44 | rename => [ "[alert][signature_id]", "sid" ] 45 | rename => [ "[http][hostname]", "virtual_host" ] 46 | rename => [ "[http][http_content_type]", "http_content_type" ] 47 | rename => [ "[http][http_method]", "method" ] 48 | rename => [ "[http][http_user_agent]", "useragent" ] 49 | rename => [ "[http][length]", "http_payload_length" ] 50 | rename => [ "[http][protocol]", "http_version" ] 51 | rename => [ "[http][status]", "status_message" ] 52 | rename => [ "[http][url]", "url" ] 53 | rename => [ "[tls][fingerprint]", "certificate_serial_number" ] 54 | rename => [ "[tls][issuerdn]", "issuer_distinguished_name" ] 55 | rename => [ "[tls][notafter]", "certificate_not_valid_after" ] 56 | rename => [ "[tls][notbefore]", "certificate_not_valid_before" ] 57 | rename => [ "[tls][subject]", "certificate_common_name" ] 58 | rename => [ "[tls][version]", "tls_version" ] 59 | add_tag => [ "ids" ] 60 | } 61 | if [bytes_to_client] and [bytes_to_server] { 62 | if [bytes_to_client] < [bytes_to_server] { 63 | ruby { 64 | code => "event.set('byte_ratio_client', event.get('bytes_to_client').to_f / event.get('bytes_to_server').to_f)" 65 | } 66 | ruby { 67 | code => "event.set('byte_ratio_server', 1 - event.get('byte_ratio_client'))" 68 | } 69 | } else { 70 | ruby { 71 | code => "event.set('byte_ratio_server', event.get('bytes_to_server').to_f / event.get('bytes_to_client').to_f)" 72 | } 73 | ruby { 74 | code => "event.set('byte_ratio_client', 1 - event.get('byte_ratio_server'))" 75 | } 76 | } 77 | # OPTIONAL - LOOKUP DNS INFORMATION ON CERTAIN CONNECTIONS 78 | # Update elasticsearch host info for this to work 79 | # Requires logstash-filter-elasticsearch plugin 80 | #if [bytes_to_client] > 10000000 { 81 | # elasticsearch { 82 | # hosts => ["${ELASTICSEARCH_HOST}"] 83 | # index => "logstash-bro-*" 84 | # query => "event_type:dns AND answers:%{[destination_ip]}" 85 | # fields => [["highest_registered_domain","highest_registered_domain"],["parent_domain","parent_domain"],["parent_domain_length","parent_domain_length"],["query","query"],["query_length","query_length"],["sub_domain","sub_domain"]] 86 | # } 87 | #} 88 | #if [bytes_to_server] > 1000000 { 89 | # elasticsearch { 90 | # hosts => ["${ELASTICSEARCH_HOST}"] 91 | # 
index => "logstash-bro-*" 92 | # query => "type:dns AND answers:%{[destination_ip]}" 93 | # fields => [["highest_registered_domain","highest_registered_domain"],["parent_domain","parent_domain"],["parent_domain_length","parent_domain_length"],["query","query"],["query_length","query_length"],["sub_domain","sub_domain"]] 94 | # } 95 | #} 96 | #if [duration] > 14399 { 97 | # elasticsearch { 98 | # hosts => ["${ELASTICSEARCH_HOST}"] 99 | # index => "logstash-bro-*" 100 | # query => "type:dns AND answers:%{[destination_ip]}" 101 | # fields => [["highest_registered_domain","highest_registered_domain"],["parent_domain","parent_domain"],["parent_domain_length","parent_domain_length"],["query","query"],["query_length","query_length"],["sub_domain","sub_domain"]] 102 | # } 103 | #} 104 | #if [signature] !~ "SCAN" and [signature] !~ "POLICY" and [event_type] == "alert" { 105 | # elasticsearch { 106 | # hosts => ["${ELASTICSEARCH_HOST}"] 107 | # index => "logstash-bro-*" 108 | # query => "type:dns AND answers:%{[destination_ip]}" 109 | # fields => [["highest_registered_domain","highest_registered_domain"],["parent_domain","parent_domain"],["parent_domain_length","parent_domain_length"],["query","query"],["query_length","query_length"],["sub_domain","sub_domain"]] 110 | # } 111 | #} 112 | } 113 | # This will translate the alert.severity field into a severity field of either High, Medium, or Low 114 | if [event_type] == "alert" { 115 | if [severity] == 1 { 116 | mutate { 117 | add_field => { "severity_level" => "High" } 118 | } 119 | } 120 | if [severity] == 2 { 121 | mutate { 122 | add_field => { "severity_level" => "Medium" } 123 | } 124 | } 125 | if [severity] == 3 { 126 | mutate { 127 | add_field => { "severity_level" => "Low" } 128 | } 129 | } 130 | mutate { 131 | gsub => [ "sid", "\D", "" ] 132 | } 133 | # This extracts the signatures rule by looking up the rule by sid number on disk. 
134 | if [gid] == 1 and [sid] > 0 and [sid] < 1000000000 { 135 | ruby { 136 | code => "sid = event.get('sid'); event.set('rule', `grep -h sid:#{sid} /etc/nsm/rules/*.rules`)" 137 | } 138 | } 139 | 140 | # If the alert is a Snort GPL alert break it apart for easier reading and categorization 141 | if [signature] =~ "GPL " { 142 | # This will parse out the category type from the alert 143 | grok { 144 | match => { "signature" => "GPL\s+%{DATA:category}\s" } 145 | } 146 | # This will store the category 147 | mutate { 148 | add_field => { "rule_type" => "Snort GPL" } 149 | lowercase => [ "category" ] 150 | } 151 | } 152 | # If the alert is an Emerging Threat alert break it apart for easier reading and categorization 153 | if [signature] =~ "ET " { 154 | # This will parse out the category type from the alert 155 | grok { 156 | match => { "signature" => "ET\s+%{DATA:category}\s" } 157 | } 158 | # This will store the category 159 | mutate { 160 | add_field => { "rule_type" => "Emerging Threats" } 161 | lowercase => [ "category" ] 162 | } 163 | } 164 | if [gid] == 1 and [sid] =~ '^[0-9]+$' { 165 | ruby { 166 | code => "sid = event.get('sid'); event.set('rule', `cat /etc/nsm/rules/*.rules | grep sid:#{sid}`)" 167 | } 168 | } 169 | # If rule is extracted try to parse out important rule metadata 170 | if [rule] { 171 | grok { 172 | break_on_match => false 173 | match => { "rule" => "(?<ms_security_bulletin>MS[01][0-9]-[0-9]+)" } 174 | match => { "rule" => "cve,(?<cve>[0-9]{0,4}-[0-9]+)" } 175 | match => { "rule" => "bugtraq,(?<bugtraq>[0-9]+)" } 176 | match => { "rule" => "securityfocus.com/bid/(?<bid>[0-9]+)" } 177 | match => { "rule" => "osvdb/(?<osvdb>[0-9]+)" } 178 | match => { "rule" => "exploit-db.com/exploits/(?<exploit_db>[0-9]+)" } 179 | tag_on_failure => [] 180 | } 181 | } 182 | # This section adds URLs to lookup information about a rule online 183 | if [rule_type] == "Snort GPL" { 184 | mutate { 185 | add_field => [ "signature_info", "https://www.snort.org/search?query=%{gid}-%{sid}" ] 186 | } 187 | } 188 | if [rule_type] == "Emerging Threats" { 189 | mutate { 190 | add_field => [ "signature_info", "http://doc.emergingthreats.net/%{sid}" ] 191 | } 192 | } 193 | } 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/8001_postprocess_common_ip_augmentation.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [source_ip] { 8 | if [source_ip] == "-" { 9 | mutate { 10 | replace => { "source_ip" => "0.0.0.0" } 11 | } 12 | } 13 | grok { 14 | match => { "source_ip" => "%{IPV6:source_ip_v6}" } 15 | remove_field => [ "source_ip" ] 16 | tag_on_failure => [] 17 | } 18 | geoip { 19 | source => "source_ip" 20 | target => "source_geo" 21 | } 22 | geoip { 23 | default_database_type => "ASN" 24 | source => "source_ip" 25 | target => "source_geo" 26 | } 27 | if [source_ip] { 28 | mutate { 29 | add_field => { "ips" => "%{source_ip}" } 30 | } 31 | } 32 | } 33 | if [destination_ip] { 34 | if [destination_ip] == "-" { 35 | mutate { 36 | replace => { "destination_ip" => "0.0.0.0" } 37 | } 38 | } 39 | grok { 40 | match => { "destination_ip" => "%{IPV6:destination_ip_v6}" } 41 | remove_field => [ "destination_ip" ] 42 | tag_on_failure => [] 43 | } 44 | geoip { 45 | source => "destination_ip" 46 | target => "destination_geo" 47 | } 48 | geoip { 49 |
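# The second geoip pass below uses the ASN database to add AS number and organization to the same destination_geo target.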
default_database_type => "ASN" 50 | source => "destination_ip" 51 | target => "destination_geo" 52 | } 53 | if [destination_ip] { 54 | mutate { 55 | add_field => { "ips" => "%{destination_ip}" } 56 | } 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/8006_postprocess_dns.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "bro_dns" { 8 | # Used for whois lookups - can create log loop 9 | if [query] =~ "^whois\." { 10 | drop { } 11 | } 12 | if [query] =~ "labmeinc\.internal$" { 13 | mutate { 14 | add_tag => "internal_domain" 15 | add_field => { "highest_registered_domain" => "labmeinc.internal" } 16 | } 17 | } 18 | 19 | # REPLACE labmeinc.internal above with your internal domain 20 | if "internal_domain" not in [tags] { 21 | mutate { 22 | lowercase => [ "query" ] 23 | } 24 | if [query_type_name] != "NB" and [query_type_name] != "TKEY" and [query_type_name] != "NBSTAT" and [query_type_name] != "PTR" { 25 | tld { 26 | source => "query" 27 | } 28 | ruby { 29 | code => "event.set('query_length', event.get('query').length)" 30 | } 31 | mutate { 32 | rename => { "[SubLog][sessionid]" => "sub_session_id" } 33 | rename => { "[tld][domain]" => "highest_registered_domain" } 34 | rename => { "[tld][trd]" => "subdomain" } 35 | rename => { "[tld][tld]" => "top_level_domain" } 36 | rename => { "[tld][sld]" => "parent_domain" } 37 | } 38 | if [parent_domain] { 39 | ruby { 40 | code => "event.set('parent_domain_length', event.get('parent_domain').length)" 41 | } 42 | } 43 | if [subdomain] { 44 | ruby { 45 | code => "event.set('subdomain_length', event.get('subdomain').length)" 46 | } 47 | } 48 | } 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/8007_postprocess_http.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "bro_http" { 8 | if [uri] { 9 | ruby { 10 | code => "event.set('uri_length', event.get('uri').length)" 11 | } 12 | } 13 | if [virtual_host] { 14 | ruby { 15 | code => "event.set('virtual_host_length', event.get('virtual_host').length)" 16 | } 17 | } 18 | if [useragent] { 19 | ruby { 20 | code => "event.set('useragent_length', event.get('useragent').length)" 21 | } 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/8009_postprocess_dns_top1m_tagging.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 4/11/2017 5 | 6 | filter { 7 | if [highest_registered_domain] and "internal_domain" not in [tags] { 8 | rest { 9 | request => { 10 | url => "http://localhost:20000/alexa/%{highest_registered_domain}" 11 | } 12 | sprintf => true 13 | json => false 14 | target => "site" 15 | } 16 | if [site] != "0" and [site] { 17 | mutate { 18 |
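# A non-zero site value (hypothetically, an Alexa rank such as 497) means the domain is in the top 1 million,
# so tag it and drop the helper field.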
add_tag => [ "top-1m" ] 19 | remove_field => [ "site" ] 20 | } 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/8010_postprocess_dns_creation_date.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolution.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "bro_dns" { 8 | if [highest_registered_domain] and "top-1m" not in [tags] and "internal_domain" not in [tags] { 9 | rest { 10 | request => { 11 | url => "http://localhost:20000/domain/creation_date/%{highest_registered_domain}" 12 | } 13 | sprintf => true 14 | json => false 15 | target => "creation_date" 16 | } 17 | if [creation_date] =~ "No whois" { 18 | mutate { remove_field => [ "creation_date" ] } 19 | } 20 | if [creation_date] and [creation_date] != "" { 21 | grok { 22 | match => { "creation_date" => "(?[12][0-9]{3}-[01][0-9]-[0-3][0-9])" } 23 | remove_field => [ "creation_date" ] 24 | } 25 | if [modified_creation_date] { 26 | date { 27 | match => [ "modified_creation_date", "yyyy-MM-dd" ] 28 | remove_field => [ "modified_creation_date" ] 29 | target => "domain_creation_date" 30 | } 31 | } 32 | } 33 | if [creation_date] == "" { 34 | mutate { remove_field => [ "creation_date" ] } 35 | } 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/8200_postprocess_tagging.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [destination_ip] { 3 | if [destination_ip] =~ "2(?:2[4-9]|3\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d?|0)){3}" { 4 | mutate { 5 | add_tag => [ "multicast" ] 6 | } 7 | } 8 | if [destination_ip] == "255.255.255.255" { 9 | mutate { 10 | add_tag => [ "broadcast" ] 11 | } 12 | } 13 | if [destination_ip] =~ "\.255$" { 14 | mutate { 15 | add_tag => [ "possible_broadcast" ] 16 | } 17 | } 18 | if [destination_ip] and "multicast" not in [tags] and "broadcast" not in [tags] { 19 | if [destination_ip] =~ "^10\." or [destination_ip] =~ "192\.168\." or [destination_ip] =~ "172\.(1[6-9]|2[0-9]|3[0-1])\." { 20 | mutate { 21 | add_tag => [ "internal_destination" ] 22 | } 23 | } else { 24 | mutate { 25 | add_tag => [ "external_destination" ] 26 | } 27 | } 28 | if "internal_destination" not in [tags] { 29 | if [destination_ip] == "198.41.0.4" or [destination_ip] == "192.228.79.201" or [destination_ip] == "192.33.4.12" or [destination_ip] == "199.7.91.13" or [destination_ip] == "192.203.230.10" or [destination_ip] == "192.5.5.241" or [destination_ip] == "192.112.36.4" or [destination_ip] == "198.97.190.53" or [destination_ip] == "192.36.148.17" or [destination_ip] == "192.58.128.30" or [destination_ip] == "193.0.14.129" or [destination_ip] == "199.7.83.42" or [destination_ip] == "202.12.27.33" { 30 | mutate { 31 | add_tag => [ "root_dns_server" ] 32 | } 33 | } 34 | } 35 | } 36 | } 37 | if [source_ip] { 38 | if [source_ip] =~ "^10\." or [source_ip] =~ "192\.168\." or [source_ip] =~ "172\.(1[6-9]|2[0-9]|3[0-1])\." 
{ 39 | mutate { 40 | add_tag => [ "internal_source" ] 41 | } 42 | } else { 43 | mutate { 44 | add_tag => [ "external_source" ] 45 | } 46 | } 47 | if "internal_source" not in [tags] { 48 | if [source_ip] == "198.41.0.4" or [source_ip] == "192.228.79.201" or [source_ip] == "192.33.4.12" or [source_ip] == "199.7.91.13" or [source_ip] == "192.203.230.10" or [source_ip] == "192.5.5.241" or [source_ip] == "192.112.36.4" or [source_ip] == "198.97.190.53" or [source_ip] == "192.36.148.17" or [source_ip] == "192.58.128.30" or [source_ip] == "193.0.14.129" or [source_ip] == "199.7.83.42" or [source_ip] == "202.12.27.33" { 49 | mutate { 50 | add_tag => [ "root_dns_server" ] 51 | } 52 | } 53 | } 54 | if "internal_source" in [tags] and "internal_destination" in [tags] { 55 | mutate { add_tag => [ "internal_only" ] } 56 | } 57 | # This section is for system tagging 58 | if [destination_port] { 59 | if [destination_port] == 53 and "internal_destination" in [tags] { 60 | mutate { add_tag => [ "dns_server" ] } 61 | } 62 | if [destination_port] == 21 and "internal_destination" in [tags] { 63 | mutate { add_tag => [ "ftp_server" ] } 64 | } 65 | if [destination_port] == 22 and "internal_destination" in [tags] { 66 | mutate { add_tag => [ "ssh_server" ] } 67 | } 68 | if [destination_port] == 139 and "internal_destination" in [tags] { 69 | mutate { add_tag => [ "smb_server", "old_smb_use" ] } 70 | } 71 | if [destination_port] == 445 and "internal_destination" in [tags] { 72 | mutate { add_tag => [ "smb_server" ] } 73 | } 74 | if [destination_port] == 389 and "internal_destination" in [tags] { 75 | mutate { add_tag => [ "ldap_server" ] } 76 | } 77 | if [destination_port] == 636 and "internal_destination" in [tags] { 78 | mutate { add_tag => [ "ldaps_server" ] } 79 | } 80 | if [destination_port] == 80 and "internal_destination" in [tags] { 81 | mutate { add_tag => [ "web_server", "http_server" ] } 82 | } 83 | if [destination_port] == 443 and "internal_destination" in [tags] { 84 | mutate { add_tag => [ "web_server", "https_server" ] } 85 | } 86 | if [destination_port] == 1433 and "internal_destination" in [tags] { 87 | mutate { add_tag => [ "mssql_server" ] } 88 | } 89 | if [destination_port] == 3389 and "internal_destination" in [tags] { 90 | mutate { add_tag => [ "rdp_server" ] } 91 | } 92 | if [destination_port] == 1521 and "internal_destination" in [tags] { 93 | mutate { add_tag => [ "oracle_server" ] } 94 | } 95 | if [destination_port] == 123 and "internal_destination" in [tags] { 96 | mutate { add_tag => [ "ntp_server" ] } 97 | } 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /logstash/logstash_configs/bro-tsv/9000_output.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | 6 | output { 7 | if "bro" in [tags] { 8 | elasticsearch { 9 | index => "logstash-bro-%{+YYYY.MM.dd}" 10 | hosts => "${ELASTICSEARCH_HOST}" 11 | } 12 | } 13 | if "suricata" in [tags] { 14 | elasticsearch { 15 | index => "logstash-suricata-%{+YYYY.MM.dd}" 16 | hosts => "${ELASTICSEARCH_HOST}" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /logstash/logstash_configs/firewall/0001_input_fortinet.conf: -------------------------------------------------------------------------------- 1 | input { 2 | udp { 3
| port => 7001 4 | tags => [ "firewall", "fortinet" ] 5 | add_field => { "log_event_type" => "fortinet" } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /logstash/logstash_configs/firewall/8000_postprocess_tagging.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [destination_ip] { 3 | if [destination_ip] =~ "2(?:2[4-9]|3\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d?|0)){3}" { 4 | mutate { 5 | add_tag => [ "multicast" ] 6 | } 7 | } 8 | if [destination_ip] == "255.255.255.255" { 9 | mutate { 10 | add_tag => [ "broadcast" ] 11 | } 12 | } 13 | if [destination_ip] =~ "\.255$" { 14 | mutate { 15 | add_tag => [ "possible_broadcast" ] 16 | } 17 | } 18 | if [destination_ip] and "multicast" not in [tags] and "broadcast" not in [tags] { 19 | if [destination_ip] =~ "^10\." or [destination_ip] =~ "192\.168\." or [destination_ip] =~ "172\.(1[6-9]|2[0-9]|3[0-1])\." { 20 | mutate { 21 | add_tag => [ "internal_destination" ] 22 | } 23 | } else { 24 | mutate { 25 | add_tag => [ "external_destination" ] 26 | } 27 | } 28 | if "internal_destination" not in [tags] { 29 | if [destination_ip] == "198.41.0.4" or [destination_ip] == "192.228.79.201" or [destination_ip] == "192.33.4.12" or [destination_ip] == "199.7.91.13" or [destination_ip] == "192.203.230.10" or [destination_ip] == "192.5.5.241" or [destination_ip] == "192.112.36.4" or [destination_ip] == "198.97.190.53" or [destination_ip] == "192.36.148.17" or [destination_ip] == "192.58.128.30" or [destination_ip] == "193.0.14.129" or [destination_ip] == "199.7.83.42" or [destination_ip] == "202.12.27.33" { 30 | mutate { 31 | add_tag => [ "root_dns_server" ] 32 | } 33 | } 34 | } 35 | } 36 | } 37 | if [source_ip] { 38 | if [source_ip] =~ "^10\." or [source_ip] =~ "192\.168\." or [source_ip] =~ "172\.(1[6-9]|2[0-9]|3[0-1])\." 
{ 39 | mutate { 40 | add_tag => [ "internal_source" ] 41 | } 42 | } else { 43 | mutate { 44 | add_tag => [ "external_source" ] 45 | } 46 | } 47 | if "internal_source" not in [tags] { 48 | if [source_ip] == "198.41.0.4" or [source_ip] == "192.228.79.201" or [source_ip] == "192.33.4.12" or [source_ip] == "199.7.91.13" or [source_ip] == "192.203.230.10" or [source_ip] == "192.5.5.241" or [source_ip] == "192.112.36.4" or [source_ip] == "198.97.190.53" or [source_ip] == "192.36.148.17" or [source_ip] == "192.58.128.30" or [source_ip] == "193.0.14.129" or [source_ip] == "199.7.83.42" or [source_ip] == "202.12.27.33" { 49 | mutate { 50 | add_tag => [ "root_dns_server" ] 51 | } 52 | } 53 | } 54 | if "internal_source" in [tags] and "internal_destination" in [tags] { 55 | mutate { add_tag => [ "internal_only" ] } 56 | } 57 | # This section is for system tagging 58 | if [destination_port] { 59 | if [destination_port] == 53 and "internal_destination" in [tags] { 60 | mutate { add_tag => [ "dns_server" ] } 61 | } 62 | if [destination_port] == 21 and "internal_destination" in [tags] { 63 | mutate { add_tag => [ "ftp_server" ] } 64 | } 65 | if [destination_port] == 22 and "internal_destination" in [tags] { 66 | mutate { add_tag => [ "ssh_server" ] } 67 | } 68 | if [destination_port] == 139 and "internal_destination" in [tags] { 69 | mutate { add_tag => [ "smb_server", "old_smb_use" ] } 70 | } 71 | if [destination_port] == 445 and "internal_destination" in [tags] { 72 | mutate { add_tag => [ "smb_server" ] } 73 | } 74 | if [destination_port] == 389 and "internal_destination" in [tags] { 75 | mutate { add_tag => [ "ldap_server" ] } 76 | } 77 | if [destination_port] == 636 and "internal_destination" in [tags] { 78 | mutate { add_tag => [ "ldaps_server" ] } 79 | } 80 | if [destination_port] == 80 and "internal_destination" in [tags] { 81 | mutate { add_tag => [ "web_server", "http_server" ] } 82 | } 83 | if [destination_port] == 443 and "internal_destination" in [tags] { 84 | mutate { add_tag => [ "web_server", "https_server" ] } 85 | } 86 | if [destination_port] == 1433 and "internal_destination" in [tags] { 87 | mutate { add_tag => [ "mssql_server" ] } 88 | } 89 | if [destination_port] == 3389 and "internal_destination" in [tags] { 90 | mutate { add_tag => [ "rdp_server" ] } 91 | } 92 | if [destination_port] == 1521 and "internal_destination" in [tags] { 93 | mutate { add_tag => [ "oracle_server" ] } 94 | } 95 | if [destination_port] == 123 and "internal_destination" in [tags] { 96 | mutate { add_tag => [ "ntp_server" ] } 97 | } 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /logstash/logstash_configs/firewall/8001_postprocess_common_ip_augmentation.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [source_ip] { 8 | if [source_ip] == "-" { 9 | mutate { 10 | replace => { "source_ip" => "0.0.0.0" } 11 | } 12 | } 13 | grok { 14 | match => { "source_ip" => "%{IPV6:source_ip_v6}" } 15 | remove_field => [ "source_ip" ] 16 | tag_on_failure => [] 17 | } 18 | if "internal_source" not in [tags] { 19 | geoip { 20 | source => "source_ip" 21 | target => "source_geo" 22 | } 23 | geoip { 24 | default_database_type => "ASN" 25 | source => "source_ip" 26 | target => "source_geo" 27 | } 28 | } 29 | if [source_ip] { 30 |
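# Copy the surviving source address into the shared ips field so a single query can match either direction of a conversation.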
mutate { 31 | add_field => { "ips" => "%{source_ip}" } 32 | } 33 | } 34 | } 35 | if [destination_ip] { 36 | if [destination_ip] == "-" { 37 | mutate { 38 | replace => { "destination_ip" => "0.0.0.0" } 39 | } 40 | } 41 | grok { 42 | match => { "destination_ip" => "%{IPV6:destination_ip_v6}" } 43 | remove_field => [ "destination_ip" ] 44 | tag_on_failure => [] 45 | } 46 | if "internal_destination" not in [tags] { 47 | geoip { 48 | source => "destination_ip" 49 | target => "destination_geo" 50 | } 51 | geoip { 52 | default_database_type => "ASN" 53 | source => "destination_ip" 54 | target => "destination_geo" 55 | } 56 | } 57 | if [destination_ip] { 58 | mutate { 59 | add_field => { "ips" => "%{destination_ip}" } 60 | } 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /logstash/logstash_configs/firewall/9000_output_fortinet.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolution.com 4 | # Last Update: 12/9/2016 5 | 6 | output { 7 | if "firewall" in [tags] { 8 | # stdout { codec => rubydebug } 9 | elasticsearch { 10 | index => "logstash-firewall-%{+YYYY.MM.dd}" 11 | hosts => "${ELASTICSEARCH_HOST}" 12 | } 13 | } 14 | } 15 | 16 | -------------------------------------------------------------------------------- /logstash/logstash_configs/monitoring/0000_input.conf: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => 5044 4 | } 5 | } -------------------------------------------------------------------------------- /logstash/logstash_configs/monitoring/9000_output.conf: -------------------------------------------------------------------------------- 1 | output { 2 | elasticsearch { 3 | index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" 4 | hosts => "${ELASTICSEARCH_HOST}" 5 | } 6 | } -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/0001_input_json.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/15/2017 5 | 6 | input { 7 | # Assumes json from syslog-ng 8 | tcp { 9 | port => 6050 10 | codec => json 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/0002_input_beats.conf: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => "5046" 4 | tags => "suricata" 5 | } 6 | } -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/1000_syslogng_preprocess.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 3/23/2018 5 | 6 | filter { 7 | mutate { 8 | rename => { "MESSAGE" => "message" } 9 | rename => { "PROGRAM" => "log_event_type" } 10 | rename => { "FACILITY" => "syslog-facility" } 11 | rename => { "FILE_NAME" => "syslog-file_name" } 12 | rename => { "HOST" => "syslog-host" } 13 | 
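# syslog-ng macro names arrive uppercase; e.g. a HOST_FROM value of sensor01 (hypothetical) becomes syslog-host_from and is lowercased below.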
rename => { "HOST_FROM" => "syslog-host_from" } 14 | rename => { "LEGACY_MSGHDR" => "syslog-legacy_msghdr" } 15 | rename => { "PID" => "syslog-pid" } 16 | rename => { "PRIORITY" => "syslog-priority" } 17 | rename => { "SOURCEIP" => "syslog-sourceip" } 18 | rename => { "TAGS" => "syslog-tags" } 19 | lowercase => [ "syslog-host_from" ] 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/1002_suricata_preprocess.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 3/23/2018 5 | # 6 | 7 | filter { 8 | if "suricata" in [tags] { 9 | mutate { 10 | add_field => { "log_event_type" => "suricata" } 11 | replace => { "host" => "%{[beat][hostname]}.lazard.com" } 12 | add_field => { "syslog-file_name" => "%{source}" } 13 | } 14 | } 15 | if [log_event_type] == "suricata" { 16 | json { 17 | source => "message" 18 | remove_field => "message" 19 | } 20 | date { 21 | match => [ "timestamp", "ISO8601" ] 22 | remove_field => [ "timestamp" ] 23 | } 24 | if [syslog-file_name] and [syslog-host] { 25 | mutate { 26 | add_field => { "source" => "%{syslog-file_name}" } 27 | } 28 | } 29 | mutate { 30 | rename => { "src_ip" => "source_ip" } 31 | rename => { "dest_ip" => "destination_ip" } 32 | rename => { "src_port" => "source_port" } 33 | rename => { "dest_port" => "destination_port" } 34 | rename => { "tx_id" => "transaction_id" } 35 | } 36 | if [event_type] == "flow" or [event_type] == "alert" { 37 | mutate { 38 | rename => { "[flow][age]" => "duration" } 39 | rename => { "[flow][alerted]" => "flow_alerted" } 40 | rename => { "[flow][bytes_toclient]" => "bytes_to_client" } 41 | rename => { "[flow][bytes_toserver]" => "bytes_to_server" } 42 | rename => { "[flow][end]" => "flow_end" } 43 | rename => { "[flow][pkts_toclient]" => "packets_to_client" } 44 | rename => { "[flow][pkts_toserver]" => "packets_to_server" } 45 | rename => { "[flow][reason]" => "reason" } 46 | rename => { "[flow][start]" => "flow_start" } 47 | rename => { "[flow][state]" => "state" } 48 | } 49 | } 50 | if [event_type] == "netflow" or [event_type] == "alert" { 51 | mutate { 52 | rename => { "[netflow][age]" => "duration" } 53 | rename => { "[netflow][bytes]" => "bytes" } 54 | rename => { "[netflow][end]" => "netflow_end" } 55 | rename => { "[netflow][start]" => "netflow_start" } 56 | rename => { "[netflow][pkts]" => "packets" } 57 | } 58 | } 59 | if [event_type] == "alert" { 60 | mutate { 61 | rename => { "[alert][action]" => "action" } 62 | rename => { "[alert][category]" => "category" } 63 | rename => { "[alert][gid]" => "gid" } 64 | rename => { "[alert][rev]" => "rev" } 65 | rename => { "[alert][severity]" => "severity" } 66 | rename => { "[alert][signature]" => "signature" } 67 | rename => { "[alert][signature_id]" => "sid" } 68 | add_tag => [ "ids" ] 69 | } 70 | } 71 | if [event_type] == "dns" or [event_type] == "alert" { 72 | mutate { 73 | rename => { "[dns][id]" => "dns_id" } 74 | rename => { "[dns][rcode]" => "response_code" } 75 | rename => { "[dns][rdata]" => "answer" } 76 | rename => { "[dns][rrname]" => "query" } 77 | rename => { "[dns][rrtype]" => "query_type" } 78 | rename => { "[dns][ttl]" => "ttl" } 79 | rename => { "[dns][type]" => "dns_type" } 80 | } 81 | } 82 | if [event_type] == "http" or [event_type] == "alert" { 83 | mutate { 84 | 
rename => { "[http][hostname]" => "virtual_host" } 85 | rename => { "[http][http_content_type]" => "http_content_type" } 86 | rename => { "[http][http_method]" => "method" } 87 | rename => { "[http][http_user_agent]" => "useragent" } 88 | rename => { "[http][length]" => "http_payload_length" } 89 | rename => { "[http][protocol]" => "http_version" } 90 | rename => { "[http][status]" => "status_message" } 91 | rename => { "[http][url]" => "uri" } 92 | } 93 | } 94 | if [event_type] == "ssh" or [event_type] == "alert" { 95 | mutate { 96 | rename => { "[ssh][client][proto_version]" => "ssh_client_protocol_version" } 97 | rename => { "[ssh][client][software_version]" => "ssh_client_software_version" } 98 | rename => { "[ssh][server][proto_version]" => "ssh_server_protocol_version" } 99 | rename => { "[ssh][server][software_version]" => "ssh_server_software_version" } 100 | } 101 | } 102 | if [event_type] == "tls" or [event_type] == "alert" { 103 | mutate { 104 | rename => { "[tls][fingerprint]" => "certificate_serial_number" } 105 | rename => { "[tls][issuerdn]" => "issuer_distinguished_name" } 106 | rename => { "[tls][notafter]" => "certificate_not_valid_after" } 107 | rename => { "[tls][notbefore]" => "certificate_not_valid_before" } 108 | rename => { "[tls][subject]" => "certificate_common_name" } 109 | rename => { "[tls][version]" => "tls_version" } 110 | } 111 | } 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/1003_ossec_preprocess.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 3/23/2018 5 | # 6 | 7 | filter { 8 | if [log_event_type] == "ossec" { 9 | mutate { 10 | add_tag => [ "hids" ] 11 | } 12 | grok { 13 | match => ["message", "^Alert Level: %{NONNEGINT;alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; user: +%{DATA:username}; %{SYSLOGTIMESTAMP} %{DATA:host} %{DATA:process}\[%{INT:pid}]: %{GREEDYDATA:details}", 14 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{SYSLOGTIMESTAMP:timestamp} %{DATA:host} %{DATA:process}\[%{NONNEGINT:pid}]: %{GREEDYDATA:details}", 15 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{SYSLOGTIMESTAMP} %{DATA:host} %{DATA:process}\[%{NONNEGINT:pid}]: %{GREEDYDATA:details}", 16 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{SYSLOGTIMESTAMP:timestamp} %{DATA:host} %{DATA:program}: +%{DATA:username} : TTY=%{DATA:tty} ; PWD=%{DATA:dir} ; USER=%{DATA:escalated_user} ; COMMAND=%{GREEDYDATA:command}", 17 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{SYSLOGTIMESTAMP:timestamp} %{DATA:host} %{DATA:program}: %{GREEDYDATA:details}", 18 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{SYSLOGTIMESTAMP:timestamp} %{DATA:host} %{DATA:program}: +%{DATA:username} : %{GREEDYDATA:details}", 19 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; srcip: 
%{IP:source_ip};%{GREEDYDATA:details}", 20 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{DATA:username}: %{DATA}: \'%{DATA}': %{DATA:interface}: %{INT:num_packets}", 21 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{DATA:username}: %{GREEDYDATA:details}.", 22 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; user: +%{DATA:username};", 23 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{DATA}: %{DATA}: \'%{DATA}': %{DATA:interface}: %{NONNEGINT:num_packets}", 24 | "message", "^Alert Level: %{NONNEGINT:alert_level}; Rule: %{NONNEGINT:rule} - %{DATA:description}; Location: %{DATA:location}; %{GREEDYDATA:details}"] 25 | # Add tag for OSSEC alerts 26 | add_tag => [ "alert" ] 27 | } 28 | translate { 29 | field => "alert_level" 30 | destination => "classification" 31 | dictionary => [ 32 | "1", "None", 33 | "2", "System low priority notification", 34 | "3", "Successful/authorized event", 35 | "4", "System low priority error", 36 | "5", "User generated error", 37 | "6", "Low relevance attack", 38 | "7", '"Bad word" matching', 39 | "8", "First time seen", 40 | "9", "Error from invalid source", 41 | "10", "Multiple user generated errors", 42 | "11", "Integrity checking warning", 43 | "12", "High importance event", 44 | "13", "Unusual error (high importance)", 45 | "14", "High importance security event", 46 | "15", "Severe attack" 47 | ] 48 | } 49 | } 50 | 51 | if [log_event_type] == "ossec" and "alert" not in [tags] { 52 | grok { 53 | match => ["message", "%{DATA:username} : TTY=%{DATA:tty} ; PWD=%{DATA:dir} ; USER=%{DATA:escalated_user} ; COMMAND=%{GREEDYDATA:command}"] 54 | } 55 | } 56 | 57 | # OSSEC Archive Logs 58 | if [log_event_type] == "ossec_archive" { 59 | grok { 60 | match => ["message",'%{YEAR:year} %{SYSLOGTIMESTAMP:timestamp} %{DATA:location} %{IP:source_ip} - %{DATA:username} \[%{DATA:request_timestamp}] "%{DATA:method} %{DATA:requested_resource} %{DATA:protocol}\/%{DATA:protocol_version}" %{NONNEGINT:status_code} %{NONNEGINT:object_size} "%{DATA:referrer}" "%{DATA:user_agent}"', 61 | "message","%{YEAR:year} %{SYSLOGTIMESTAMP:timestamp} %{DATA:location} %{SYSLOGTIMESTAMP:ossec_timestamp} %{DATA:host} %{DATA:process}\[%{NONNEGINT:pid}]: \(%{DATA:username}\) CMD \(%{DATA:command}\)", 62 | "message", "%{YEAR:year} %{SYSLOGTIMESTAMP:timestamp} %{DATA:location} %{GREEDYDATA:details}","message","%{YEAR:year} %{SYSLOGTIMESTAMP:timestamp} %{DATA:location} %{SYSLOGTIMESTAMP:ossec_timestamp} %{DATA:ossec_host} %{DATA:process}\[%{NONNEGINT:pid}]: %{GREEDYDATA:details}", 63 | "message","%{DATA:age} %{DATA:program} %{DATA} '%{DATA:checksum}'"] 64 | remove_field => [ "ossec_timestamp" ] 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/6000_bro.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 3/23/2018 5 | # 6 | 7 | filter { 8 | if [log_event_type] == "bro" { 9 | if [duration] == "-" { 10 | mutate { 11 | replace => [ "duration", "0" ] 12 | } 13 | } 14 | if
[original_bytes] == "-" { 15 | mutate { 16 | replace => [ "original_bytes", "0" ] 17 | } 18 | } 19 | # If MissedBytes is unspecified set it to zero so it is an integer 20 | if [missed_bytes] == "-" { 21 | mutate { 22 | replace => [ "missed_bytes", "0" ] 23 | } 24 | } 25 | # If OriginalIPBytes is unspecified set it to zero so it is an integer 26 | if [original_ip_bytes] == "-" { 27 | mutate { 28 | replace => [ "original_ip_bytes", "0" ] 29 | } 30 | } 31 | # If RespondBytes is unspecified set it to zero so it is an integer 32 | if [respond_bytes] == "-" { 33 | mutate { 34 | replace => [ "respond_bytes", "0" ] 35 | } 36 | } 37 | # If RespondIPBytes is unspecified set it to zero so it is an integer 38 | if [respond_ip_bytes] == "-" { 39 | mutate { 40 | replace => [ "respond_ip_bytes", "0" ] 41 | } 42 | } 43 | if [source_port] == "-" { 44 | mutate { 45 | remove_field => ["source_port"] 46 | } 47 | } 48 | if [destination_port] == "-" { 49 | mutate { 50 | remove_field => ["destination_port"] 51 | } 52 | } 53 | if [virtual_host] == "-" { 54 | mutate { 55 | remove_field => ["virtual_host"] 56 | } 57 | } 58 | if [user] == "-" { 59 | mutate { 60 | remove_field => ["user"] 61 | } 62 | } 63 | 64 | # I renamed conn_uids to uid so that it is easy to pivot to all things tied to a connection 65 | mutate { 66 | rename => [ "connection_uids", "uid" ] 67 | } 68 | # If total_bytes is set to "-" change it to 0 so it is an integer 69 | if [total_bytes] == "-" { 70 | mutate { 71 | replace => [ "total_bytes", "0" ] 72 | } 73 | } 74 | # If seen_bytes is set to "-" change it to 0 so it is an integer 75 | if [seen_bytes] == "-" { 76 | mutate { 77 | replace => [ "seen_bytes", "0" ] 78 | } 79 | } 80 | # If missing_bytes is set to "-" change it to 0 so it is an integer 81 | if [missing_bytes] == "-" { 82 | mutate { 83 | replace => [ "missing_bytes", "0" ] 84 | } 85 | } 86 | # If pverflow_bytes is set to "-" change it to 0 so it is an integer 87 | if [overflow_bytes] == "-" { 88 | mutate { 89 | replace => [ "overflow_bytes", "0" ] 90 | } 91 | } 92 | if [original_bytes] { 93 | if [respond_bytes] { 94 | ruby { 95 | code => "event.set('total_bytes',event.get('original_bytes') + event.get('respond_bytes'))" 96 | } 97 | } 98 | } 99 | # I recommend changing the field types below to integer or floats so searches can do greater than or less than 100 | # and also so math functions can be ran against them 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/6400_suricata.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # Email: jhenderson@tekrefresh.comes 3 | # Last Update: 11/16/2017 4 | # 5 | # This conf file is based on accepting logs for suricata json events 6 | filter { 7 | if [log_event_type] == "suricata" { 8 | if [bytes_to_client] and [bytes_to_server] { 9 | if [bytes_to_client] < [bytes_to_server] { 10 | ruby { 11 | code => "event.set('byte_ratio_client', event.get('bytes_to_client').to_f / event.get('bytes_to_server').to_f)" 12 | } 13 | ruby { 14 | code => "event.set('byte_ratio_server', 1 - event.get('byte_ratio_client'))" 15 | } 16 | } else { 17 | ruby { 18 | code => "event.set('byte_ratio_server', event.get('bytes_to_server').to_f / event.get('bytes_to_client').to_f)" 19 | } 20 | ruby { 21 | code => "event.set('byte_ratio_client', 1 - event.get('byte_ratio_server'))" 22 | } 23 | } 24 | } 25 | 26 | # This will translate the alert.severity field into a 
severity field of either High, Medium, or Low 27 | if [event_type] == "alert" { 28 | if [severity] == 1 { 29 | mutate { 30 | add_field => { "severity_level" => "High" } 31 | } 32 | } 33 | if [severity] == 2 { 34 | mutate { 35 | add_field => { "severity_level" => "Medium" } 36 | } 37 | } 38 | if [severity] == 3 { 39 | mutate { 40 | add_field => { "severity_level" => "Low" } 41 | } 42 | } 43 | mutate { 44 | gsub => [ "sid", "\D", "" ] 45 | } 46 | 47 | # If the alert is a Snort GPL alert break it apart for easier reading and categorization 48 | if [signature] =~ "GPL " { 49 | # This will parse out the category type from the alert 50 | grok { 51 | match => { "signature" => "GPL\s+%{DATA:category}\s" } 52 | } 53 | # This will store the category 54 | mutate { 55 | add_field => { "rule_type" => "Snort GPL" } 56 | lowercase => [ "category" ] 57 | } 58 | } 59 | # If the alert is an Emerging Threat alert break it apart for easier reading and categorization 60 | if [signature] =~ "ET " { 61 | # This will parse out the category type from the alert 62 | grok { 63 | match => { "signature" => "ET\s+%{DATA:category}\s" } 64 | } 65 | # This will store the category 66 | mutate { 67 | add_field => { "rule_type" => "Emerging Threats" } 68 | } 69 | } 70 | mutate { 71 | lowercase => [ "category" ] 72 | } 73 | if [gid] == 1 and [sid] =~ '^[0-9]+$' { 74 | translate { 75 | field => "sid" 76 | destination => "rule" 77 | dictionary_path => "/opt/elastic_stack/logstash/translate/rules.csv" 78 | } 79 | } 80 | # If rule is extracted try to parse out important rule metadata 81 | if [rule] { 82 | grok { 83 | break_on_match => false 84 | match => { "rule" => "(?<ms_bulletin>MS[01][0-9]-[0-9]+)" } 85 | match => { "rule" => "cve,(?<cve>[0-9]{0,4}-[0-9]+)" } 86 | match => { "rule" => "bugtraq,(?<bugtraq_id>[0-9]+)" } 87 | match => { "rule" => "securityfocus.com/bid/(?<bid>[0-9]+)" } 88 | match => { "rule" => "osvdb/(?<osvdb_id>[0-9]+)" } 89 | match => { "rule" => "exploit-db.com/exploits/(?<exploit_db_id>[0-9]+)" } 90 | tag_on_failure => [] 91 | } 92 | } 93 | # This section adds URLs to lookup information about a rule online 94 | if [rule_type] == "Snort GPL" { 95 | mutate { 96 | add_field => [ "signature_info", "https://www.snort.org/search?query=%{gid}-%{sid}" ] 97 | } 98 | } 99 | if [rule_type] == "Emerging Threats" { 100 | mutate { 101 | add_field => [ "signature_info", "http://doc.emergingthreats.net/%{sid}" ] 102 | } 103 | } 104 | # OPTIONAL - LOOKUP DNS INFORMATION ON CERTAIN CONNECTIONS 105 | # Update elasticsearch host info for this to work 106 | # Requires logstash-filter-elasticsearch plugin 107 | if "potential corporate privacy violation" not in [category] and "info" not in [category] and "generic protocol command decode" not in [category] { 108 | elasticsearch { 109 | hosts => ["${ELASTICSEARCH_HOST}"] 110 | index => "logstash-bro-*" 111 | query => "log_event_type:bro AND event_type:dns AND answers:%{[destination_ip]}" 112 | fields => [["highest_registered_domain","source_highest_registered_domain"],["parent_domain","source_parent_domain"]] 113 | } 114 | elasticsearch { 115 | hosts => ["${ELASTICSEARCH_HOST}"] 116 | index => "logstash-bro-*" 117 | query => "log_event_type:bro AND event_type:dns AND answers:%{[source_ip]}" 118 | fields => [["highest_registered_domain","destination_highest_registered_domain"],["parent_domain","destination_parent_domain"]] 119 | } 120 | } 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8000_postprocess_tagging.conf:
-------------------------------------------------------------------------------- 1 | filter { 2 | if [destination_ip] { 3 | if [destination_ip] =~ "^2(?:2[4-9]|3\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d?|0)){3}$" { 4 | mutate { 5 | add_tag => [ "multicast" ] 6 | } 7 | } 8 | if [destination_ip] == "255.255.255.255" { 9 | mutate { 10 | add_tag => [ "broadcast" ] 11 | } 12 | } 13 | if [destination_ip] =~ "\.255$" { 14 | mutate { 15 | add_tag => [ "possible_broadcast" ] 16 | } 17 | } 18 | if [destination_ip] and "multicast" not in [tags] and "broadcast" not in [tags] { 19 | if [destination_ip] =~ "^10\." or [destination_ip] =~ "^192\.168\." or [destination_ip] =~ "^172\.(1[6-9]|2[0-9]|3[0-1])\." { 20 | mutate { 21 | add_tag => [ "internal_destination" ] 22 | } 23 | } else { 24 | mutate { 25 | add_tag => [ "external_destination" ] 26 | } 27 | } 28 | if "internal_destination" not in [tags] { 29 | if [destination_ip] == "198.41.0.4" or [destination_ip] == "192.228.79.201" or [destination_ip] == "192.33.4.12" or [destination_ip] == "199.7.91.13" or [destination_ip] == "192.203.230.10" or [destination_ip] == "192.5.5.241" or [destination_ip] == "192.112.36.4" or [destination_ip] == "198.97.190.53" or [destination_ip] == "192.36.148.17" or [destination_ip] == "192.58.128.30" or [destination_ip] == "193.0.14.129" or [destination_ip] == "199.7.83.42" or [destination_ip] == "202.12.27.33" { 30 | mutate { 31 | add_tag => [ "root_dns_server" ] 32 | } 33 | } 34 | } 35 | } 36 | } 37 | if [source_ip] { 38 | if [source_ip] =~ "^10\." or [source_ip] =~ "^192\.168\." or [source_ip] =~ "^172\.(1[6-9]|2[0-9]|3[0-1])\." { 39 | mutate { 40 | add_tag => [ "internal_source" ] 41 | } 42 | } else { 43 | mutate { 44 | add_tag => [ "external_source" ] 45 | } 46 | } 47 | if "internal_source" not in [tags] { 48 | if [source_ip] == "198.41.0.4" or [source_ip] == "192.228.79.201" or [source_ip] == "192.33.4.12" or [source_ip] == "199.7.91.13" or [source_ip] == "192.203.230.10" or [source_ip] == "192.5.5.241" or [source_ip] == "192.112.36.4" or [source_ip] == "198.97.190.53" or [source_ip] == "192.36.148.17" or [source_ip] == "192.58.128.30" or [source_ip] == "193.0.14.129" or [source_ip] == "199.7.83.42" or [source_ip] == "202.12.27.33" { 49 | mutate { 50 | add_tag => [ "root_dns_server" ] 51 | } 52 | } 53 | } 54 | if "internal_source" in [tags] and "internal_destination" in [tags] { 55 | mutate { add_tag => [ "internal_only" ] } 56 | } 57 | # This section is for system tagging 58 | if [destination_port] { 59 | if [destination_port] == 53 and "internal_destination" in [tags] { 60 | mutate { add_tag => [ "dns_server" ] } 61 | } 62 | if [destination_port] == 21 and "internal_destination" in [tags] { 63 | mutate { add_tag => [ "ftp_server" ] } 64 | } 65 | if [destination_port] == 22 and "internal_destination" in [tags] { 66 | mutate { add_tag => [ "ssh_server" ] } 67 | } 68 | if [destination_port] == 139 and "internal_destination" in [tags] { 69 | mutate { add_tag => [ "smb_server", "old_smb_use" ] } 70 | } 71 | if [destination_port] == 445 and "internal_destination" in [tags] { 72 | mutate { add_tag => [ "smb_server" ] } 73 | } 74 | if [destination_port] == 389 and "internal_destination" in [tags] { 75 | mutate { add_tag => [ "ldap_server" ] } 76 | } 77 | if [destination_port] == 636 and "internal_destination" in [tags] { 78 | mutate { add_tag => [ "ldaps_server" ] } 79 | } 80 | if [destination_port] == 80 and "internal_destination" in [tags] { 81 | mutate { add_tag => [ "web_server", "http_server" ] } 82 | } 83 | if
[destination_port] == 443 and "internal_destination" in [tags] { 84 | mutate { add_tag => [ "web_server", "https_server" ] } 85 | } 86 | if [destination_port] == 1433 and "internal_destination" in [tags] { 87 | mutate { add_tag => [ "mssql_server" ] } 88 | } 89 | if [destination_port] == 3389 and "internal_destination" in [tags] { 90 | mutate { add_tag => [ "rdp_server" ] } 91 | } 92 | if [destination_port] == 1521 and "internal_destination" in [tags] { 93 | mutate { add_tag => [ "oracle_server" ] } 94 | } 95 | if [destination_port] == 123 and "internal_destination" in [tags] { 96 | mutate { add_tag => [ "ntp_server" ] } 97 | } 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8001_postprocess_common_ip_augmentation.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [source_ip] { 8 | if [source_ip] == "-" { 9 | mutate { 10 | replace => { "source_ip" => "0.0.0.0" } 11 | } 12 | } 13 | grok { 14 | match => { "source_ip" => "%{IPV6:source_ip_v6}" } 15 | remove_field => [ "source_ip" ] 16 | tag_on_failure => [] 17 | } 18 | if "internal_source" not in [tags] { 19 | geoip { 20 | source => "source_ip" 21 | target => "source_geo" 22 | } 23 | geoip { 24 | default_database_type => "ASN" 25 | source => "source_ip" 26 | target => "source_geo" 27 | } 28 | } 29 | if [source_ip] { 30 | mutate { 31 | add_field => { "ips" => "%{source_ip}" } 32 | } 33 | } 34 | } 35 | if [destination_ip] { 36 | if [destination_ip] == "-" { 37 | mutate { 38 | replace => { "destination_ip" => "0.0.0.0" } 39 | } 40 | } 41 | grok { 42 | match => { "destination_ip" => "%{IPV6:destination_ip_v6}" } 43 | remove_field => [ "destination_ip" ] 44 | tag_on_failure => [] 45 | } 46 | if "internal_destination" not in [tags] { 47 | geoip { 48 | source => "destination_ip" 49 | target => "destination_geo" 50 | } 51 | geoip { 52 | default_database_type => "ASN" 53 | source => "destination_ip" 54 | target => "destination_geo" 55 | } 56 | } 57 | if [destination_ip] { 58 | mutate { 59 | add_field => { "ips" => "%{destination_ip}" } 60 | } 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8006_postprocess_dns.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "dns" { 8 | if [query] { 9 | mutate { 10 | lowercase => [ "query" ] 11 | } 12 | if [query] =~ "labmeinc\.internal$" { 13 | mutate { 14 | add_tag => "internal_domain" 15 | add_field => { "highest_registered_domain" => "labmeinc.internal" } 16 | } 17 | } 18 | 19 | # REPLACE labmeinc.internal above with your internal domain 20 | if "internal_domain" not in [tags] { 21 | if [query_type_name] != "NB" and [query_type_name] != "TKEY" and [query_type_name] != "NBSTAT" and [query_type_name] != "PTR" { 22 | tld { 23 | source => "query" 24 | } 25 | mutate { 26 | rename => { "[SubLog][sessionid]" => "sub_session_id" } 27 | rename => { "[tld][domain]" => "highest_registered_domain" } 28 | rename => { "[tld][trd]" =>
"subdomain" } 29 | rename => { "[tld][tld]" => "top_level_domain" } 30 | rename => { "[tld][sld]" => "parent_domain" } 31 | } 32 | ruby { 33 | code => "event.set('query_length', event.get('query').length)" 34 | } 35 | if [parent_domain] { 36 | ruby { 37 | code => "event.set('parent_domain_length', event.get('parent_domain').length)" 38 | } 39 | } 40 | if [subdomain] { 41 | ruby { 42 | code => "event.set('subdomain_length', event.get('subdomain').length)" 43 | } 44 | } 45 | if [highest_registered_domain] { 46 | ruby { 47 | code => "event.set('highest_registered_domain_length', event.get('highest_registered_domain').length)" 48 | } 49 | } 50 | } 51 | } 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8007_postprocess_http.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "http" { 8 | if [uri] { 9 | ruby { 10 | code => "event.set('uri_length', event.get('uri').length)" 11 | } 12 | } 13 | if [virtual_host] { 14 | ruby { 15 | code => "event.set('virtual_host_length', event.get('virtual_host').length)" 16 | } 17 | } 18 | if [user_agent] { 19 | ruby { 20 | code => "event.set('useragent_length', event.get('user_agent').length)" 21 | } 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8009_postprocess_dns_top1m_tagging.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 4/11/2017 5 | 6 | filter { 7 | if [event_type] == "dns" { 8 | if [highest_registered_domain] and "internal_domain" not in [tags] { 9 | rest { 10 | request => { 11 | url => "http://domain_stats:20000/alexa/%{highest_registered_domain}" 12 | } 13 | sprintf => true 14 | json => false 15 | target => "site" 16 | } 17 | if [site] != "0" and [site] { 18 | mutate { 19 | add_tag => [ "top-1m" ] 20 | remove_field => [ "site" ] 21 | } 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8010_postprocess_dns_creation_date.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolution.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "dns" { 8 | if [highest_registered_domain] and "top-1m" not in [tags] and "internal_domain" not in [tags] and [query_type_name] != "NB" and [query_type_name] != "TKEY" and [query_type_name] != "NBSTAT" and [query_type_name] != "PTR" { 9 | rest { 10 | request => { 11 | url => "http://domain_stats:20000/domain/creation_date/%{highest_registered_domain}" 12 | } 13 | sprintf => true 14 | json => false 15 | target => "creation_date" 16 | } 17 | if [creation_date] =~ "No whois" { 18 | mutate { remove_field => [ "creation_date" ] } 19 | } 20 | if [creation_date] and [creation_date] != "" { 21 | grok { 22 | match => { "creation_date" => 
"(?[12][0-9]{3}-[01][0-9]-[0-3][0-9])" } 23 | remove_field => [ "creation_date" ] 24 | } 25 | if [modified_creation_date] { 26 | date { 27 | match => [ "modified_creation_date", "yyyy-MM-dd" ] 28 | remove_field => [ "modified_creation_date" ] 29 | target => "domain_creation_date" 30 | } 31 | } 32 | } 33 | if [creation_date] == "" { 34 | mutate { remove_field => [ "creation_date" ] } 35 | } 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8502_postprocess_freq_analysis_bro_dns.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 4/11/2017 5 | 6 | filter { 7 | if [event_type] == "dns" { 8 | # If Query exists run a frequency analysis against it. In order for this to work you must have 9 | # freq.py and the corresponding frequency table in /opt/freq/. This is a huge boost to security 10 | # and I highly recommend you set this up. Example, if a frequency score less than 6 exists 11 | # then there is a likelihood that something malicious is happening. 12 | # 13 | # For higher accuracy, please generate your own frequency tables. For questions on setup, 14 | # please refer to https://github.com/SMAPPER 15 | if [query_type_name] == "A" or [query_type_name] == "AAAA" and "top-1m" not in [tags] and "internal_domain" not in [tags] { 16 | if [parent_domain] and [parent_domain_length] > 5 { 17 | mutate { 18 | add_field => { "freq_parent_domain" => "%{parent_domain}"} 19 | } 20 | mutate { 21 | gsub => [ "freq_parent_domain", "[^a-zA-Z0-9]", "" ] 22 | } 23 | rest { 24 | request => { 25 | url => "http://freq_server:10004/measure/%{freq_parent_domain}" 26 | } 27 | sprintf => true 28 | json => false 29 | target => "parent_domain_frequency_score" 30 | } 31 | if [parent_domain_frequency_score] { 32 | mutate { 33 | add_field => { "frequency_scores" => "%{parent_domain_frequency_score}" } 34 | remove_field => [ "freq_parent_domain" ] 35 | } 36 | } 37 | } 38 | if [subdomain] and [subdomain_length] > 5 { 39 | mutate { 40 | add_field => { "freq_subdomain" => "%{subdomain}"} 41 | } 42 | mutate { 43 | gsub => [ "freq_subdomain", "[^a-zA-Z0-9]", "" ] 44 | } 45 | rest { 46 | request => { 47 | url => "http://freq_server:10004/measure/%{freq_subdomain}" 48 | } 49 | sprintf => true 50 | json => false 51 | target => "subdomain_frequency_score" 52 | } 53 | if [subdomain_frequency_score] { 54 | mutate { 55 | add_field => { "frequency_scores" => "%{subdomain_frequency_score}" } 56 | remove_field => [ "freq_subdomain" ] 57 | } 58 | } 59 | } 60 | if [highest_registered_domain] and [highest_registered_domain_length] > 5 { 61 | mutate { 62 | add_field => { "freq_highest_registered_domain" => "%{highest_registered_domain}"} 63 | } 64 | mutate { 65 | gsub => [ "freq_highest_registered_domain", "[^a-zA-Z0-9]", "" ] 66 | } 67 | rest { 68 | request => { 69 | url => "http://freq_server:10004/measure/%{freq_highest_registered_domain}" 70 | } 71 | sprintf => true 72 | json => false 73 | target => "highest_registered_domain_frequency_score" 74 | } 75 | if [highest_registered_domain_frequency_score] { 76 | mutate { 77 | add_field => { "frequency_scores" => "%{highest_registered_domain_frequency_score}" } 78 | remove_field => [ "freq_highest_registered_domain" ] 79 | } 80 | } 81 | } 82 | } 83 | } 84 | } 85 | 
-------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8503_postprocess_freq_analysis_bro_http.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "http" { 8 | # If virtual_host exists run a frequency analysis against it. In order for this to work you must have 9 | # freq.py and the corresponding frequency table in /opt/freq/. This is a huge boost to security 10 | # and I highly recommend you set this up. Example, if a frequency score less than 6 exists 11 | # then there is a likelihood that something malicious is happening. 12 | # 13 | # For higher accuracy, please generate your own frequency tables. For questions on setup, 14 | # please refer to https://github.com/SMAPPER 15 | if [virtual_host] { 16 | if [virtual_host_length] > 5 { 17 | mutate { 18 | add_field => { "freq_virtual_host" => "%{virtual_host}" } 19 | } 20 | mutate { 21 | gsub => [ "freq_virtual_host", "[^a-zA-Z0-9]", "" ] 22 | } 23 | rest { 24 | request => { 25 | url => "http://freq_server:10004/measure/%{freq_virtual_host}" 26 | } 27 | sprintf => true 28 | json => false 29 | target => "virtual_host_frequency_score" 30 | } 31 | } 32 | if [virtual_host_frequency_score] { 33 | mutate { 34 | add_field => { "frequency_scores" => "%{virtual_host_frequency_score}" } 35 | remove_field => [ "freq_virtual_host" ] 36 | } 37 | } 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8504_postprocess_freq_analysis_bro_ssl.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "ssl" { 8 | # If CHANGE_ME exists run a frequency analysis against it. In order for this to work you must have 9 | # freq.py and the corresponding frequency table in /opt/freq/. This is a huge boost to security 10 | # and I highly recommend you set this up. Example, if a frequency score less than 6 exists 11 | # then there is a likelihood that something malicious is happening. 12 | # 13 | # For higher accuracy, please generate your own frequency tables.
For questions on setup, 14 | # please refer to https://github.com/SMAPPER 15 | if [server_name] and [server_name] != "" and [server_name] != "-" { 16 | mutate { 17 | add_field => { "freq_common_name" => "%{server_name}" } 18 | } 19 | mutate { 20 | gsub => [ "freq_common_name", "[^a-zA-Z0-9]", "" ] 21 | } 22 | rest { 23 | request => { 24 | url => "http://freq_server:10004/measure/%{freq_common_name}" 25 | } 26 | sprintf => true 27 | json => false 28 | target => "server_name_frequency_score" 29 | } 30 | if [server_name_frequency_score] { 31 | mutate { 32 | add_field => { "frequency_scores" => "%{server_name_frequency_score}" } 33 | remove_field => [ "freq_common_name" ] 34 | } 35 | } 36 | } 37 | if [issuer_common_name] and [issuer_common_name] != "" and [server_name] != "-" { 38 | mutate { 39 | add_field => { "freq_common_name" => "%{issuer_common_name}" } 40 | } 41 | mutate { 42 | gsub => [ "freq_common_name", "[^a-zA-Z0-9]", "" ] 43 | } 44 | rest { 45 | request => { 46 | url => "http://freq_server:10004/measure/%{freq_common_name}" 47 | } 48 | sprintf => true 49 | json => false 50 | target => "issuer_common_name_frequency_score" 51 | } 52 | if [issuer_common_name_frequency_score] { 53 | mutate { 54 | add_field => { "frequency_scores" => "%{issuer_common_name_frequency_score}" } 55 | remove_field => [ "freq_common_name" ] 56 | } 57 | } 58 | } 59 | if [certificate_common_name] and [certificate_common_name] != "" and [server_name] != "-" { 60 | mutate { 61 | add_field => { "freq_common_name" => "%{certificate_common_name}" } 62 | } 63 | mutate { 64 | gsub => [ "freq_common_name", "[^a-zA-Z0-9]", "" ] 65 | } 66 | rest { 67 | request => { 68 | url => "http://freq_server:10004/measure/%{freq_common_name}" 69 | } 70 | sprintf => true 71 | json => false 72 | target => "certificate_common_name_frequency_score" 73 | } 74 | if [certificate_common_name_frequency_score] { 75 | mutate { 76 | add_field => { "frequency_scores" => "%{certificate_common_name_frequency_score}" } 77 | remove_field => [ "freq_common_name" ] 78 | } 79 | } 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/8505_postprocess_freq_analysis_bro_x509.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 12/9/2016 5 | 6 | filter { 7 | if [event_type] == "x509" { 8 | # If SubjectCommonName exists run a frequency analysis against it. In order for this to work you must have 9 | # freq.py and the corresponding frequency table in /opt/freq/. This is a huge boost to security 10 | # and I highly recommend you set this up. Example, if a frequency score less than 6 exists 11 | # then there is a likelihood that something malicious is happening. 12 | # 13 | # For higher accuracy, please generate your own frequency tables. 
For questions on setup, 14 | # please refer to https://github.com/SMAPPER 15 | if [issuer_common_name] and [issuer_common_name] != "" and [server_name] != "-" { 16 | mutate { 17 | add_field => { "freq_common_name" => "%{issuer_common_name}" } 18 | } 19 | mutate { 20 | gsub => [ "freq_common_name", "[^a-zA-Z0-9]", "" ] 21 | } 22 | rest { 23 | request => { 24 | url => "http://freq_server:10004/measure/%{freq_common_name}" 25 | } 26 | sprintf => true 27 | json => false 28 | target => "issuer_common_name_frequency_score" 29 | } 30 | if [issuer_common_name_frequency_score] { 31 | mutate { 32 | add_field => { "frequency_scores" => "%{issuer_common_name_frequency_score}" } 33 | remove_field => [ "freq_common_name" ] 34 | } 35 | } 36 | } 37 | if [issuer_organization] and [issuer_organization] != "" and [server_name] != "-" { 38 | mutate { 39 | add_field => { "freq_common_name" => "%{issuer_organization}" } 40 | } 41 | mutate { 42 | gsub => [ "freq_common_name", "[^a-zA-Z0-9]", "" ] 43 | } 44 | rest { 45 | request => { 46 | url => "http://freq_server:10004/measure/%{freq_common_name}" 47 | } 48 | sprintf => true 49 | json => false 50 | target => "issuer_organization_frequency_score" 51 | } 52 | if [issuer_organization_frequency_score] { 53 | mutate { 54 | add_field => { "frequency_scores" => "%{issuer_organization_frequency_score}" } 55 | remove_field => [ "freq_common_name" ] 56 | } 57 | } 58 | } 59 | if [certificate_common_name] and [certificate_common_name] != "" and [server_name] != "-" { 60 | mutate { 61 | add_field => { "freq_common_name" => "%{certificate_common_name}" } 62 | } 63 | mutate { 64 | gsub => [ "freq_common_name", "[^a-zA-Z0-9]", "" ] 65 | } 66 | rest { 67 | request => { 68 | url => "http://freq_server:10004/measure/%{freq_common_name}" 69 | } 70 | sprintf => true 71 | json => false 72 | target => "certificate_common_name_frequency_score" 73 | } 74 | if [certificate_common_name_frequency_score] { 75 | mutate { 76 | add_field => { "frequency_scores" => "%{certificate_common_name_frequency_score}" } 77 | remove_field => [ "freq_common_name" ] 78 | } 79 | } 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /logstash/logstash_configs/security_onion-json/9000_output.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/16/2017 5 | 6 | output { 7 | if [log_event_type] == "bro" { 8 | elasticsearch { 9 | index => "logstash-bro-%{+YYYY.MM.dd}" 10 | hosts => "${ELASTICSEARCH_HOST}" 11 | } 12 | } 13 | if [log_event_type] == "suricata" { 14 | elasticsearch { 15 | index => "logstash-ids-%{+YYYY.MM.dd}" 16 | hosts => "${ELASTICSEARCH_HOST}" 17 | } 18 | } 19 | if [log_event_type] == "ossec" { 20 | elasticsearch { 21 | index => "logstash-ossec-%{+YYYY.MM.dd}" 22 | hosts => "${ELASTICSEARCH_HOST}" 23 | } 24 | } 25 | if [log_event_type] != "suricata" and [log_event_type] != "ossec" and [log_event_type] != "bro" { 26 | elasticsearch { 27 | index => "logstash-so-%{+YYYY.MM.dd}" 28 | hosts => "${ELASTICSEARCH_HOST}" 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /logstash/logstash_configs/windows/0006_input_windows_beats.conf: -------------------------------------------------------------------------------- 1 | # Author: Justin Henderson 2 | # SANS Instructor and author of SANS
SEC555: SIEM and Tactical Analytics 3 | # Email: justin@hasecuritysolutions.com 4 | # Last Update: 11/15/2017 5 | 6 | input { 7 | # Assumes WinLogBeat logs over port 5045 8 | beats { 9 | port => "5045" 10 | add_field => { "log_event_type" => "windows" } 11 | } 12 | } -------------------------------------------------------------------------------- /logstash/logstash_configs/windows/8200_postprocess_tagging.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [destination_ip] =~ "^2(?:2[4-9]|3\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d?|0)){3}$" { 3 | mutate { 4 | add_tag => [ "multicast" ] 5 | } 6 | } 7 | if [destination_ip] == "255.255.255.255" { 8 | mutate { 9 | add_tag => [ "broadcast" ] 10 | } 11 | } 12 | if [destination_ip] and "multicast" not in [tags] and "broadcast" not in [tags] { 13 | if [destination_ip] =~ "^10\." or [destination_ip] =~ "^192\.168\." or [destination_ip] =~ "^172\.(1[6-9]|2[0-9]|3[0-1])\." { 14 | mutate { 15 | add_tag => [ "internal_destination" ] 16 | } 17 | } else { 18 | mutate { 19 | add_tag => [ "external_destination" ] 20 | } 21 | } 22 | if "internal_destination" not in [tags] { 23 | if [destination_ip] == "198.41.0.4" or [destination_ip] == "192.228.79.201" or [destination_ip] == "192.33.4.12" or [destination_ip] == "199.7.91.13" or [destination_ip] == "192.203.230.10" or [destination_ip] == "192.5.5.241" or [destination_ip] == "192.112.36.4" or [destination_ip] == "198.97.190.53" or [destination_ip] == "192.36.148.17" or [destination_ip] == "192.58.128.30" or [destination_ip] == "193.0.14.129" or [destination_ip] == "199.7.83.42" or [destination_ip] == "202.12.27.33" { 24 | mutate { 25 | add_tag => [ "root_dns_server" ] 26 | } 27 | } 28 | } 29 | } 30 | if [source_ip] { 31 | if [source_ip] =~ "^10\." or [source_ip] =~ "^192\.168\." or [source_ip] =~ "^172\.(1[6-9]|2[0-9]|3[0-1])\."
{ 32 | mutate { 33 | add_tag => [ "internal_source" ] 34 | } 35 | } else { 36 | mutate { 37 | add_tag => [ "external_source" ] 38 | } 39 | } 40 | if "internal_source" not in [tags] { 41 | if [source_ip] == "198.41.0.4" or [source_ip] == "192.228.79.201" or [source_ip] == "192.33.4.12" or [source_ip] == "199.7.91.13" or [source_ip] == "192.203.230.10" or [source_ip] == "192.5.5.241" or [source_ip] == "192.112.36.4" or [source_ip] == "198.97.190.53" or [source_ip] == "192.36.148.17" or [source_ip] == "192.58.128.30" or [source_ip] == "193.0.14.129" or [source_ip] == "199.7.83.42" or [source_ip] == "202.12.27.33" { 42 | mutate { 43 | add_tag => [ "root_dns_server" ] 44 | } 45 | } 46 | } 47 | if "internal_source" in [tags] and "internal_destination" in [tags] { 48 | mutate { add_tag => [ "internal_only" ] } 49 | } 50 | # This section is for system tagging 51 | if [destination_port] == 53 and "internal_destination" in [tags] { 52 | mutate { add_tag => [ "dns_server" ] } 53 | } 54 | if [destination_port] == 21 and "internal_destination" in [tags] { 55 | mutate { add_tag => [ "ftp_server" ] } 56 | } 57 | if [destination_port] == 22 and "internal_destination" in [tags] { 58 | mutate { add_tag => [ "ssh_server" ] } 59 | } 60 | if [destination_port] == 139 and "internal_destination" in [tags] { 61 | mutate { add_tag => [ "smb_server", "old_smb_use" ] } 62 | } 63 | if [destination_port] == 445 and "internal_destination" in [tags] { 64 | mutate { add_tag => [ "smb_server" ] } 65 | } 66 | if [destination_port] == 389 and "internal_destination" in [tags] { 67 | mutate { add_tag => [ "ldap_server" ] } 68 | } 69 | if [destination_port] == 636 and "internal_destination" in [tags] { 70 | mutate { add_tag => [ "ldaps_server" ] } 71 | } 72 | if [destination_port] == 80 and "internal_destination" in [tags] { 73 | mutate { add_tag => [ "web_server", "http_server" ] } 74 | } 75 | if [destination_port] == 443 and "internal_destination" in [tags] { 76 | mutate { add_tag => [ "web_server", "https_server" ] } 77 | } 78 | if [destination_port] == 1433 and "internal_destination" in [tags] { 79 | mutate { add_tag => [ "mssql_server" ] } 80 | } 81 | if [destination_port] == 3389 and "internal_destination" in [tags] { 82 | mutate { add_tag => [ "mysql_server" ] } 83 | } 84 | if [destination_port] == 1521 and "internal_destination" in [tags] { 85 | mutate { add_tag => [ "oracle_server" ] } 86 | } 87 | if [destination_port] == 123 and "internal_destination" in [tags] { 88 | mutate { add_tag => [ "ntp_server" ] } 89 | } 90 | } 91 | } -------------------------------------------------------------------------------- /logstash/logstash_configs/windows/8999_postprocess_windows_filter.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [event_id] == 5157 { 3 | if "multicast" in [tags] or "broadcast" in [tags] or "possible_broadcast" in [tags] { 4 | drop { } 5 | } 6 | } 7 | # Noisy events 8 | #if [event_id] == 5156 or [event_id] == 5158 or [event_id] == 5154 or [event_id] == 5152 or [event_id] == 4672 or [event_id] == 12 or [event_id] == 4673 or [event_id] == 4690 or [event_id] == 4658 or [event_id] == 7036 or [event_id] == 36874 or [event_id] == 4627 or [event_id] == 4985 { 9 | # drop { } 10 | #} 11 | #if [source_name] == "Group Policy Services" { drop { } } 12 | #if [event_data][SubjectUserName] == "jhenderson" { drop { } } 13 | #if [user] == "LABMEINC\jhenderson" or [user] == "jhenderson" { 14 | # drop { } 15 | #} 16 | if [event_id] == 4624 and "machine" in [tags] { drop { 
} } 17 | if [event_id] == 4634 and "machine" in [tags] { drop { } } 18 | # Need to update to include proper parent image 19 | #if [event_id] == 1 and [image] == "C:\Windows\System32\svchost.exe" { drop {} } 20 | } 21 | -------------------------------------------------------------------------------- /logstash/logstash_configs/windows/9000_output.conf: -------------------------------------------------------------------------------- 1 | output { 2 | elasticsearch { 3 | index => "winlogbeat-%{+YYYY.MM.dd}" 4 | hosts => "${ELASTICSEARCH_HOST}" 5 | } 6 | } -------------------------------------------------------------------------------- /logstash/monitor_pipeline.yml: -------------------------------------------------------------------------------- 1 | - pipeline.id: monitoring 2 | path.config: "/opt/elastic_stack/logstash/logstash_configs/monitoring" 3 | -------------------------------------------------------------------------------- /logstash/persistent_data/readme.txt: -------------------------------------------------------------------------------- 1 | empty file -------------------------------------------------------------------------------- /logstash/pipelines.yml.example: -------------------------------------------------------------------------------- 1 | - pipeline.id: windows 2 | path.config: "/opt/logstash_configs/windows" 3 | - pipeline.id: security_onion-json 4 | path.config: "/opt/logstash_configs/security_onion-json" 5 | - pipeline.id: firewall 6 | path.config: "/opt/logstash_configs/firewall" 7 | #- pipeline.id: bro-tsv 8 | # path.config: "/opt/logstash_configs/bro-tsv" 9 | -------------------------------------------------------------------------------- /logstash/rules/app-layer-events.rules: -------------------------------------------------------------------------------- 1 | # App layer event rules 2 | # 3 | # SID's fall in the 2260000+ range. See http://doc.emergingthreats.net/bin/view/Main/SidAllocation 4 | # 5 | # These sigs fire at most once per connection. 6 | # 7 | # A flowint applayer.anomaly.count is incremented for each match. By default it will be 0.
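#
# Illustrative example (not from the upstream ruleset): because the counter only
# ever increments, it can drive a simple threshold rule. The SID (9900001) and
# the threshold of 3 below are placeholders; adjust them for local use.
#alert ip any any -> any any (msg:"LOCAL Applayer anomaly count exceeded 3"; flow:established; flowint:applayer.anomaly.count,>,3; classtype:protocol-command-decode; sid:9900001; rev:1;)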
8 | # 9 | alert ip any any -> any any (msg:"SURICATA Applayer Mismatch protocol both directions"; flow:established; app-layer-event:applayer_mismatch_protocol_both_directions; flowint:applayer.anomaly.count,+,1; classtype:protocol-command-decode; sid:2260000; rev:1;) 10 | alert ip any any -> any any (msg:"SURICATA Applayer Wrong direction first Data"; flow:established; app-layer-event:applayer_wrong_direction_first_data; flowint:applayer.anomaly.count,+,1; classtype:protocol-command-decode; sid:2260001; rev:1;) 11 | alert ip any any -> any any (msg:"SURICATA Applayer Detect protocol only one direction"; flow:established; app-layer-event:applayer_detect_protocol_only_one_direction; flowint:applayer.anomaly.count,+,1; classtype:protocol-command-decode; sid:2260002; rev:1;) 12 | alert ip any any -> any any (msg:"SURICATA Applayer Protocol detection skipped"; flow:established; app-layer-event:applayer_proto_detection_skipped; flowint:applayer.anomaly.count,+,1; classtype:protocol-command-decode; sid:2260003; rev:1;) 13 | # alert if STARTTLS was not followed by actual SSL/TLS 14 | alert tcp any any -> any any (msg:"SURICATA Applayer No TLS after STARTTLS"; flow:established; app-layer-event:applayer_no_tls_after_starttls; flowint:applayer.anomaly.count,+,1; classtype:protocol-command-decode; sid:2260004; rev:2;) 15 | # unexpected protocol in protocol upgrade 16 | alert tcp any any -> any any (msg:"SURICATA Applayer Unexpected protocol"; flow:established; app-layer-event:applayer_unexpected_protocol; flowint:applayer.anomaly.count,+,1; classtype:protocol-command-decode; sid:2260005; rev:1;) 17 | 18 | #next sid is 2260006 19 | -------------------------------------------------------------------------------- /logstash/rules/black_list.rules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HASecuritySolutions/elastic_stack/cf0a86470a3ffb6b1df6470120cedc939ab7d46d/logstash/rules/black_list.rules -------------------------------------------------------------------------------- /logstash/rules/dnp3-events.rules: -------------------------------------------------------------------------------- 1 | # DNP3 application decoder event rules. 2 | # 3 | # This SIDs fall in the 2270000+ range. See: 4 | # http://doc.emergingthreats.net/bin/view/Main/SidAllocation 5 | 6 | # Flooded. 7 | alert dnp3 any any -> any any (msg:"SURICATA DNP3 Request flood detected"; \ 8 | app-layer-event:dnp3.flooded; classtype:protocol-command-decode; sid:2270000; rev:2;) 9 | 10 | # Length to small for PDU type. For example, link specifies the type 11 | # as user data, but the length field is not large enough for user 12 | # data. 13 | alert dnp3 any any -> any any (msg:"SURICATA DNP3 Length too small"; \ 14 | app-layer-event:dnp3.len_too_small; classtype:protocol-command-decode; sid:2270001; rev:3;) 15 | 16 | # Bad link layer CRC. 17 | alert dnp3 any any -> any any (msg:"SURICATA DNP3 Bad link CRC"; \ 18 | app-layer-event:dnp3.bad_link_crc; classtype:protocol-command-decode; sid:2270002; rev:2;) 19 | 20 | # Bad transport layer CRC. 21 | alert dnp3 any any -> any any (msg:"SURICATA DNP3 Bad transport CRC"; \ 22 | app-layer-event:dnp3.bad_transport_crc; classtype:protocol-command-decode; sid:2270003; rev:2;) 23 | 24 | # Unknown object. 
25 | alert dnp3 any any -> any any (msg:"SURICATA DNP3 Unknown object"; \ 26 | app-layer-event:dnp3.unknown_object; classtype:protocol-command-decode; sid:2270004; rev:2;) 27 | -------------------------------------------------------------------------------- /logstash/rules/dns-events.rules: -------------------------------------------------------------------------------- 1 | # Response (answer) we didn't see a Request for. Could be packet loss. 2 | alert dns any any -> any any (msg:"SURICATA DNS Unsolicited response"; flow:to_client; app-layer-event:dns.unsollicited_response; classtype:protocol-command-decode; sid:2240001; rev:2;) 3 | # Malformed data in request. Malformed means length fields are wrong, etc. 4 | alert dns any any -> any any (msg:"SURICATA DNS malformed request data"; flow:to_server; app-layer-event:dns.malformed_data; classtype:protocol-command-decode; sid:2240002; rev:2;) 5 | alert dns any any -> any any (msg:"SURICATA DNS malformed response data"; flow:to_client; app-layer-event:dns.malformed_data; classtype:protocol-command-decode; sid:2240003; rev:2;) 6 | # Response flag set on to_server packet 7 | alert dns any any -> any any (msg:"SURICATA DNS Not a request"; flow:to_server; app-layer-event:dns.not_a_request; classtype:protocol-command-decode; sid:2240004; rev:2;) 8 | # Response flag not set on to_client packet 9 | alert dns any any -> any any (msg:"SURICATA DNS Not a response"; flow:to_client; app-layer-event:dns.not_a_response; classtype:protocol-command-decode; sid:2240005; rev:2;) 10 | # Z flag (reserved) not 0 11 | alert dns any any -> any any (msg:"SURICATA DNS Z flag set"; app-layer-event:dns.z_flag_set; classtype:protocol-command-decode; sid:2240006; rev:2;) 12 | # Request Flood Detected 13 | alert dns any any -> any any (msg:"SURICATA DNS request flood detected"; flow:to_server; app-layer-event:dns.flooded; classtype:protocol-command-decode; sid:2240007; rev:2;) 14 | # Per-flow (state) memcap reached. Relates to the app-layer.protocols.dns.state-memcap setting. 15 | alert dns any any -> any any (msg:"SURICATA DNS flow memcap reached"; flow:to_server; app-layer-event:dns.state_memcap_reached; classtype:protocol-command-decode; sid:2240008; rev:3;) 16 | -------------------------------------------------------------------------------- /logstash/rules/files.rules: -------------------------------------------------------------------------------- 1 | # Example rules for using the file handling and extraction functionality in Suricata. 2 | # 3 | # For storing files make sure you enable the "file" output. 4 | # Also, make sure you read the comments that go with it in the suricata.yaml file. 5 | 6 | # Alert on files with jpg or bmp extensions 7 | #alert http any any -> any any (msg:"FILEEXT JPG file claimed"; fileext:"jpg"; sid:1; rev:1;) 8 | #alert http any any -> any any (msg:"FILEEXT BMP file claimed"; fileext:"bmp"; sid:3; rev:1;) 9 | 10 | # Store all files with jpg or pdf extension. 11 | #alert http any any -> any any (msg:"FILESTORE jpg"; flow:established,to_server; fileext:"jpg"; filestore; sid:6; rev:1;) 12 | #alert http any any -> any any (msg:"FILESTORE pdf"; flow:established,to_server; fileext:"pdf"; filestore; sid:8; rev:1;) 13 | 14 | # Store all PDF files, regardless of their name. 15 | #alert http any any -> any any (msg:"FILEMAGIC pdf"; flow:established,to_server; filemagic:"PDF document"; filestore; sid:9; rev:1;) 16 | 17 | # Same for JPEG's. 
18 | #alert http any any -> any any (msg:"FILEMAGIC jpg(1)"; flow:established,to_server; filemagic:"JPEG image data"; filestore; sid:10; rev:1;) 19 | #alert http any any -> any any (msg:"FILEMAGIC jpg(2)"; flow:established,to_server; filemagic:"JFIF"; filestore; sid:11; rev:1;) 20 | 21 | # Unusually short file 22 | #alert http any any -> any any (msg:"FILEMAGIC short"; flow:established,to_server; filemagic:"very short file (no magic)"; filestore; sid:12; rev:1;) 23 | 24 | # Simply store all files we encounter, no alerts. 25 | #alert http any any -> any any (msg:"FILE store all"; filestore; noalert; sid:15; rev:1;) 26 | 27 | # Store all JPG files, don't alert. 28 | #alert http any any -> any any (msg:"FILE magic"; filemagic:"JFIF"; filestore; noalert; sid:16; rev:1;) 29 | #alert http any any -> any any (msg:"FILE magic"; filemagic:"GIF"; filestore; noalert; sid:23; rev:1;) 30 | #alert http any any -> any any (msg:"FILE magic"; filemagic:"PNG"; filestore; noalert; sid:17; rev:1;) 31 | 32 | # Store all Windows executables 33 | #alert http any any -> any any (msg:"FILE magic -- windows"; flow:established,to_client; filemagic:"executable for MS Windows"; filestore; sid:18; rev:1;) 34 | 35 | # Alert on PNG with 1x1 pixels (tracking) 36 | #alert http any any -> any any (msg:"FILE tracking PNG (1x1 pixel) (1)"; filemagic:"PNG image data, 1 x 1,"; sid:19; rev:1;) 37 | #alert http any any -> any any (msg:"FILE tracking PNG (1x1 pixel) (2)"; filemagic:"PNG image data, 1 x 1|00|"; sid:20; rev:1;) 38 | 39 | # Alert on GIF with 1x1 pixels (tracking) 40 | # The pattern matches on |00| which is the end of the magic buffer, this way we won't match on 1 x 128. 41 | #alert http any any -> any any (msg:"FILE tracking GIF (1x1 pixel)"; filemagic:"GIF image data, version 89a, 1 x 1|00|"; sid:21; rev:1;) 42 | 43 | # Alert and store pdf attachment but not pdf file 44 | #alert http any any -> any any (msg:"FILE pdf claimed, but not pdf"; flow:established,to_client; fileext:"pdf"; filemagic:!"PDF document"; filestore; sid:22; rev:1;) 45 | 46 | # Alert and store files over SMTP 47 | #alert smtp any any -> any any (msg:"File Found over SMTP and stored"; filestore; sid:27; rev:1;) 48 | -------------------------------------------------------------------------------- /logstash/rules/http-events.rules: -------------------------------------------------------------------------------- 1 | # HTTP event rules 2 | # 3 | # SID's fall in the 2221000+ range. See http://doc.emergingthreats.net/bin/view/Main/SidAllocation 4 | # 5 | # These sigs fire at most once per HTTP transaction. 6 | # 7 | # A flowint http.anomaly.count is incremented for each match. By default it will be 0.
8 | # 9 | alert http any any -> any any (msg:"SURICATA HTTP unknown error"; flow:established; app-layer-event:http.unknown_error; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221000; rev:1;) 10 | alert http any any -> any any (msg:"SURICATA HTTP gzip decompression failed"; flow:established; app-layer-event:http.gzip_decompression_failed; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221001; rev:1;) 11 | alert http any any -> any any (msg:"SURICATA HTTP request field missing colon"; flow:established,to_server; app-layer-event:http.request_field_missing_colon; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221002; rev:1;) 12 | alert http any any -> any any (msg:"SURICATA HTTP response field missing colon"; flow:established,to_client; app-layer-event:http.response_field_missing_colon; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221020; rev:1;) 13 | alert http any any -> any any (msg:"SURICATA HTTP invalid request chunk len"; flow:established,to_server; app-layer-event:http.invalid_request_chunk_len; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221003; rev:1;) 14 | alert http any any -> any any (msg:"SURICATA HTTP invalid response chunk len"; flow:established,to_client; app-layer-event:http.invalid_response_chunk_len; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221004; rev:1;) 15 | alert http any any -> any any (msg:"SURICATA HTTP invalid transfer encoding value in request"; flow:established,to_server; app-layer-event:http.invalid_transfer_encoding_value_in_request; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221005; rev:1;) 16 | alert http any any -> any any (msg:"SURICATA HTTP invalid transfer encoding value in response"; flow:established,to_client; app-layer-event:http.invalid_transfer_encoding_value_in_response; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221006; rev:1;) 17 | alert http any any -> any any (msg:"SURICATA HTTP invalid content length field in request"; flow:established,to_server; app-layer-event:http.invalid_content_length_field_in_request; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221007; rev:1;) 18 | alert http any any -> any any (msg:"SURICATA HTTP invalid content length field in response"; flow:established,to_client; app-layer-event:http.invalid_content_length_field_in_response; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221008; rev:1;) 19 | alert http any any -> any any (msg:"SURICATA HTTP status 100-Continue already seen"; flow:established,to_client; app-layer-event:http.100_continue_already_seen; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221009; rev:1;) 20 | alert http any any -> any any (msg:"SURICATA HTTP unable to match response to request"; flow:established,to_client; app-layer-event:http.unable_to_match_response_to_request; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221010; rev:1;) 21 | alert http any any -> any any (msg:"SURICATA HTTP invalid server port in request"; flow:established,to_server; app-layer-event:http.invalid_server_port_in_request; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221011; rev:1;) 22 | alert http any any -> any any (msg:"SURICATA HTTP invalid authority port"; flow:established; app-layer-event:http.invalid_authority_port; flowint:http.anomaly.count,+,1; 
classtype:protocol-command-decode; sid:2221012; rev:1;) 23 | alert http any any -> any any (msg:"SURICATA HTTP request header invalid"; flow:established,to_server; app-layer-event:http.request_header_invalid; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221013; rev:1;) 24 | alert http any any -> any any (msg:"SURICATA HTTP response header invalid"; flow:established,to_client; app-layer-event:http.response_header_invalid; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221021; rev:1;) 25 | alert http any any -> any any (msg:"SURICATA HTTP missing Host header"; flow:established,to_server; app-layer-event:http.missing_host_header; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221014; rev:1;) 26 | # Alert if hostname is both part of URL and Host header and they are not the same. 27 | alert http any any -> any any (msg:"SURICATA HTTP Host header ambiguous"; flow:established,to_server; app-layer-event:http.host_header_ambiguous; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221015; rev:1;) 28 | alert http any any -> any any (msg:"SURICATA HTTP invalid request field folding"; flow:established,to_server; app-layer-event:http.invalid_request_field_folding; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221016; rev:1;) 29 | alert http any any -> any any (msg:"SURICATA HTTP invalid response field folding"; flow:established,to_client; app-layer-event:http.invalid_response_field_folding; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221017; rev:1;) 30 | alert http any any -> any any (msg:"SURICATA HTTP request buffer too long"; flow:established,to_server; app-layer-event:http.request_field_too_long; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221018; rev:1;) 31 | alert http any any -> any any (msg:"SURICATA HTTP response buffer too long"; flow:established,to_client; app-layer-event:http.response_field_too_long; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221019; rev:1;) 32 | # Multipart parser detected generic error. 33 | alert http any any -> any any (msg:"SURICATA HTTP multipart generic error"; flow:established,to_server; app-layer-event:http.multipart_generic_error; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221022; rev:1;) 34 | # Multipart header claims a file is present, but no actual filedata is available. 35 | alert http any any -> any any (msg:"SURICATA HTTP multipart no filedata"; flow:established,to_server; app-layer-event:http.multipart_no_filedata; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221023; rev:1;) 36 | # Multipart header invalid. 37 | alert http any any -> any any (msg:"SURICATA HTTP multipart invalid header"; flow:established,to_server; app-layer-event:http.multipart_invalid_header; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221024; rev:1;) 38 | # Warn when the port in the Host: header doesn't match the actual TCP Server port.
39 | alert http any any -> any any (msg:"SURICATA HTTP request server port doesn't match TCP port"; flow:established,to_server; app-layer-event:http.request_server_port_tcp_port_mismatch; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221026; rev:1;) 40 | # Host part of URI is invalid 41 | alert http any any -> any any (msg:"SURICATA HTTP Host part of URI is invalid"; flow:established,to_server; app-layer-event:http.request_uri_host_invalid; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221027; rev:1;) 42 | # Host header is invalid 43 | alert http any any -> any any (msg:"SURICATA HTTP Host header invalid"; flow:established,to_server; app-layer-event:http.request_header_host_invalid; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221028; rev:1;) 44 | # URI is terminated by non-compliant characters. RFC allows for space (0x20), but many implementations permit others like tab and more. 45 | alert http any any -> any any (msg:"SURICATA HTTP URI terminated by non-compliant character"; flow:established,to_server; app-layer-event:http.uri_delim_non_compliant; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221029; rev:1;) 46 | # Method is terminated by non-compliant characters. RFC allows for space (0x20), but many implementations permit others like tab and more. 47 | alert http any any -> any any (msg:"SURICATA HTTP METHOD terminated by non-compliant character"; flow:established,to_server; app-layer-event:http.method_delim_non_compliant; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221030; rev:1;) 48 | # Request line started with whitespace 49 | alert http any any -> any any (msg:"SURICATA HTTP Request line with leading whitespace"; flow:established,to_server; app-layer-event:http.request_line_leading_whitespace; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221031; rev:1;) 50 | 51 | # next sid 2221032 52 | 53 | -------------------------------------------------------------------------------- /logstash/rules/local.rules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HASecuritySolutions/elastic_stack/cf0a86470a3ffb6b1df6470120cedc939ab7d46d/logstash/rules/local.rules -------------------------------------------------------------------------------- /logstash/rules/modbus-events.rules: -------------------------------------------------------------------------------- 1 | # Modbus Protocol version field is incorrect (Modbus version = 0) 2 | alert modbus any any -> any any (msg:"SURICATA Modbus invalid Protocol version"; app-layer-event:modbus.invalid_protocol_id; classtype:protocol-command-decode; sid:2250001; rev:2;) 3 | # Response (answer) we didn't see a Request for. Could be packet loss. 4 | alert modbus any any -> any any (msg:"SURICATA Modbus unsolicited response"; app-layer-event:modbus.unsolicited_response; classtype:protocol-command-decode; sid:2250002; rev:2;) 5 | # Malformed request or response. 
Malformed means length field is wrong 6 | alert modbus any any -> any any (msg:"SURICATA Modbus invalid Length"; app-layer-event:modbus.invalid_length; classtype:protocol-command-decode; sid:2250003; rev:2;) 7 | # Unit identifier field is incorrect 8 | alert modbus any any -> any any (msg:"SURICATA Modbus invalid Unit Identifier"; app-layer-event:modbus.invalid_unit_identifier; classtype:protocol-command-decode; sid:2250004; rev:2;) 9 | # Modbus Function code is incorrect 10 | alert modbus any any -> any any (msg:"SURICATA Modbus invalid Function code"; app-layer-event:modbus.invalid_function_code; classtype:protocol-command-decode; sid:2250005; rev:2;) 11 | # Modbus Request/Response value field is incorrect 12 | alert modbus any any -> any any (msg:"SURICATA Modbus invalid Value"; app-layer-event:modbus.invalid_value; classtype:protocol-command-decode; sid:2250006; rev:2;) 13 | # Modbus Exception code is incorrect 14 | alert modbus any any -> any any (msg:"SURICATA Modbus Exception code invalid"; flow:to_client; app-layer-event:modbus.invalid_exception_code; classtype:protocol-command-decode; sid:2250007; rev:2;) 15 | # Value field in Modbus Response does not match with Modbus Request 16 | alert modbus any any -> any any (msg:"SURICATA Modbus Data mismatch"; flow:to_client; app-layer-event:modbus.value_mismatch; classtype:protocol-command-decode; sid:2250008; rev:2;) 17 | # Request Flood Detected 18 | alert modbus any any -> any any (msg:"SURICATA Modbus Request flood detected"; flow:to_server; app-layer-event:modbus.flooded; classtype:protocol-command-decode; sid:2250009; rev:2;) 19 | -------------------------------------------------------------------------------- /logstash/rules/nfs-events.rules: -------------------------------------------------------------------------------- 1 | # NFS app layer event rules 2 | # 3 | # SID's fall in the 2223000+ range. See https://redmine.openinfosecfoundation.org/projects/suricata/wiki/AppLayer 4 | # 5 | # These sigs fire at most once per connection. 6 | # 7 | alert nfs any any -> any any (msg:"SURICATA NFS malformed request data"; flow:to_server; app-layer-event:nfs.malformed_data; classtype:protocol-command-decode; sid:2223000; rev:1;) 8 | alert nfs any any -> any any (msg:"SURICATA NFS malformed response data"; flow:to_client; app-layer-event:nfs.malformed_data; classtype:protocol-command-decode; sid:2223001; rev:1;) 9 | -------------------------------------------------------------------------------- /logstash/rules/ntp-events.rules: -------------------------------------------------------------------------------- 1 | # NTP app layer event rules 2 | # 3 | # SID's fall in the 2222000+ range. See https://redmine.openinfosecfoundation.org/projects/suricata/wiki/AppLayer 4 | # 5 | # These sigs fire at most once per connection. 6 | # 7 | alert ntp any any -> any any (msg:"SURICATA NTP malformed request data"; flow:to_server; app-layer-event:ntp.malformed_data; classtype:protocol-command-decode; sid:2222000; rev:1;) 8 | alert ntp any any -> any any (msg:"SURICATA NTP malformed response data"; flow:to_client; app-layer-event:ntp.malformed_data; classtype:protocol-command-decode; sid:2222001; rev:1;) 9 | -------------------------------------------------------------------------------- /logstash/rules/smtp-events.rules: -------------------------------------------------------------------------------- 1 | # SMTP event rules 2 | # 3 | # SID's fall in the 2220000+ range.
See http://doc.emergingthreats.net/bin/view/Main/SidAllocation 4 | # 5 | # These sigs fire at most once per connection. 6 | # 7 | # A flowint smtp.anomaly.count is incremented for each match. By default it will be 0. 8 | # 9 | alert smtp any any -> any any (msg:"SURICATA SMTP invalid reply"; flow:established,to_client; app-layer-event:smtp.invalid_reply; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220000; rev:1;) 10 | alert smtp any any -> any any (msg:"SURICATA SMTP unable to match reply with request"; flow:established,to_client; app-layer-event:smtp.unable_to_match_reply_with_request; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220001; rev:1;) 11 | alert smtp any any -> any any (msg:"SURICATA SMTP max command line len exceeded"; flow:established; app-layer-event:smtp.max_command_line_len_exceeded; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220002; rev:1;) 12 | alert smtp any any -> any any (msg:"SURICATA SMTP max reply line len exceeded"; flow:established,to_client; app-layer-event:smtp.max_reply_line_len_exceeded; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220003; rev:1;) 13 | alert smtp any any -> any any (msg:"SURICATA SMTP invalid pipelined sequence"; flow:established,to_server; app-layer-event:smtp.invalid_pipelined_sequence; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220004; rev:1;) 14 | alert smtp any any -> any any (msg:"SURICATA SMTP bdat chunk len exceeded"; flow:established; app-layer-event:smtp.bdat_chunk_len_exceeded; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220005; rev:1;) 15 | alert smtp any any -> any any (msg:"SURICATA SMTP no server welcome message"; flow:established,to_client; app-layer-event:smtp.no_server_welcome_message; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220006; rev:1;) 16 | alert smtp any any -> any any (msg:"SURICATA SMTP tls rejected"; flow:established; app-layer-event:smtp.tls_rejected; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220007; rev:1;) 17 | alert smtp any any -> any any (msg:"SURICATA SMTP data command rejected"; flow:established,to_client; app-layer-event:smtp.data_command_rejected; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220008; rev:1;) 18 | 19 | # SMTP MIME events 20 | #alert smtp any any -> any any (msg:"SURICATA SMTP Mime parser failed"; flow:established; app-layer-event:smtp.mime_parse_failed; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220009; rev:1;) 21 | #alert smtp any any -> any any (msg:"SURICATA SMTP Mime malformed message found"; flow:established; app-layer-event:smtp.mime_malformed_msg; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220010; rev:1;) 22 | #alert smtp any any -> any any (msg:"SURICATA SMTP Mime base64-decoding failed"; flow:established; app-layer-event:smtp.mime_invalid_base64; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220011; rev:1;) 23 | #alert smtp any any -> any any (msg:"SURICATA SMTP Mime header name len exceeded"; flow:established; app-layer-event:smtp.mime_long_header_name; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220012; rev:1;) 24 | #alert smtp any any -> any any (msg:"SURICATA SMTP Mime header value len exceeded"; flow:established; app-layer-event:smtp.mime_long_header_value; flowint:smtp.anomaly.count,+,1; 
classtype:protocol-command-decode; sid:2220013; rev:1;) 25 | #alert smtp any any -> any any (msg:"SURICATA SMTP Mime quoted-printable-decoding failed"; flow:established; app-layer-event:smtp.mime_invalid_qp; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220014; rev:1;) 26 | #alert smtp any any -> any any (msg:"SURICATA SMTP Mime line len exceeded"; flow:established; app-layer-event:smtp.mime_long_line; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220015; rev:1;) 27 | #alert smtp any any -> any any (msg:"SURICATA SMTP Mime encoded line len exceeded"; flow:established; app-layer-event:smtp.mime_long_enc_line; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220016; rev:1;) 28 | alert smtp any any -> any any (msg:"SURICATA SMTP Mime boundary length exceeded"; flow:established,to_server; app-layer-event:smtp.mime_long_boundary; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220017; rev:1;) 29 | 30 | alert smtp any any -> any any (msg:"SURICATA SMTP duplicate fields"; flow:established,to_server; app-layer-event:smtp.duplicate_fields; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220018; rev:1;) 31 | alert smtp any any -> any any (msg:"SURICATA SMTP unparsable content"; flow:established,to_server; app-layer-event:smtp.unparsable_content; flowint:smtp.anomaly.count,+,1; classtype:protocol-command-decode; sid:2220019; rev:1;) 32 | # next sid 2220020 33 | -------------------------------------------------------------------------------- /logstash/rules/so_rules.rules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HASecuritySolutions/elastic_stack/cf0a86470a3ffb6b1df6470120cedc939ab7d46d/logstash/rules/so_rules.rules -------------------------------------------------------------------------------- /logstash/rules/tls-events.rules: -------------------------------------------------------------------------------- 1 | # TLS event rules 2 | # 3 | # SID's fall in the 2230000+ range. See http://doc.emergingthreats.net/bin/view/Main/SidAllocation 4 | # 5 | # These sigs fire at most once per connection. 6 | # 7 | # A flowint tls.anomaly.count is incremented for each match. By default it will be 0. 
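# Illustrative sketch only (not part of the upstream ruleset): as with the HTTP
# anomaly counter above, the tls.anomaly.count flowint set by the rules below can
# be tested by a follow-up rule, here simply checking that it has been set at all.
# The sid is a hypothetical local-use placeholder.
#alert tls any any -> any any (msg:"EXAMPLE TLS anomaly counter set on flow"; flow:established; flowint:tls.anomaly.count,isset; classtype:protocol-command-decode; sid:1000002; rev:1;)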
8 | # 9 | alert tls any any -> any any (msg:"SURICATA TLS invalid SSLv2 header"; flow:established; app-layer-event:tls.invalid_sslv2_header; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230000; rev:1;) 10 | alert tls any any -> any any (msg:"SURICATA TLS invalid TLS header"; flow:established; app-layer-event:tls.invalid_tls_header; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230001; rev:1;) 11 | alert tls any any -> any any (msg:"SURICATA TLS invalid record version"; flow:established; app-layer-event:tls.invalid_record_version; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230015; rev:1;) 12 | alert tls any any -> any any (msg:"SURICATA TLS invalid record type"; flow:established; app-layer-event:tls.invalid_record_type; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230002; rev:1;) 13 | alert tls any any -> any any (msg:"SURICATA TLS invalid handshake message"; flow:established; app-layer-event:tls.invalid_handshake_message; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230003; rev:1;) 14 | alert tls any any -> any any (msg:"SURICATA TLS invalid certificate"; flow:established; app-layer-event:tls.invalid_certificate; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230004; rev:1;) 15 | alert tls any any -> any any (msg:"SURICATA TLS certificate missing element"; flow:established; app-layer-event:tls.certificate_missing_element; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230005; rev:1;) 16 | alert tls any any -> any any (msg:"SURICATA TLS certificate unknown element"; flow:established; app-layer-event:tls.certificate_unknown_element; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230006; rev:1;) 17 | alert tls any any -> any any (msg:"SURICATA TLS certificate invalid length"; flow:established; app-layer-event:tls.certificate_invalid_length; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230007; rev:1;) 18 | alert tls any any -> any any (msg:"SURICATA TLS certificate invalid string"; flow:established; app-layer-event:tls.certificate_invalid_string; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230008; rev:1;) 19 | alert tls any any -> any any (msg:"SURICATA TLS error message encountered"; flow:established; app-layer-event:tls.error_message_encountered; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230009; rev:1;) 20 | alert tls any any -> any any (msg:"SURICATA TLS invalid record/traffic"; flow:established; app-layer-event:tls.invalid_ssl_record; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230010; rev:1;) 21 | alert tls any any -> any any (msg:"SURICATA TLS heartbeat encountered"; flow:established; app-layer-event:tls.heartbeat_message; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230011; rev:1;) 22 | alert tls any any -> any any (msg:"SURICATA TLS overflow heartbeat encountered, possible exploit attempt (heartbleed)"; flow:established; app-layer-event:tls.overflow_heartbeat_message; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; reference:cve,2014-0160; sid:2230012; rev:1;) 23 | alert tls any any -> any any (msg:"SURICATA TLS invalid heartbeat encountered, possible exploit attempt (heartbleed)"; flow:established; app-layer-event:tls.invalid_heartbeat_message; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; reference:cve,2014-0160; 
sid:2230013; rev:1;) 24 | alert tls any any -> any any (msg:"SURICATA TLS invalid encrypted heartbeat encountered, possible exploit attempt (heartbleed)"; flow:established; app-layer-event:tls.dataleak_heartbeat_mismatch; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; reference:cve,2014-0160; sid:2230014; rev:1;) 25 | alert tls any any -> any any (msg:"SURICATA TLS multiple SNI extensions"; flow:established,to_server; app-layer-event:tls.multiple_sni_extensions; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230016; rev:1;) 26 | alert tls any any -> any any (msg:"SURICATA TLS invalid SNI type"; flow:established,to_server; app-layer-event:tls.invalid_sni_type; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230017; rev:1;) 27 | alert tls any any -> any any (msg:"SURICATA TLS invalid SNI length"; flow:established,to_server; app-layer-event:tls.invalid_sni_length; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230018; rev:1;) 28 | alert tls any any -> any any (msg:"SURICATA TLS handshake invalid length"; flow:established; app-layer-event:tls.handshake_invalid_length; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230019; rev:1;) 29 | alert tls any any -> any any (msg:"SURICATA TLS too many records in packet"; flow:established; app-layer-event:tls.too_many_records_in_packet; flowint:tls.anomaly.count,+,1; classtype:protocol-command-decode; sid:2230020; rev:1;) 30 | 31 | #next sid is 2230021 32 | -------------------------------------------------------------------------------- /logstash/rules/white_list.rules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HASecuritySolutions/elastic_stack/cf0a86470a3ffb6b1df6470120cedc939ab7d46d/logstash/rules/white_list.rules -------------------------------------------------------------------------------- /scripts/initialize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cp -f /opt/elastic_stack/docker-compose.yml.example /opt/elastic_stack/docker-compose.yml 3 | cp -f /opt/elastic_stack/curator/example/* /opt/elastic_stack/curator/ 4 | cp -f /opt/elastic_stack/cron/custom-cron.example /opt/elastic_stack/cron/custom-cron 5 | cp -f /opt/elastic_stack/elastalert/config.yaml.example /opt/elastic_stack/elastalert/config.yaml 6 | if [ ! 
-d /opt/elastic_stack/elastalert/rules ]; 7 | then 8 | mkdir /opt/elastic_stack/elastalert/rules 9 | fi 10 | cp -f /opt/elastic_stack/elastalert/example_rules/* /opt/elastic_stack/elastalert/rules/ 11 | cp -f /opt/elastic_stack/logstash/pipelines.yml.example /opt/elastic_stack/logstash/pipelines.yml 12 | cd /opt/elastic_stack 13 | docker-compose up --no-start 14 | -------------------------------------------------------------------------------- /scripts/prereq.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | VERSION=`cat /etc/lsb-release | grep DISTRIB_CODENAME | cut -d"=" -f2` 3 | SUDOUSER=`logname` 4 | MODEL=`sudo dmidecode -t system | grep "Product Name" | cut -d":" -f2 | sed 's/^ *//g'` 5 | apt update 6 | if [ $(dpkg-query -W -f='${Status}' git 2>/dev/null | grep -c "ok installed") -eq 0 ]; 7 | then 8 | echo "Installing git" 9 | apt install -y git 10 | else 11 | echo "Git is already installed" 12 | fi 13 | if [ $(dpkg-query -W -f='${Status}' curl 2>/dev/null | grep -c "ok installed") -eq 0 ]; 14 | then 15 | echo "Installing curl" 16 | apt install -y curl 17 | else 18 | echo "Curl is already installed" 19 | fi 20 | if [ $(dpkg-query -W -f='${Status}' nfs-common 2>/dev/null | grep -c "ok installed") -eq 0 ]; 21 | then 22 | echo "Installing nfs-common" 23 | apt install -y nfs-common 24 | else 25 | echo "NFS Common is already installed" 26 | fi 27 | 28 | if grep -q 'deb \[arch=amd64\] https://download.docker.com/linux/ubuntu' /etc/apt/sources.list 29 | then 30 | echo "Docker software repository is already installed" 31 | else 32 | echo "Docker software repository is not installed. Installing" 33 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 34 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 35 | sudo apt-get update 36 | if grep -q 'deb \[arch=amd64\] https://download.docker.com/linux/ubuntu' /etc/apt/sources.list 37 | then 38 | echo "Docker software repository is now installed" 39 | fi 40 | fi 41 | if [ $(dpkg-query -W -f='${Status}' docker-ce 2>/dev/null | grep -c "ok installed") -eq 0 ]; 42 | then 43 | echo "Installing docker" 44 | sudo apt-get install -y docker-ce 45 | sudo sed -i '/LimitCORE=infinity/ a LimitMEMLOCK=infinity' /lib/systemd/system/docker.service 46 | sudo systemctl daemon-reload 47 | sudo systemctl enable docker.service 48 | else 49 | echo "Docker is already installed" 50 | fi 51 | if grep docker /etc/group | grep -q ${SUDOUSER} 52 | then 53 | echo "Current user already member of docker group" 54 | else 55 | echo "Adding current user to docker group" 56 | sudo usermod -aG docker ${SUDOUSER} 57 | fi 58 | if [ -f /usr/local/bin/docker-compose ]; 59 | then 60 | echo "Docker Compose is already installed" 61 | else 62 | echo "Installing Docker Compose" 63 | sudo curl -L https://github.com/docker/compose/releases/download/1.19.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose 64 | sudo chmod +x /usr/local/bin/docker-compose 65 | fi 66 | if grep -q 'vm.max_map_count' /etc/sysctl.conf 67 | then 68 | echo "VM Max Map Count already configured" 69 | else 70 | echo "Setting vm.max_map_count to 262144" 71 | sudo sysctl -w vm.max_map_count=262144 72 | echo "vm.max_map_count=262144" | sudo tee -a /etc/sysctl.conf 73 | fi 74 | if grep -q 'docker - memlock unlimited' /etc/security/limits.conf 75 | then 76 | echo "Security limits already configured" 77 | else 78 | echo
"Setting security limits for elasticsearch, root, and docker" 79 | echo "elasticsearch - nofile 65535" | sudo tee -a /etc/security/limits.conf 80 | echo "elasticsearch - memlock unlimited" | sudo tee -a /etc/security/limits.conf 81 | echo "root - memlock unlimited" | sudo tee -a /etc/security/limits.conf 82 | echo "elasticsearch soft memlock unlimited" | sudo tee -a /etc/security/limits.conf 83 | echo "elasticsearch hard memlock unlimited" | sudo tee -a /etc/security/limits.conf 84 | echo "docker - nofile 65535" | sudo tee -a /etc/security/limits.conf 85 | echo "docker - memlock unlimited" | sudo tee -a /etc/security/limits.conf 86 | echo "docker soft memlock unlimited" | sudo tee -a /etc/security/limits.conf 87 | echo "docker hard memlock unlimited" | sudo tee -a /etc/security/limits.conf 88 | fi 89 | if grep -q 'swap' /etc/fstab 90 | then 91 | echo 'Disabling swap' 92 | sudo sed -i '/swap/d' /etc/fstab 93 | else 94 | echo 'Swap has already been disabled' 95 | fi 96 | -------------------------------------------------------------------------------- /winlogbeat_configs/winlogbeat_recommended_default.yml: -------------------------------------------------------------------------------- 1 | ###################### Winlogbeat Configuration Example ########################## 2 | 3 | # This file is an example configuration file highlighting only the most common 4 | # options. The winlogbeat.reference.yml file from the same directory contains all the 5 | # supported options with more comments. You can use it as a reference. 6 | # 7 | # You can find the full configuration reference here: 8 | # https://www.elastic.co/guide/en/beats/winlogbeat/index.html 9 | 10 | #======================= Winlogbeat specific options ========================== 11 | 12 | # event_logs specifies a list of event logs to monitor as well as any 13 | # accompanying options. The YAML data type of event_logs is a list of 14 | # dictionaries. 15 | # 16 | # The supported keys are name (required), tags, fields, fields_under_root, 17 | # forwarded, ignore_older, level, event_id, provider, and include_xml. Please 18 | # visit the documentation for the complete details of each option. 19 | # https://go.es.io/WinlogbeatConfig 20 | winlogbeat.event_logs: 21 | - name: Application 22 | event_id: -1001, -7036, -916 23 | - name: Security 24 | event_id: -4664, -4656, -4627 25 | - name: System 26 | event_id: -36874 27 | - name: Microsoft-Windows-PowerShell/Operational 28 | - name: Microsoft-Windows-Sysmon/Operational 29 | - name: Microsoft-Windows-DriverFrameworks-UserMode/Operational 30 | event_id: 2003, 2102 31 | 32 | 33 | #==================== Elasticsearch template setting ========================== 34 | 35 | #setup.template.settings: 36 | #index.number_of_shards: 3 37 | #index.codec: best_compression 38 | #_source.enabled: false 39 | 40 | #================================ General ===================================== 41 | 42 | # The name of the shipper that publishes the network data. It can be used to group 43 | # all the transactions sent by a single shipper in the web interface. 44 | #name: 45 | 46 | # The tags of the shipper are included in their own field with each 47 | # transaction published. 48 | #tags: ["service-X", "web-tier"] 49 | 50 | # Optional fields that you can specify to add additional information to the 51 | # output. 
52 | #fields: 53 | # env: staging 54 | 55 | 56 | #============================== Dashboards ===================================== 57 | # These settings control loading the sample dashboards to the Kibana index. Loading 58 | # the dashboards is disabled by default and can be enabled either by setting the 59 | # options here, or by using the `-setup` CLI flag or the `setup` command. 60 | #setup.dashboards.enabled: false 61 | 62 | # The URL from where to download the dashboards archive. By default this URL 63 | # has a value which is computed based on the Beat name and version. For released 64 | # versions, this URL points to the dashboard archive on the artifacts.elastic.co 65 | # website. 66 | #setup.dashboards.url: 67 | 68 | #============================== Kibana ===================================== 69 | 70 | # Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. 71 | # This requires a Kibana endpoint configuration. 72 | #setup.kibana: 73 | 74 | # Kibana Host 75 | # Scheme and port can be left out and will be set to the default (http and 5601) 76 | # In case you specify an additional path, the scheme is required: http://localhost:5601/path 77 | # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 78 | #host: "192.168.2.201:5601" 79 | 80 | #============================= Elastic Cloud ================================== 81 | 82 | # These settings simplify using winlogbeat with the Elastic Cloud (https://cloud.elastic.co/). 83 | 84 | # The cloud.id setting overwrites the `output.elasticsearch.hosts` and 85 | # `setup.kibana.host` options. 86 | # You can find the `cloud.id` in the Elastic Cloud web UI. 87 | #cloud.id: 88 | 89 | # The cloud.auth setting overwrites the `output.elasticsearch.username` and 90 | # `output.elasticsearch.password` settings. The format is `<user>:<pass>`. 91 | #cloud.auth: 92 | 93 | #================================ Outputs ===================================== 94 | 95 | # Configure what output to use when sending the data collected by the beat. 96 | 97 | #-------------------------- Elasticsearch output ------------------------------ 98 | #output.elasticsearch: 99 | # Array of hosts to connect to. 100 | #hosts: ["192.168.2.201:9200"] 101 | 102 | # Optional protocol and basic auth credentials. 103 | #protocol: "https" 104 | #username: "elastic" 105 | #password: "changeme" 106 | 107 | #----------------------------- Logstash output -------------------------------- 108 | output.logstash: 109 | # The Logstash hosts 110 | hosts: ["elasticsearch:5045"] 111 | 112 | # Optional SSL. By default it is off. 113 | # List of root certificates for HTTPS server verifications 114 | #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 115 | 116 | # Certificate for SSL client authentication 117 | #ssl.certificate: "/etc/pki/client/cert.pem" 118 | 119 | # Client Certificate Key 120 | #ssl.key: "/etc/pki/client/cert.key" 121 | 122 | #================================ Logging ===================================== 123 | 124 | # Sets log level. The default log level is info. 125 | # Available log levels are: critical, error, warning, info, debug 126 | #logging.level: debug 127 | 128 | # At debug level, you can selectively enable logging only for some components. 129 | # To enable all selectors use ["*"]. Examples of other selectors are "beat", 130 | # "publish", "service". 131 | #logging.selectors: ["*"] --------------------------------------------------------------------------------