├── CoreAnalyticsParser
│   ├── CoreAnalyticsParser.py
│   └── README.md
├── CrowdResponseSplunkApp
│   ├── SplunkCR README.txt
│   └── SplunkCR_2016Apr04.tar.gz
├── LICENSE
├── O365-Outlook-Activities
│   ├── README.md
│   ├── activities.py
│   ├── requirements.txt
│   └── retriever.py
├── PortHawk
│   ├── PortHawk-Deployment.sh
│   ├── porthawk-client.py
│   ├── porthawk-server-log.py
│   └── suricata.yaml
├── README.md
├── forensic-docs
│   └── X-Ways_Cheat_Sheet.pdf
├── sysmon
│   ├── sysmon_keywords.txt
│   └── sysmon_parse.cmd
├── ual-analyzer
│   ├── README.md
│   ├── databases
│   │   └── geoip
│   │       ├── COPYRIGHT.txt
│   │       ├── GeoLite2-City.mmdb
│   │       ├── LICENSE.txt
│   │       └── README.txt
│   ├── plugins
│   │   ├── enrichers.py
│   │   └── parsers.py
│   └── ual-analyzer.py
└── vshot
    └── vshot

/CoreAnalyticsParser/CoreAnalyticsParser.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | 
3 | '''
4 | @ author: Kshitij Kumar
5 | @ email: kshitij.kumar@crowdstrike.com
6 | 
7 | '''
8 | import sys
9 | import json
10 | import csv
11 | import pytz
12 | import glob
13 | import argparse
14 | import time
15 | import os
16 | import dateutil.parser as parser
17 | from collections import OrderedDict
18 | 
19 | 
20 | class data_writer:
21 | 
22 |     def __init__(self, name, headers, datatype, outputdir='./'):
23 |         self.name = name
24 |         self.datatype = datatype
25 |         self.headers = headers
26 |         self.output_filename = self.name+'.'+self.datatype
27 |         self.data_file_name = os.path.join(outputdir, self.output_filename)
28 | 
29 |         if self.datatype == 'csv':
30 |             with open(self.data_file_name, 'w') as data_file:
31 |                 writer = csv.writer(data_file)
32 |                 writer.writerow(headers)
33 |         elif self.datatype == 'json':
34 |             with open(self.data_file_name, 'w') as data_file:
35 |                 pass
36 | 
37 |     def write_entry(self, data):
38 |         if self.datatype == 'csv':
39 |             with open(self.data_file_name, 'a') as data_file:
40 |                 writer = csv.writer(data_file)
41 |                 writer.writerow(data)
42 |         elif self.datatype == 'json':
43 |             zipped_data = dict(zip(self.headers, data))
44 |             with open(self.data_file_name, 'a') as data_file:
45 |                 json.dump(zipped_data, data_file)
46 |                 data_file.write('\n')
47 | 
48 | 
49 | def stat(file):
50 |     os.environ['TZ'] = 'UTC0'
51 | 
52 |     stat = os.lstat(file)
53 |     mtime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(stat.st_mtime))
54 |     atime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(stat.st_atime))
55 |     ctime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(stat.st_ctime))
56 |     btime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(stat.st_birthtime))
57 | 
58 |     return({'mtime': mtime, 'atime': atime, 'ctime': ctime, 'btime': btime})
59 | 
60 | 
61 | def CoreAnalyticsParser():
62 |     aparser = argparse.ArgumentParser(
63 |         description="CoreAnalyticsParser.py: a script to parse core_analytics files to csv - \
64 |                      an artifact of Mach-O file execution that retains up to one month of data. This \
65 |                      artifact is only available on macOS 10.13 and higher."
66 | ) 67 | mutu = aparser.add_mutually_exclusive_group(required=True) 68 | mutu.add_argument('-i', '--input', default='./', help='input file or directory containing \ 69 | core_analytics files and/or aggregate files', required=False) 70 | mutu.add_argument('-d', '--disk', default=False, help='parse all core_analytics files and \ 71 | aggregates on current disk', required=False, action='store_true') 72 | aparser.add_argument('-o', '--outputdir', default='./', help='output directory', required=False) 73 | aparser.add_argument('-j', '--json', default=False, help='produce output in JSON format', 74 | required=False, action='store_true') 75 | args = aparser.parse_args() 76 | 77 | outfile_loc = os.path.join(args.outputdir, 'CoreAnalyticsParser_output') 78 | 79 | headers = ['src_report', 'diag_start', 'diag_end', 'name', 'uuid', 'processName', 80 | 'appDescription', 'appName', 'appVersion', 'foreground', 'uptime', 81 | 'uptime_parsed', 'powerTime', 'powerTime_parsed', 'activeTime', 'activeTime_parsed', 82 | 'activations', 'launches', 'activityPeriods', 'idleTimeouts', 'Uptime', 83 | 'Count', 'version', 'identifier', 'overflow'] 84 | 85 | if args.json: 86 | fmt = 'json' 87 | else: 88 | fmt = 'csv' 89 | 90 | output = data_writer('CoreAnalyticsParser_output', headers, fmt, args.outputdir) 91 | 92 | if os.geteuid() != 0: 93 | print ("[!] Running script without sudo, but trying to parse files on local disk.") 94 | print (" Parsing of aggregate files may fail!") 95 | 96 | # Parse .core_analytics files from input locaation or from their directory on disk. 97 | if args.disk: 98 | analytics_location = glob.glob('/Library/Logs/DiagnosticReports/Analytics*.core_analytics') 99 | elif args.input and not args.input.endswith('.core_analytics'): 100 | analytics_location = glob.glob(args.input+'/Analytics*.core_analytics') 101 | elif args.input and args.input.endswith('.core_analytics'): 102 | analytics_location = [args.input] 103 | 104 | if len(analytics_location) < 1: 105 | print ("[!] No .core_analytics files found.") 106 | else: 107 | print ("[+] Found {0} .core_analytics files to parse.".format(len(analytics_location))) 108 | 109 | counter = 0 110 | for file in analytics_location: 111 | data = open(file, 'r').read() 112 | data_lines = [json.loads(i) for i in data.split('\n') if i.startswith("{\"message\":")] 113 | 114 | try: 115 | diag_start = [json.loads(i) for i in data.split('\n') if 116 | i.startswith("{\"_marker\":") and "end-of-file" 117 | not in i][0]['startTimestamp'] 118 | except ValueError: 119 | diag_start = "ERROR" 120 | 121 | try: 122 | diag_end = [json.loads(i) for i in data.split('\n') if 123 | i.startswith("{\"timestamp\":")][0]['timestamp'] 124 | diag_end = str(parser.parse(diag_end).astimezone(pytz.utc)) 125 | diag_end = diag_end.replace(' ', 'T').replace('+00:00', 'Z') 126 | except ValueError: 127 | diag_end = "ERROR" 128 | 129 | for i in data_lines: 130 | record = OrderedDict((h, '') for h in headers) 131 | record['src_report'] = file 132 | record['diag_start'] = diag_start 133 | record['diag_end'] = diag_end 134 | record['name'] = i['name'] 135 | record['uuid'] = i['uuid'] 136 | 137 | # If any fields not currently recorded (based on the headers above) appear, 138 | # they will be added to overflow. 
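        # (For illustration: a hypothetical message key such as 'totalInstances' that is
        #  absent from the headers list would be preserved in the overflow column as
        #  {'totalInstances': <value>} rather than silently dropped.)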
139 | record['overflow'] = {} 140 | 141 | for k, v in i['message'].items(): 142 | if k in record.keys(): 143 | record[k] = i['message'][k] 144 | else: 145 | record['overflow'].update({k: v}) 146 | 147 | if len(record['overflow']) == 0: 148 | record['overflow'] = '' 149 | 150 | if record['uptime'] != '': 151 | record['uptime_parsed'] = time.strftime("%H:%M:%S", 152 | time.gmtime(record['uptime'])) 153 | 154 | if record['activeTime'] != '': 155 | record['activeTime_parsed'] = time.strftime("%H:%M:%S", 156 | time.gmtime(record['activeTime'])) 157 | 158 | if record['powerTime'] != '': 159 | record['powerTime_parsed'] = time.strftime("%H:%M:%S", 160 | time.gmtime(record['powerTime'])) 161 | 162 | if record['appDescription'] != '': 163 | record['appName'] = record['appDescription'].split(' ||| ')[0] 164 | record['appVersion'] = record['appDescription'].split(' ||| ')[1] 165 | 166 | line = record.values() 167 | output.write_entry(line) 168 | counter += 1 169 | 170 | # Parse aggregate files either from input location or from their directory on disk. 171 | if args.disk: 172 | agg_location = glob.glob('/private/var/db/analyticsd/aggregates/*') 173 | elif args.input: 174 | agg_location = [i for i in glob.glob(args.input+'/*-*-*-*') if '.' not in i] 175 | 176 | if len(agg_location) < 1: 177 | print ("[!] No aggregate files found.") 178 | else: 179 | print ("[+] Found {0} aggregate files to parse.".format(len(agg_location))) 180 | 181 | for aggregate in agg_location: 182 | data = open(aggregate, 'r').read() 183 | data_lines = json.loads(data) 184 | 185 | diag_start = stat(aggregate)['btime'] 186 | diag_end = stat(aggregate)['mtime'] 187 | 188 | raw = [i for i in data_lines if len(i) == 2 and (len(i[0]) == 3 and len(i[1]) == 7)] 189 | for i in raw: 190 | record = OrderedDict((h, '') for h in headers) 191 | 192 | record['src_report'] = aggregate 193 | record['diag_start'] = diag_start 194 | record['diag_end'] = diag_end 195 | record['uuid'] = os.path.basename(aggregate) 196 | record['processName'] = i[0][0] 197 | 198 | record['appDescription'] = i[0][1] 199 | if record['appDescription'] != '': 200 | record['appName'] = record['appDescription'].split(' ||| ')[0] 201 | record['appVersion'] = record['appDescription'].split(' ||| ')[1] 202 | 203 | record['foreground'] = i[0][2] 204 | 205 | record['uptime'] = i[1][0] 206 | record['uptime_parsed'] = time.strftime("%H:%M:%S", time.gmtime(i[1][0])) 207 | 208 | record['activeTime'] = i[1][1] 209 | record['activeTime_parsed'] = time.strftime("%H:%M:%S", time.gmtime(i[1][1])) 210 | 211 | record['launches'] = i[1][2] 212 | record['idleTimeouts'] = i[1][3] 213 | record['activations'] = i[1][4] 214 | record['activityPeriods'] = i[1][5] 215 | 216 | record['powerTime'] = i[1][6] 217 | record['powerTime_parsed'] = time.strftime("%H:%M:%S", time.gmtime(i[1][6])) 218 | 219 | line = record.values() 220 | output.write_entry(line) 221 | counter += 1 222 | 223 | if counter > 0: 224 | print ("[+] Wrote {0} lines to {1}.{2}.".format(counter, outfile_loc, fmt)) 225 | else: 226 | print ("[!] 
No output file generated.")
227 | 
228 | 
229 | if __name__ == "__main__":
230 | 
231 |     CoreAnalyticsParser()
232 | 
--------------------------------------------------------------------------------
/CoreAnalyticsParser/README.md:
--------------------------------------------------------------------------------
1 | # CoreAnalyticsParser
2 | 
3 | ## Purpose
4 | 
5 | This is a simple script that collates CoreAnalytics data (evidence of program execution) from .core_analytics files and aggregate files into CSV or JSON to make analysis of this artifact more efficient.
6 | 
7 | ## Prerequisites
8 | 
9 | This artifact only exists on macOS 10.13 and above. If you are attempting to test this script on your local machine, ensure that this OS requirement is met and that you are running the script with sudo in order to capture aggregate data (which resides in /private/var/db/analyticsd/aggregates/). Please also ensure that pytz and dateutil are installed on the system on which you are running the script.
10 | 
11 | ## Compatibility
12 | 
13 | This script supports Python 2.7.
14 | 
15 | ## Requirements
16 | 
17 | - dateutil.parser
18 | - pytz
19 | 
20 | ## Usage
21 | 
22 | At its simplest, you can run CoreAnalyticsParser against your local machine with the following invocation:
23 | 
24 |     sudo CoreAnalyticsParser.py -d
25 | 
26 | The script will output a file named "CoreAnalyticsParser_output.csv" to the current working directory from which the script is called.
27 | 
28 | If you have collected these files and the aggregate files from an image, it is possible to point the script at a flat directory that contains all files you would like to analyze (both .core_analytics and aggregate files) with the -i/--input flag. The output directory can be specified with the -o/--outputdir flag.
29 | 
30 |     CoreAnalyticsParser.py -i /path/to/core_analytics_files -o /path/to/outputdir
31 | 
32 | If you would like the script to output JSON rather than CSV (the default), use the -j/--json flag.
33 | 
34 |     sudo CoreAnalyticsParser.py -d -o /path/to/outputdir -j
35 | 
36 | ## Output
37 | 
38 | A JSON record from the script's output may appear as below. This record includes all fields that are included per record by default.
39 | 40 | { 'src_report': '/path/to/Analytics_2018-06-29-173717_ML-C02PA037R9QZ.core_analytics', 41 | 'diag_start': '2018-06-29T00:00:09Z', 42 | 'diag_end': '2018-06-30T00:37:17.660000Z', 43 | 'name': 'comappleosanalyticsappUsage', 44 | 'uuid': '4d7c9e4a-8c8c-4971-bce3-09d38d078849', 45 | 'processName': 'Google Chrome', 46 | 'appDescription': 'com.google.Chrome ||| 67.0.3396.87 (3396.87)', 47 | 'appName': 'com.google.Chrome', 48 | 'appVersion': '67.0.3396.87 (3396.87)', 49 | 'foreground': 'YES', 50 | 'uptime': '26110', 51 | 'uptime_parsed': '7:15:10', 52 | 'powerTime': '12537', 53 | 'powerTime_parsed': '3:28:57', 54 | 'activeTime': '4250', 55 | 'activeTime_parsed': '1:10:50', 56 | 'activations': '105', 57 | 'launches': '0', 58 | 'activityPeriods': '12', 59 | 'idleTimeouts': '4', 60 | 'overflow': ''} 61 | 62 | -------------------------------------------------------------------------------- /CrowdResponseSplunkApp/SplunkCR README.txt: -------------------------------------------------------------------------------- 1 | CrowdResponse Splunk Application 2 | 3 | Author: Paul Jaramillo, DFIR_Janitor 4 | Company: CrowdStrike 5 | Credits: Robin Keir, Danny Lungstrom, Ashley Nuckols, Josh Liburdi, Brandan Finney 6 | 7 | Current Version: 0.9 8 | Published Date: 2015 May 30 9 | Last Change: 2015 May 30 10 | 11 | Dependencies: Splunk 6.2 or newer, CrowdResponse for host data collection 12 | 13 | Functions: 14 | - Data Summary 15 | - Host Finder 16 | - Module Searches - Quick Wins 17 | - Methodology Searches - Hunting 18 | - Timeline (2.0 Planned) 19 | - Intel (2.0 Planned) 20 | - Reports (2.0 Planned) -------------------------------------------------------------------------------- /CrowdResponseSplunkApp/SplunkCR_2016Apr04.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CrowdStrike/Forensics/6d6a2aa2b9d023667d45cdaaa822e408ac058e06/CrowdResponseSplunkApp/SplunkCR_2016Apr04.tar.gz -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, CrowdStrike, Inc. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 17 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 20 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
23 | -------------------------------------------------------------------------------- /O365-Outlook-Activities/README.md: -------------------------------------------------------------------------------- 1 | # O365-Outlook-Activities 2 | 3 | ## NOTE ## 4 | 5 | Access to the Activities API has been restricted by Microsoft. Accordingly, there are no plans to develop this tool any further. 6 | 7 | ## Description 8 | 9 | This tool leverages the [Office 365 Outlook Activities API](https://www.crowdstrike.com/blog/hiding-in-plain-sight-using-the-office-365-activities-api-to-investigate-business-email-compromises/). It will continuously fetch and write activities to a CSV file until all activities matching the specified criteria have been retrieved. 10 | 11 | ### Compatibility 12 | 13 | This tool supports all versions of Python 3. 14 | 15 | ### Requirements 16 | 17 | - requests 18 | 19 | ### Installation 20 | 21 | Download activities.py and retriever.py. 22 | 23 | ### Access token 24 | 25 | This tool requires a valid OAuth 2.0 access token. For testing purposes, a token can be obtained from the [Outlook Dev Center - OAuth Sandbox](https://oauthplay.azurewebsites.net). Supply the token to the tool by setting an `OAUTH_TOKEN` environment variable (preferred method) or by including it as a command-line argument. 26 | 27 | ### Usage 28 | 29 | ``` 30 | usage: retriever.py --user --output 31 | [--token ] [--start ] 32 | [--end ] [--types [ ...]] 33 | 34 | --user Target user (user principal name) 35 | --output CSV output filename 36 | --token OAuth access token 37 | --start Start timestamp (ISO 8601) 38 | --end End timestamp (ISO 8601) 39 | --types [ ...] Space-delimited list of activity types 40 | 41 | ``` 42 | 43 | ### Examples 44 | 45 | Example 1: Retrieve `MessageDelivered` activities that occurred after January 1: 46 | ``` 47 | python retriever.py --user victim@contoso.com --output activities.csv --types MessageDelivered --start 2018-01-01T00:00:00Z 48 | ``` 49 | Example 2: Retrieve `ServerLogon` and `SearchResult` activities that occurred in the month of May: 50 | ``` 51 | python retriever.py --user victim@contoso.com --output activities.csv --types ServerLogon SearchResult --start 2018-05-01T00:00:00Z --end 2018-05-31T23:59:59Z 52 | ``` 53 | Example 3: Retrieve the entire history of activities for a user. (NOTE: This may take a long time) 54 | ``` 55 | python retriever.py --user victim@contoso.com --output activities.csv 56 | ``` 57 | 58 | 59 | -------------------------------------------------------------------------------- /O365-Outlook-Activities/activities.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """This module wraps basic functions of the Office 365 Outlook Activities API. 
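
A minimal usage sketch (illustrative only; the token and mailbox below are
placeholders, and OutlookService and Activity are the classes defined in this
module):

    service = OutlookService('<oauth-access-token>')
    activities = service.get_activities('victim@contoso.com', top=10)
    for activity in activities:
        print(activity.TimeStamp, activity.ActivityIdType)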
3 | """ 4 | 5 | import requests 6 | 7 | 8 | class Activity(object): 9 | """Represents an Office 365 Outlook Activity 10 | """ 11 | 12 | def __init__(self, activity): 13 | custom_props = activity.pop('CustomProperties', None) 14 | self.__dict__ = activity 15 | 16 | if custom_props: 17 | self.CustomProperties = {prop['Name']: prop['Value'] 18 | for prop in custom_props} 19 | 20 | 21 | class OutlookService(object): 22 | """Provides a simple abstraction layer and basic HTTP exception handling 23 | """ 24 | 25 | base_url = "https://outlook.office.com/api/v2.0/Users('{}')" 26 | headers = {'Accept': 'application/json; odata.metadata=none', 27 | 'Prefer': 'exchange.behavior="ActivityAccess"', 28 | 'User-Agent': 'PythonOutlookService/1.0'} 29 | timeout = 10 30 | 31 | def __init__(self, access_token): 32 | self.access_token = access_token 33 | self.request_session = requests.Session() 34 | self.request_session.timeout = self.timeout 35 | self.request_session.auth = OAuth(self.access_token) 36 | self.request_session.headers.update(self.headers) 37 | 38 | def get_activities(self, user, **kwargs): 39 | api_url = self.base_url.format(user) + "/Activities?" 40 | params = {'${}'.format(param): value 41 | for param, value in kwargs.items()} 42 | 43 | response = self.request_session.get(api_url, params=params) 44 | self._handle_errors(response) 45 | activities = [Activity(activity) 46 | for activity in response.json()['value']] 47 | 48 | return activities 49 | 50 | def _handle_errors(self, response): 51 | if 199 < response.status_code < 300: 52 | return True 53 | elif response.status_code == 401: 54 | error = self._get_auth_error_from_headers(response.headers) 55 | elif 299 < response.status_code < 500: 56 | try: 57 | error = response.json()['error']['message'] 58 | except ValueError: 59 | error = 'Client error.' 60 | else: 61 | error = 'Server error.' 62 | 63 | raise ValueError('HTTP {}: {}'.format(response.status_code, error)) 64 | 65 | @staticmethod 66 | def _get_auth_error_from_headers(headers): 67 | fields = headers.get('x-ms-diagnostics') 68 | if fields: 69 | for field in fields.split(';'): 70 | if field.startswith('reason'): 71 | return field.split('=')[1][1:-1] 72 | 73 | 74 | class OAuth(requests.auth.AuthBase): 75 | def __init__(self, access_token): 76 | self.access_token = access_token 77 | 78 | def __call__(self, request): 79 | request.headers['Authorization'] = 'Bearer {}'.format( 80 | self.access_token) 81 | return request 82 | -------------------------------------------------------------------------------- /O365-Outlook-Activities/requirements.txt: -------------------------------------------------------------------------------- 1 | requests==2.20.0 2 | -------------------------------------------------------------------------------- /O365-Outlook-Activities/retriever.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """This tool retrieves Office 365 Outlook Activities for a specified user. 3 | 4 | Obtain a valid OAuth 2.0 access token from https://oauthplay.azurewebsites.net 5 | or implement your own OAuth 2.0 flow for Azure AD. 
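
A typical invocation, with the token supplied via the environment (the mailbox
and dates below are placeholders):

    export OAUTH_TOKEN='<oauth-access-token>'
    python retriever.py --user victim@contoso.com --output activities.csv \
        --types ServerLogon SearchResult --start 2018-05-01T00:00:00Z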
6 | """ 7 | 8 | import argparse 9 | import csv 10 | import os 11 | import sys 12 | 13 | from activities import OutlookService 14 | 15 | 16 | # Max number of activities per batch (1 - 1000) 17 | BATCH_SIZE = 1000 18 | 19 | # List of properties to retrieve 20 | PROPERTIES = ['TimeStamp', 21 | 'AppIdType', 22 | 'ActivityIdType', 23 | 'ActivityItemId', 24 | 'ActivityCreationTime', 25 | 'ClientSessionId', 26 | 'CustomProperties'] 27 | 28 | # Parse arguments 29 | parser = argparse.ArgumentParser( 30 | description='Retrieve Office 365 Outlook Activities') 31 | parser.add_argument('--user', metavar='', 32 | help='Target user (user principal name)', required=True) 33 | parser.add_argument('--output', metavar='', 34 | help='CSV output filename', required=True) 35 | parser.add_argument('--token', metavar='', 36 | help='OAuth access token', required=False) 37 | parser.add_argument('--start', metavar='', 38 | help='Start timestamp (ISO 8601)', required=False) 39 | parser.add_argument('--end', metavar='', 40 | help='End timestamp (ISO 8601)', required=False) 41 | parser.add_argument('--types', metavar='', 42 | help='Space-delimited list of activity types', nargs='+', 43 | required=False) 44 | args = parser.parse_args() 45 | 46 | # Verify access token was supplied 47 | access_token = args.token if args.token else os.environ.get('OAUTH_TOKEN') 48 | if not access_token: 49 | print("An access token must be supplied via the '--token' command-line " 50 | "argument or via the 'OAUTH_TOKEN' environment variable.") 51 | sys.exit(1) 52 | 53 | # Construct filter expression 54 | filters = [] 55 | if args.start: 56 | filters.append('(TimeStamp ge {})'.format(args.start)) 57 | if args.end: 58 | filters.append('(TimeStamp le {})'.format(args.end)) 59 | if args.types: 60 | types = ["ActivityIdType eq '{}'".format( 61 | activity_type) for activity_type in args.types] 62 | filters.append('({})'.format(' or '.join(types))) 63 | filter_expression = ' and '.join(filters) 64 | 65 | # Create Outlook service 66 | service = OutlookService(access_token) 67 | 68 | # Begin processing activities 69 | batches = 0 70 | while True: 71 | try: 72 | 73 | # Retrieve batch of activities 74 | activities = service.get_activities( 75 | args.user, filter=filter_expression, 76 | top=BATCH_SIZE, skip=batches * BATCH_SIZE, 77 | select=','.join(PROPERTIES)) 78 | 79 | # Exit if error occurred issuing request 80 | except (IOError, ValueError) as error: 81 | print(error) 82 | sys.exit(1) 83 | 84 | if batches == 0: 85 | 86 | # Exit if no activities returned 87 | if not activities: 88 | print('No activities returned using the specified criteria.') 89 | sys.exit(1) 90 | 91 | try: 92 | 93 | # Create CSV file and write header 94 | csv_file = open(args.output, 'w+', encoding='utf-8-sig') 95 | writer = csv.DictWriter( 96 | csv_file, extrasaction='ignore', 97 | fieldnames=PROPERTIES, lineterminator='\n') 98 | writer.writeheader() 99 | 100 | # Exit if error occurred writing to file 101 | except IOError as error: 102 | print(error) 103 | sys.exit(1) 104 | 105 | print('Retrieving activities', end='') 106 | 107 | # Write rows to CSV file 108 | for activity in activities: 109 | writer.writerow(vars(activity)) 110 | 111 | # Print status and flush buffers 112 | print('.', end='') 113 | sys.stdout.flush() 114 | csv_file.flush() 115 | 116 | # Break if final batch 117 | if len(activities) < BATCH_SIZE: 118 | break 119 | batches += 1 120 | 121 | # Close file and print completion status 122 | csv_file.close() 123 | print('\nSuccessfully retrieved {} activities.'.format( 
124 | batches * BATCH_SIZE + len(activities))) 125 | -------------------------------------------------------------------------------- /PortHawk/PortHawk-Deployment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #get internet target ip address 4 | echo -n "Please enter the Internet IP Address for the Server: " 5 | read internetipaddress 6 | sed -i -e "s/REPLACETHISIPADDRESSREPLACETHISIPADDRESS/$internetipaddress/" porthawk-client.py 7 | 8 | # add repo 9 | sudo apt-add-repository -y ppa:oisf/suricata-stable 10 | 11 | # basic update 12 | sudo apt-get -y --force-yes update 13 | sudo apt-get -y --force-yes upgrade 14 | 15 | # IP table rules to not respond to malformed packets sent 16 | sudo iptables -A OUTPUT -p tcp --tcp-flags RST RST -j DROP 17 | sudo iptables -A OUTPUT -p icmp -j DROP 18 | 19 | # install apps 20 | sudo apt-get -y install suricata 21 | sudo apt-get -y install build-essential 22 | sudo apt-get -y install python-dev 23 | sudo apt-get -y install python-pip 24 | sudo pip install --upgrade pip 25 | sudo pip install pycrypto 26 | sudo echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections 27 | sudo echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections 28 | sudo echo iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections 29 | sudo apt-get -y install iptables-persistent 30 | 31 | # porthawk rules 32 | echo "alert ip any any -> $internetipaddress any (msg:\"porthawk\";content:\"porthawk\";)" > /etc/suricata/rules/porthawk.rules 33 | sudo mv suricata.yaml /etc/suricata/suricata.yaml 34 | sudo sed -i 's/eth0/'"$(ifconfig | grep $(hostname -I) -B 1 | grep -Eo '^[^ ]+')"'/g' /etc/suricata/suricata.yaml 35 | 36 | # create the suricata user 37 | sudo adduser --disabled-password --gecos "" suri 38 | sudo addgroup suri 39 | sudo adduser suri suri 40 | sudo mkdir -p /var/log/suricata/pcaps 41 | sudo chown -R suri:suri /var/log/suricata/ 42 | sudo chown -R suri:suri /etc/suricata/ 43 | sudo mkdir -p /home/suri/porthawk/engagements 44 | 45 | #generate RSA keys used to encrypt engagement name within the traffic 46 | openssl genrsa 512 > privkey.pem 47 | openssl rsa -in privkey.pem -outform PEM -pubout -out pubkey.pem 48 | #inserting priv key to server log script 49 | echo -n "RSAPrivKey = \"\"\"" > out.tmp 50 | cat privkey.pem >> out.tmp 51 | echo \"\"\" >> out.tmp 52 | sed -i -e "/REPLACETHISRSAKEYREPLACETHISRSAKEYREPLACETHISRSAKEY/r out.tmp" porthawk-server-log.py 53 | rm out.tmp 54 | 55 | #inserting pub key to client 56 | echo -n "pubKey = \"\"\"" > out.tmp 57 | cat pubkey.pem >> out.tmp 58 | echo \"\"\" >> out.tmp 59 | sed -i -e "/RSAPUBKEYREPLACERSAPUBKEYREPLACERSAPUBKEYREPLACE/r out.tmp" porthawk-client.py 60 | rm out.tmp 61 | 62 | #move python script and files over and make sure permissions are right 63 | sudo mv porthawk-server-log.py /home/suri/porthawk/porthawk-server-log.py 64 | sudo mv privkey.pem /home/suri/porthawk/ 65 | sudo mv pubkey.pem /home/suri/porthawk/ 66 | sudo mv porthawk-client.py /home/suri/porthawk/ 67 | sudo chown -R suri:suri /home/suri/ 68 | sudo chown -R suri:suri /var/log/suricata 69 | #sudo chown -R suri:suri /etc/suricata 70 | 71 | # cronjobs 72 | ## Every day at 4 am, kill suricata, remove the eve.json file, and then restart suricata 73 | (sudo crontab -l ; echo "0 4 * * * sudo kill -15 \$\(sudo cat /var/run/suricata.pid\); sudo rm /var/log/suricata/eve.json; sleep 30; sudo suricata -c 
/etc/suricata/suricata.yaml -i $(ifconfig | grep $(hostname -I) -B 1 | grep -Eo '^[^ ]+') --user=suri --group=suri -D")| sudo crontab - 74 | ## On boot, start suricata 75 | (sudo crontab -l ; echo "@reboot sudo suricata -c /etc/suricata/suricata.yaml -i $(ifconfig | grep $(hostname -I) -B 1 | grep -Eo '^[^ ]+') -user=suri -group=suri -D")| sudo crontab - 76 | ## On boot, start the python file as user 'suri' 77 | (sudo crontab -l ; echo "@reboot sudo nohup /usr/bin/python /home/suri/porthawk/porthawk-server-log.py &")| sudo crontab - 78 | 79 | # remove packets suricata caught while started 80 | sudo rm /var/log/suricata/eve.json 81 | 82 | # prompt for a reboot 83 | clear 84 | echo "" 85 | echo "====================" 86 | echo " TIME FOR A REBOOT! " 87 | echo "====================" 88 | echo "" 89 | 90 | sudo shutdown -r now 91 | -------------------------------------------------------------------------------- /PortHawk/porthawk-client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | logging.getLogger("scapy.runtime").setLevel(logging.ERROR) 3 | import socket 4 | import sys, getopt 5 | from Crypto.PublicKey import RSA 6 | from scapy.all import * 7 | 8 | # RSAPUBKEYREPLACERSAPUBKEYREPLACERSAPUBKEYREPLACE 9 | 10 | encryptKey = RSA.importKey(pubKey) 11 | 12 | def portHawk(engagementName, hostname, interface, verbose): 13 | 14 | trigger = 'porthawk' 15 | if hostname == '': 16 | hostname = socket.gethostname() 17 | 18 | data = "[%s](%s)" % (engagementName, hostname) 19 | data = trigger + encryptKey.encrypt(data, 32)[0].encode('hex') 20 | serverIP = "REPLACETHISIPADDRESSREPLACETHISIPADDRESS" 21 | 22 | if interface != '': 23 | s = conf.L3socket(iface=interface) 24 | if verbose: 25 | import progressbar 26 | bar = progressbar.ProgressBar() 27 | # send ICMP Packets type 0-255 28 | print "sending ICMP packets..." 29 | for n in bar(range(0,256)): 30 | s.send(IP(dst=serverIP) /ICMP(type=n)/str(data)) 31 | 32 | # send UDP Packets 33 | print "sending UDP packets..." 34 | bar = progressbar.ProgressBar() 35 | for n in bar(range(0,65536)): 36 | s.send(IP(dst=serverIP) / UDP(dport=n) / Raw(load=data)) 37 | 38 | # send TCP Packets 39 | print "sending TCP packets..." 40 | bar = progressbar.ProgressBar() 41 | for n in bar(range(0,65536)): 42 | s.send(IP(dst=serverIP) / TCP(dport=n) / Raw(load=data)) 43 | else: 44 | # send ICMP 45 | for n in range(0,256): 46 | s.send(IP(dst=serverIP) /ICMP(type=n)/str(data)) 47 | 48 | # send UDP Packets 49 | for n in range(0,65536): 50 | s.send(IP(dst=serverIP) / UDP(dport=n) / Raw(load=data)) 51 | 52 | # send TCP Packets 53 | for n in range(0,65536): 54 | s.send(IP(dst=serverIP) / TCP(dport=n) / Raw(load=data)) 55 | else: 56 | if verbose: 57 | import progressbar 58 | bar = progressbar.ProgressBar() 59 | # send ICMP Packets type 0-255 60 | print "sending ICMP packets..." 61 | for n in bar(range(0,256)): 62 | send(IP(dst=serverIP) /ICMP(type=n)/str(data), verbose=0) 63 | 64 | # send UDP Packets 65 | print "sending UDP packets..." 66 | bar = progressbar.ProgressBar() 67 | for n in bar(range(0,65536)): 68 | send(IP(dst=serverIP) / UDP(dport=n) / Raw(load=data), verbose=0) 69 | 70 | # send TCP Packets 71 | print "sending TCP packets..." 
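        # Note: on this no-interface path each scapy send() call opens and closes its
        # own layer-3 socket per packet, so sweeping 256 ICMP types plus 65536 UDP and
        # 65536 TCP ports is much slower than the -i path above, which reuses a single
        # conf.L3socket (hence the "specify this for speed" hint in the usage string).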
72 | bar = progressbar.ProgressBar() 73 | for n in bar(range(0,65536)): 74 | send(IP(dst=serverIP) / TCP(dport=n) / Raw(load=data), verbose=0) 75 | else: 76 | # send ICMP 77 | for n in range(0,256): 78 | send(IP(dst=serverIP) /ICMP(type=n)/str(data), verbose=0) 79 | 80 | # send UDP Packets 81 | for n in range(0,65536): 82 | send(IP(dst=serverIP) / UDP(dport=n) / Raw(load=data), verbose=0) 83 | 84 | # send TCP Packets 85 | for n in range(0,65536): 86 | send(IP(dst=serverIP) / TCP(dport=n) / Raw(load=data), verbose=0) 87 | 88 | 89 | def main(argv): 90 | engagementName = '' 91 | hostname = '' 92 | interface = '' 93 | verbose = False 94 | 95 | usage = 'usage: porthawk.py -e engagementName -n (optional) Computer Name -i (optional - specify this for speed) interface --verbose (optional)' 96 | try: 97 | opts, args = getopt.getopt(argv,"e:n:i:v:h", ["engagement=","name=","verbose","help"]) 98 | except getopt.GetoptError: 99 | print usage 100 | sys.exit(2) 101 | for opt, arg in opts: 102 | if opt in ("-h", "--help"): 103 | print usage 104 | sys.exit() 105 | elif opt in ("-e", "--engagement"): 106 | engagementName = arg 107 | elif opt in ("-v", "--verbose"): 108 | verbose = True 109 | elif opt in ("-i", "--interface"): 110 | interface = arg 111 | elif opt in ("-n", "--name"): 112 | hostname = arg 113 | 114 | if engagementName == '': 115 | print usage 116 | sys.exit() 117 | 118 | return (engagementName, hostname, interface, verbose) 119 | 120 | if __name__ == "__main__": 121 | portHawk(*main(sys.argv[1:])) 122 | -------------------------------------------------------------------------------- /PortHawk/porthawk-server-log.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pprint import pprint 3 | import bisect 4 | import time 5 | from Crypto.PublicKey import RSA 6 | import os 7 | import subprocess 8 | os.nice(10) # to prevent this script from locking up the server 9 | 10 | # REPLACETHISRSAKEYREPLACETHISRSAKEYREPLACETHISRSAKEY 11 | 12 | 13 | decryptKey = RSA.importKey(RSAPrivKey) 14 | 15 | while True: 16 | database = {} 17 | try: # script could try to run while eve.json is being wiped 18 | with open ('/var/log/suricata/eve.json') as data_file: 19 | for suricata_hit in data_file: 20 | packet = json.loads(suricata_hit) 21 | try: 22 | if packet['alert']['signature'] == 'porthawk': 23 | packetContent = packet['payload_printable'] 24 | try: 25 | if packetContent[:8] == 'porthawk': 26 | classification = decryptKey.decrypt(packetContent[8:].decode("hex")) 27 | engagement = classification[classification.index("[") + 1:classification.rindex("]")] 28 | hostname = classification[classification.index("(") + 1:classification.rindex(")")] 29 | protocol = packet['proto'] 30 | 31 | def insertPacket(protocol): 32 | if protocol == 'ICMP': 33 | if packet['icmp_type'] not in database[engagement][hostname]['ICMP_type']: 34 | bisect.insort(database[engagement][hostname]['ICMP_type'], packet['icmp_type']) 35 | 36 | elif protocol == 'UDP': 37 | if packet['dest_port'] not in database[engagement][hostname]['UDP_dest_port']: 38 | bisect.insort(database[engagement][hostname]['UDP_dest_port'], packet['dest_port']) 39 | 40 | elif protocol == 'TCP': 41 | if packet['dest_port'] not in database[engagement][hostname]['TCP_dest_port']: 42 | bisect.insort(database[engagement][hostname]['TCP_dest_port'], packet['dest_port']) 43 | 44 | # has the database seen the engagement before 45 | if engagement in database: 46 | if hostname in database[engagement]: # the hostname AND 
engagement is there 47 | insertPacket(protocol) 48 | else: # add the hostname, and initialize ICMP/UDP/TCP:[] (engagement is there) 49 | database[engagement].update({hostname:{'ICMP_type':[],'UDP_dest_port':[],'TCP_dest_port':[]}}) 50 | insertPacket(protocol) 51 | else: # add the engagement and host to the database 52 | database[engagement] = {hostname:{'ICMP_type':[],'UDP_dest_port':[],'TCP_dest_port':[]}} 53 | insertPacket(protocol) 54 | except ValueError: # this prevents packets that are false positives from breaking program 55 | continue 56 | except KeyError: 57 | continue 58 | 59 | 60 | for key,value in database.iteritems(): 61 | file_name = '/home/suri/porthawk/engagements/%s.json' % key 62 | #if logic here to see if engagement name file exists 63 | if os.path.exists(file_name): #if the file does exist, check to see if it has that host 64 | with open (file_name, 'r+') as existing_engagement: 65 | new_hosts = [] 66 | new_hosts_flag = False 67 | for host_name in existing_engagement: # for each host existing in the .json file 68 | for collected_hostname,associated_ports in value.iteritems(): # for each collected hostname 69 | if collected_hostname in host_name: # if it is there, existing, then pass 70 | pass 71 | else: # else it is a new host in an existing engagement, possibly one of many, add it to a 'to-be appended' db 72 | new_hosts.append({collected_hostname:associated_ports}) 73 | new_hosts_flag = True 74 | 75 | if new_hosts_flag: 76 | # remove the last '}', so the program can add the new hosts within the JSON object 77 | existing_engagement.seek(-1, os.SEEK_END) 78 | existing_engagement.truncate() 79 | for entry in new_hosts: 80 | # comma separate the entries, and clip the {} json.dumps puts on the data by default 81 | existing_engagement.write(",") 82 | existing_engagement.write(json.dumps(entry)[1:-1]) 83 | existing_engagement.write("}") 84 | else: # if the engagement does not exist, add it 85 | with open(file_name, 'w') as f: 86 | f.write(json.dumps(value)) 87 | except IOError: # script could try to run while eve.json is being wiped, so wait 5 seconds 88 | time.sleep(5) 89 | 90 | subprocess.call("chmod -R 555 /home/suri/porthawk/engagements", shell=True) 91 | time.sleep(900) # script runs every 15 min 92 | -------------------------------------------------------------------------------- /PortHawk/suricata.yaml: -------------------------------------------------------------------------------- 1 | %YAML 1.1 2 | --- 3 | 4 | # Suricata configuration file. 
In addition to the comments describing all 5 | # options in this file, full documentation can be found at: 6 | # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml 7 | 8 | ## 9 | 10 | vars: 11 | # more specifc is better for alert accuracy and performance 12 | address-groups: 13 | HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]" 14 | #HOME_NET: "[192.168.0.0/16]" 15 | #HOME_NET: "[10.0.0.0/8]" 16 | #HOME_NET: "[172.16.0.0/12]" 17 | #HOME_NET: "any" 18 | 19 | EXTERNAL_NET: "!$HOME_NET" 20 | #EXTERNAL_NET: "any" 21 | 22 | HTTP_SERVERS: "$HOME_NET" 23 | SMTP_SERVERS: "$HOME_NET" 24 | SQL_SERVERS: "$HOME_NET" 25 | DNS_SERVERS: "$HOME_NET" 26 | TELNET_SERVERS: "$HOME_NET" 27 | AIM_SERVERS: "$EXTERNAL_NET" 28 | DNP3_SERVER: "$HOME_NET" 29 | DNP3_CLIENT: "$HOME_NET" 30 | MODBUS_CLIENT: "$HOME_NET" 31 | MODBUS_SERVER: "$HOME_NET" 32 | ENIP_CLIENT: "$HOME_NET" 33 | ENIP_SERVER: "$HOME_NET" 34 | 35 | port-groups: 36 | HTTP_PORTS: "80" 37 | SHELLCODE_PORTS: "!80" 38 | ORACLE_PORTS: 1521 39 | SSH_PORTS: 22 40 | DNP3_PORTS: 20000 41 | MODBUS_PORTS: 502 42 | 43 | 44 | ## 45 | ## Step 2: select the rules to enable or disable 46 | ## 47 | 48 | default-rule-path: /etc/suricata/rules 49 | rule-files: 50 | # - botcc.rules 51 | # - botcc.portgrouped.rules 52 | # - ciarmy.rules 53 | # - compromised.rules 54 | # - drop.rules 55 | # - dshield.rules 56 | # - emerging-activex.rules 57 | # - emerging-attack_response.rules 58 | # - emerging-chat.rules 59 | # - emerging-current_events.rules 60 | # - emerging-dns.rules 61 | # - emerging-dos.rules 62 | # - emerging-exploit.rules 63 | # - emerging-ftp.rules 64 | # - emerging-games.rules 65 | # - emerging-icmp_info.rules 66 | # - emerging-icmp.rules 67 | # - emerging-imap.rules 68 | # - emerging-inappropriate.rules 69 | # - emerging-info.rules 70 | # - emerging-malware.rules 71 | # - emerging-misc.rules 72 | # - emerging-mobile_malware.rules 73 | # - emerging-netbios.rules 74 | # - emerging-p2p.rules 75 | # - emerging-policy.rules 76 | # - emerging-pop3.rules 77 | # - emerging-rpc.rules 78 | # - emerging-scada.rules 79 | # - emerging-scada_special.rules 80 | # - emerging-scan.rules 81 | # - emerging-shellcode.rules 82 | # - emerging-smtp.rules 83 | # - emerging-snmp.rules 84 | # - emerging-sql.rules 85 | # - emerging-telnet.rules 86 | # - emerging-tftp.rules 87 | # - emerging-trojan.rules 88 | # - emerging-user_agents.rules 89 | # - emerging-voip.rules 90 | # - emerging-web_client.rules 91 | # - emerging-web_server.rules 92 | # - emerging-web_specific_apps.rules 93 | # - emerging-worm.rules 94 | # - tor.rules 95 | # - decoder-events.rules # available in suricata sources under rules dir 96 | # - stream-events.rules # available in suricata sources under rules dir 97 | # - http-events.rules # available in suricata sources under rules dir 98 | # - smtp-events.rules # available in suricata sources under rules dir 99 | # - dns-events.rules # available in suricata sources under rules dir 100 | # - tls-events.rules # available in suricata sources under rules dir 101 | # - modbus-events.rules # available in suricata sources under rules dir 102 | # - app-layer-events.rules # available in suricata sources under rules dir 103 | # - dnp3-events.rules # available in suricata sources under rules dir 104 | - porthawk.rules 105 | 106 | classification-file: /etc/suricata/classification.config 107 | reference-config-file: /etc/suricata/reference.config 108 | # threshold-file: /etc/suricata/threshold.config 109 | 110 | 111 | ## 112 | ## Step 3: select outputs 
to enable 113 | ## 114 | 115 | # The default logging directory. Any log or output file will be 116 | # placed here if its not specified with a full path name. This can be 117 | # overridden with the -l command line parameter. 118 | default-log-dir: /var/log/suricata/ 119 | 120 | # global stats configuration 121 | stats: 122 | enabled: yes 123 | # The interval field (in seconds) controls at what interval 124 | # the loggers are invoked. 125 | interval: 8 126 | 127 | # Configure the type of alert (and other) logging you would like. 128 | outputs: 129 | # a line based alerts log similar to Snort's fast.log 130 | - fast: 131 | enabled: yes 132 | filename: fast.log 133 | append: yes 134 | filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 135 | 136 | # Extensible Event Format (nicknamed EVE) event log in JSON format 137 | - eve-log: 138 | enabled: yes 139 | filetype: regular #regular|syslog|unix_dgram|unix_stream|redis 140 | filename: eve.json 141 | #prefix: "@cee: " # prefix to prepend to each log entry 142 | # the following are valid when type: syslog above 143 | #identity: "suricata" 144 | #facility: local5 145 | level: Info ## possible levels: Emergency, Alert, Critical, 146 | ## Error, Warning, Notice, Info, Debug 147 | #redis: 148 | # server: 127.0.0.1 149 | # port: 6379 150 | # mode: list ## possible values: list (default), channel 151 | # key: suricata ## key or channel to use (default to suricata) 152 | # Redis pipelining set up. This will enable to only do a query every 153 | # 'batch-size' events. This should lower the latency induced by network 154 | # connection at the cost of some memory. There is no flushing implemented 155 | # so this setting as to be reserved to high traffic suricata. 156 | # pipelining: 157 | # enabled: yes ## set enable to yes to enable query pipelining 158 | # batch-size: 10 ## number of entry to keep in buffer 159 | types: 160 | - alert: 161 | payload: yes # enable dumping payload in Base64 162 | payload-buffer-size: 4kb # max size of payload buffer to output in eve-log 163 | payload-printable: yes # enable dumping payload in printable (lossy) format 164 | packet: yes # enable dumping of packet (without stream segments) 165 | #http: yes # enable dumping of http fields 166 | #tls: yes # enable dumping of tls fields 167 | #ssh: yes # enable dumping of ssh fields 168 | #smtp: yes # enable dumping of smtp fields 169 | #dnp3: yes # enable dumping of DNP3 fields 170 | 171 | # Enable the logging of tagged packets for rules using the 172 | # "tag" keyword. 173 | #tagged-packets: yes 174 | 175 | # HTTP X-Forwarded-For support by adding an extra field or overwriting 176 | # the source or destination IP address (depending on flow direction) 177 | # with the one reported in the X-Forwarded-For HTTP header. This is 178 | # helpful when reviewing alerts for traffic that is being reverse 179 | # or forward proxied. 180 | xff: 181 | enabled: no 182 | # Two operation modes are available, "extra-data" and "overwrite". 183 | mode: extra-data 184 | # Two proxy deployments are supported, "reverse" and "forward". In 185 | # a "reverse" deployment the IP address used is the last one, in a 186 | # "forward" deployment the first IP address is used. 187 | deployment: reverse 188 | # Header name where the actual IP address will be reported, if more 189 | # than one IP address is present, the last IP address will be the 190 | # one taken into consideration. 
191 | header: X-Forwarded-For 192 | - http: 193 | extended: no # enable this for extended logging information 194 | # custom allows additional http fields to be included in eve-log 195 | # the example below adds three additional fields when uncommented 196 | #custom: [Accept-Encoding, Accept-Language, Authorization] 197 | #- dns: 198 | # control logging of queries and answers 199 | # default yes, no to disable 200 | #query: yes # enable logging of DNS queries 201 | #answer: yes # enable logging of DNS answers 202 | # control which RR types are logged 203 | # all enabled if custom not specified 204 | #custom: [a, aaaa, cname, mx, ns, ptr, txt] 205 | - tls: 206 | extended: no # enable this for extended logging information 207 | - files: 208 | force-magic: no # force logging magic on all logged files 209 | # force logging of checksums, available hash functions are md5, 210 | # sha1 and sha256 211 | #force-hash: [md5] 212 | #- drop: 213 | # alerts: yes # log alerts that caused drops 214 | # flows: all # start or all: 'start' logs only a single drop 215 | # # per flow direction. All logs each dropped pkt. 216 | - smtp: 217 | #extended: yes # enable this for extended logging information 218 | # this includes: bcc, message-id, subject, x_mailer, user-agent 219 | # custom fields logging from the list: 220 | # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, 221 | # x-originating-ip, in-reply-to, references, importance, priority, 222 | # sensitivity, organization, content-md5, date 223 | #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] 224 | # output md5 of fields: body, subject 225 | # for the body you need to set app-layer.protocols.smtp.mime.body-md5 226 | # to yes 227 | #md5: [body, subject] 228 | 229 | #- ssh 230 | #- stats: 231 | # totals: no # stats for all threads merged together 232 | # threads: no # per thread stats 233 | # deltas: no # include delta values 234 | # bi-directional flows 235 | #- flow 236 | # uni-directional flows 237 | #- netflow 238 | #- dnp3 239 | 240 | # alert output for use with Barnyard2 241 | - unified2-alert: 242 | enabled: no 243 | filename: unified2.alert 244 | 245 | # File size limit. Can be specified in kb, mb, gb. Just a number 246 | # is parsed as bytes. 247 | #limit: 32mb 248 | 249 | # Sensor ID field of unified2 alerts. 250 | #sensor-id: 0 251 | 252 | # Include payload of packets related to alerts. Defaults to true, set to 253 | # false if payload is not required. 254 | #payload: yes 255 | 256 | # HTTP X-Forwarded-For support by adding the unified2 extra header or 257 | # overwriting the source or destination IP address (depending on flow 258 | # direction) with the one reported in the X-Forwarded-For HTTP header. 259 | # This is helpful when reviewing alerts for traffic that is being reverse 260 | # or forward proxied. 261 | xff: 262 | enabled: no 263 | # Two operation modes are available, "extra-data" and "overwrite". Note 264 | # that in the "overwrite" mode, if the reported IP address in the HTTP 265 | # X-Forwarded-For header is of a different version of the packet 266 | # received, it will fall-back to "extra-data" mode. 267 | mode: extra-data 268 | # Two proxy deployments are supported, "reverse" and "forward". In 269 | # a "reverse" deployment the IP address used is the last one, in a 270 | # "forward" deployment the first IP address is used. 
271 | deployment: reverse 272 | # Header name where the actual IP address will be reported, if more 273 | # than one IP address is present, the last IP address will be the 274 | # one taken into consideration. 275 | header: X-Forwarded-For 276 | 277 | # a line based log of HTTP requests (no alerts) 278 | - http-log: 279 | enabled: no 280 | filename: http.log 281 | append: yes 282 | extended: yes # enable this for extended logging information 283 | #custom: yes # enabled the custom logging format (defined by customformat) 284 | #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P" 285 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 286 | 287 | # a line based log of TLS handshake parameters (no alerts) 288 | - tls-log: 289 | enabled: no # Log TLS connections. 290 | filename: tls.log # File to store TLS logs. 291 | append: yes 292 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 293 | #extended: yes # Log extended information like fingerprint 294 | 295 | # output module to store certificates chain to disk 296 | - tls-store: 297 | enabled: no 298 | #certs-log-dir: certs # directory to store the certificates files 299 | 300 | # a line based log of DNS requests and/or replies (no alerts) 301 | - dns-log: 302 | enabled: no 303 | filename: dns.log 304 | append: yes 305 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 306 | 307 | # Packet log... log packets in pcap format. 3 modes of operation: "normal" 308 | # "multi" and "sguil". 309 | # 310 | # In normal mode a pcap file "filename" is created in the default-log-dir, 311 | # or are as specified by "dir". 312 | # In multi mode, a file is created per thread. This will perform much 313 | # better, but will create multiple files where 'normal' would create one. 314 | # In multi mode the filename takes a few special variables: 315 | # - %n -- thread number 316 | # - %i -- thread id 317 | # - %t -- timestamp (secs or secs.usecs based on 'ts-format' 318 | # E.g. filename: pcap.%n.%t 319 | # 320 | # Note that it's possible to use directories, but the directories are not 321 | # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the 322 | # per thread directory. 323 | # 324 | # Also note that the limit and max-files settings are enforced per thread. 325 | # So the size limit when using 8 threads with 1000mb files and 2000 files 326 | # is: 8*1000*2000 ~ 16TiB. 327 | # 328 | # In Sguil mode "dir" indicates the base directory. In this base dir the 329 | # pcaps are created in th directory structure Sguil expects: 330 | # 331 | # $sguil-base-dir/YYYY-MM-DD/$filename. 332 | # 333 | # By default all packets are logged except: 334 | # - TCP streams beyond stream.reassembly.depth 335 | # - encrypted streams after the key exchange 336 | # 337 | - pcap-log: 338 | enabled: yes 339 | filename: log.pcap 340 | 341 | # File size limit. Can be specified in kb, mb, gb. Just a number 342 | # is parsed as bytes. 343 | limit: 10000mb 344 | 345 | # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit" 346 | max-files: 2000 347 | 348 | mode: normal # normal, multi or sguil. 349 | 350 | # Directory to place pcap files. If not provided the default log 351 | # directory will be used. Required for "sguil" mode. 
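      # (In this deployment, PortHawk-Deployment.sh pre-creates /var/log/suricata/pcaps
      #  and chowns /var/log/suricata to the 'suri' user, so pcap logging keeps working
      #  when Suricata is started with --user=suri from the cron jobs.)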
352 | dir: /var/log/suricata/pcaps 353 | 354 | #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec 355 | use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets 356 | honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stopped being logged. 357 | 358 | # a full alerts log containing much information for signature writers 359 | # or for investigating suspected false positives. 360 | - alert-debug: 361 | enabled: yes 362 | filename: alert-debug.log 363 | append: yes 364 | filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 365 | 366 | # alert output to prelude (http://www.prelude-technologies.com/) only 367 | # available if Suricata has been compiled with --enable-prelude 368 | - alert-prelude: 369 | enabled: yes 370 | profile: suricata 371 | log-packet-content: yes 372 | log-packet-header: yes 373 | 374 | # Stats.log contains data from various counters of the suricata engine. 375 | - stats: 376 | enabled: yes 377 | filename: stats.log 378 | totals: yes # stats for all threads merged together 379 | threads: no # per thread stats 380 | #null-values: yes # print counters that have value 0 381 | 382 | # a line based alerts log similar to fast.log into syslog 383 | - syslog: 384 | enabled: no 385 | # reported identity to syslog. If ommited the program name (usually 386 | # suricata) will be used. 387 | #identity: "suricata" 388 | facility: local5 389 | #level: Info ## possible levels: Emergency, Alert, Critical, 390 | ## Error, Warning, Notice, Info, Debug 391 | 392 | # a line based information for dropped packets in IPS mode 393 | - drop: 394 | enabled: no 395 | filename: drop.log 396 | append: yes 397 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 398 | 399 | # output module to store extracted files to disk 400 | # 401 | # The files are stored to the log-dir in a format "file." where is 402 | # an incrementing number starting at 1. For each file "file." a meta 403 | # file "file..meta" is created. 404 | # 405 | # File extraction depends on a lot of things to be fully done: 406 | # - file-store stream-depth. For optimal results, set this to 0 (unlimited) 407 | # - http request / response body sizes. Again set to 0 for optimal results. 408 | # - rules that contain the "filestore" keyword. 409 | - file-store: 410 | enabled: no # set to yes to enable 411 | log-dir: files # directory to store the files 412 | force-magic: no # force logging magic on all stored files 413 | # force logging of checksums, available hash functions are md5, 414 | # sha1 and sha256 415 | #force-hash: [md5] 416 | force-filestore: no # force storing of all files 417 | # override global stream-depth for sessions in which we want to 418 | # perform file extraction. Set to 0 for unlimited. 419 | #stream-depth: 0 420 | #waldo: file.waldo # waldo file to store the file_id across runs 421 | 422 | # output module to log files tracked in a easily parsable json format 423 | - file-log: 424 | enabled: no 425 | filename: files-json.log 426 | append: yes 427 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 428 | 429 | force-magic: no # force logging magic on all logged files 430 | # force logging of checksums, available hash functions are md5, 431 | # sha1 and sha256 432 | #force-hash: [md5] 433 | 434 | # Log TCP data after stream normalization 435 | # 2 types: file or dir. File logs into a single logfile. 
Dir creates 436 | # 2 files per TCP session and stores the raw TCP data into them. 437 | # Using 'both' will enable both file and dir modes. 438 | # 439 | # Note: limited by stream.depth 440 | - tcp-data: 441 | enabled: yes 442 | type: file 443 | filename: tcp-data.log 444 | 445 | # Log HTTP body data after normalization, dechunking and unzipping. 446 | # 2 types: file or dir. File logs into a single logfile. Dir creates 447 | # 2 files per HTTP session and stores the normalized data into them. 448 | # Using 'both' will enable both file and dir modes. 449 | # 450 | # Note: limited by the body limit settings 451 | - http-body-data: 452 | enabled: yes 453 | type: file 454 | filename: http-data.log 455 | 456 | # Lua Output Support - execute lua script to generate alert and event 457 | # output. 458 | # Documented at: 459 | # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output 460 | - lua: 461 | enabled: no 462 | #scripts-dir: /etc/suricata/lua-output/ 463 | scripts: 464 | # - script1.lua 465 | 466 | # Logging configuration. This is not about logging IDS alerts/events, but 467 | # output about what Suricata is doing, like startup messages, errors, etc. 468 | logging: 469 | # The default log level, can be overridden in an output section. 470 | # Note that debug level logging will only be emitted if Suricata was 471 | # compiled with the --enable-debug configure option. 472 | # 473 | # This value is overriden by the SC_LOG_LEVEL env var. 474 | default-log-level: notice 475 | 476 | # The default output format. Optional parameter, should default to 477 | # something reasonable if not provided. Can be overriden in an 478 | # output section. You can leave this out to get the default. 479 | # 480 | # This value is overriden by the SC_LOG_FORMAT env var. 481 | #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " 482 | 483 | # A regex to filter output. Can be overridden in an output section. 484 | # Defaults to empty (no filter). 485 | # 486 | # This value is overriden by the SC_LOG_OP_FILTER env var. 487 | default-output-filter: 488 | 489 | # Define your logging outputs. If none are defined, or they are all 490 | # disabled you will get the default - console output. 491 | outputs: 492 | - console: 493 | enabled: yes 494 | type: json 495 | - file: 496 | enabled: yes 497 | level: info 498 | filename: /var/log/suricata/suricata.log 499 | type: json 500 | - syslog: 501 | enabled: no 502 | facility: local5 503 | format: "[%i] <%d> -- " 504 | # type: json 505 | 506 | 507 | ## 508 | ## Step 4: configure common capture settings 509 | ## 510 | ## See "Advanced Capture Options" below for more options, including NETMAP 511 | ## and PF_RING. 512 | ## 513 | 514 | # Linux high speed capture support 515 | af-packet: 516 | - interface: eth0 517 | # Number of receive threads. "auto" uses the number of cores 518 | #threads: auto 519 | # Default clusterid. AF_PACKET will load balance packets based on flow. 520 | cluster-id: 99 521 | # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash. 522 | # This is only supported for Linux kernel > 3.1 523 | # possible value are: 524 | # * cluster_round_robin: round robin load balancing 525 | # * cluster_flow: all packets of a given flow are send to the same socket 526 | # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket 527 | # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same 528 | # socket. Requires at least Linux 3.14. 
529 | # * cluster_random: packets are sent randomly to sockets but with an equipartition. 530 | # Requires at least Linux 3.14. 531 | # * cluster_rollover: kernel rotates between sockets filling each socket before moving 532 | # to the next. Requires at least Linux 3.10. 533 | # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system 534 | # with capture card using RSS (require cpu affinity tuning and system irq tuning) 535 | cluster-type: cluster_flow 536 | # In some fragmentation case, the hash can not be computed. If "defrag" is set 537 | # to yes, the kernel will do the needed defragmentation before sending the packets. 538 | defrag: yes 539 | # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is 540 | # full then kernel will send the packet on the next socket with room available. This option 541 | # can minimize packet drop and increase the treated bandwidth on single intensive flow. 542 | #rollover: yes 543 | # To use the ring feature of AF_PACKET, set 'use-mmap' to yes 544 | #use-mmap: yes 545 | # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock 546 | # your system 547 | #mmap-locked: yes 548 | # Use experimental tpacket_v3 capture mode, only active if use-mmap is true 549 | #tpacket-v3: yes 550 | # Ring size will be computed with respect to max_pending_packets and number 551 | # of threads. You can set manually the ring size in number of packets by setting 552 | # the following value. If you are using flow cluster-type and have really network 553 | # intensive single-flow you could want to set the ring-size independently of the number 554 | # of threads: 555 | #ring-size: 2048 556 | # Block size is used by tpacket_v3 only. It should set to a value high enough to contain 557 | # a decent number of packets. Size is in bytes so please consider your MTU. It should be 558 | # a power of 2 and it must be multiple of page size (usually 4096). 559 | #block-size: 32768 560 | # tpacket_v3 block timeout: an open block is passed to userspace if it is not 561 | # filled after block-timeout milliseconds. 562 | #block-timeout: 10 563 | # On busy system, this could help to set it to yes to recover from a packet drop 564 | # phase. This will result in some packets (at max a ring flush) being non treated. 565 | #use-emergency-flush: yes 566 | # recv buffer size, increase value could improve performance 567 | # buffer-size: 32768 568 | # Set to yes to disable promiscuous mode 569 | # disable-promisc: no 570 | # Choose checksum verification mode for the interface. At the moment 571 | # of the capture, some packets may be with an invalid checksum due to 572 | # offloading to the network card of the checksum computation. 573 | # Possible values are: 574 | # - kernel: use indication sent by kernel for each packet (default) 575 | # - yes: checksum validation is forced 576 | # - no: checksum validation is disabled 577 | # - auto: suricata uses a statistical approach to detect when 578 | # checksum off-loading is used. 579 | # Warning: 'checksum-validation' must be set to yes to have any validation 580 | #checksum-checks: kernel 581 | # BPF filter to apply to this interface. The pcap filter syntax apply here. 582 | #bpf-filter: port 80 or udp 583 | # You can use the following variables to activate AF_PACKET tap or IPS mode. 584 | # If copy-mode is set to ips or tap, the traffic coming to the current 585 | # interface will be copied to the copy-iface interface. 
If 'tap' is set, the 586 | # copy is complete. If 'ips' is set, the packet matching a 'drop' action 587 | # will not be copied. 588 | #copy-mode: ips 589 | #copy-iface: eth1 590 | 591 | # Put default values here. These will be used for an interface that is not 592 | # in the list above. 593 | - interface: default 594 | #threads: auto 595 | #use-mmap: no 596 | #rollover: yes 597 | #tpacket-v3: yes 598 | 599 | # Cross platform libpcap capture support 600 | pcap: 601 | - interface: eth0 602 | # On Linux, pcap will try to use mmaped capture and will use buffer-size 603 | # as total of memory used by the ring. So set this to something bigger 604 | # than 1% of your bandwidth. 605 | #buffer-size: 16777216 606 | #bpf-filter: "tcp and port 25" 607 | # Choose checksum verification mode for the interface. At the moment 608 | # of the capture, some packets may be with an invalid checksum due to 609 | # offloading to the network card of the checksum computation. 610 | # Possible values are: 611 | # - yes: checksum validation is forced 612 | # - no: checksum validation is disabled 613 | # - auto: suricata uses a statistical approach to detect when 614 | # checksum off-loading is used. (default) 615 | # Warning: 'checksum-validation' must be set to yes to have any validation 616 | #checksum-checks: auto 617 | # With some accelerator cards using a modified libpcap (like myricom), you 618 | # may want to have the same number of capture threads as the number of capture 619 | # rings. In this case, set up the threads variable to N to start N threads 620 | # listening on the same interface. 621 | #threads: 16 622 | # set to no to disable promiscuous mode: 623 | #promisc: no 624 | # set snaplen, if not set it defaults to MTU if MTU can be known 625 | # via ioctl call and to full capture if not. 626 | #snaplen: 1518 627 | # Put default values here 628 | - interface: default 629 | #checksum-checks: auto 630 | 631 | # Settings for reading pcap files 632 | pcap-file: 633 | # Possible values are: 634 | # - yes: checksum validation is forced 635 | # - no: checksum validation is disabled 636 | # - auto: suricata uses a statistical approach to detect when 637 | # checksum off-loading is used. (default) 638 | # Warning: 'checksum-validation' must be set to yes to have checksum tested 639 | checksum-checks: auto 640 | 641 | # See "Advanced Capture Options" below for more options, including NETMAP 642 | # and PF_RING. 643 | 644 | 645 | ## 646 | ## Step 5: App Layer Protocol Configuration 647 | ## 648 | 649 | # Configure the app-layer parsers. The protocols section details each 650 | # protocol. 651 | # 652 | # The option "enabled" takes 3 values - "yes", "no", "detection-only". 653 | # "yes" enables both detection and the parser, "no" disables both, and 654 | # "detection-only" enables protocol detection only (parser disabled). 655 | app-layer: 656 | protocols: 657 | tls: 658 | enabled: yes 659 | detection-ports: 660 | dp: 443 661 | 662 | # Completely stop processing TLS/SSL session after the handshake 663 | # completed. If bypass is enabled this will also trigger flow 664 | # bypass. If disabled (the default), TLS/SSL session is still 665 | # tracked for Heartbleed and other anomalies. 
666 | #no-reassemble: yes 667 | dcerpc: 668 | enabled: yes 669 | ftp: 670 | enabled: yes 671 | ssh: 672 | enabled: yes 673 | smtp: 674 | enabled: yes 675 | # Configure SMTP-MIME Decoder 676 | mime: 677 | # Decode MIME messages from SMTP transactions 678 | # (may be resource intensive) 679 | # This field supercedes all others because it turns the entire 680 | # process on or off 681 | decode-mime: yes 682 | 683 | # Decode MIME entity bodies (ie. base64, quoted-printable, etc.) 684 | decode-base64: yes 685 | decode-quoted-printable: yes 686 | 687 | # Maximum bytes per header data value stored in the data structure 688 | # (default is 2000) 689 | header-value-depth: 2000 690 | 691 | # Extract URLs and save in state data structure 692 | extract-urls: yes 693 | # Set to yes to compute the md5 of the mail body. You will then 694 | # be able to journalize it. 695 | body-md5: no 696 | # Configure inspected-tracker for file_data keyword 697 | inspected-tracker: 698 | content-limit: 100000 699 | content-inspect-min-size: 32768 700 | content-inspect-window: 4096 701 | imap: 702 | enabled: detection-only 703 | msn: 704 | enabled: detection-only 705 | smb: 706 | enabled: yes 707 | detection-ports: 708 | dp: 139, 445 709 | # smb2 detection is disabled internally inside the engine. 710 | #smb2: 711 | # enabled: yes 712 | dns: 713 | # memcaps. Globally and per flow/state. 714 | #global-memcap: 16mb 715 | #state-memcap: 512kb 716 | 717 | # How many unreplied DNS requests are considered a flood. 718 | # If the limit is reached, app-layer-event:dns.flooded; will match. 719 | #request-flood: 500 720 | 721 | tcp: 722 | enabled: yes 723 | detection-ports: 724 | dp: 53 725 | udp: 726 | enabled: yes 727 | detection-ports: 728 | dp: 53 729 | http: 730 | enabled: yes 731 | # memcap: 64mb 732 | 733 | # default-config: Used when no server-config matches 734 | # personality: List of personalities used by default 735 | # request-body-limit: Limit reassembly of request body for inspection 736 | # by http_client_body & pcre /P option. 737 | # response-body-limit: Limit reassembly of response body for inspection 738 | # by file_data, http_server_body & pcre /Q option. 739 | # double-decode-path: Double decode path section of the URI 740 | # double-decode-query: Double decode query section of the URI 741 | # response-body-decompress-layer-limit: 742 | # Limit to how many layers of compression will be 743 | # decompressed. Defaults to 2. 744 | # 745 | # server-config: List of server configurations to use if address matches 746 | # address: List of ip addresses or networks for this block 747 | # personalitiy: List of personalities used by this block 748 | # request-body-limit: Limit reassembly of request body for inspection 749 | # by http_client_body & pcre /P option. 750 | # response-body-limit: Limit reassembly of response body for inspection 751 | # by file_data, http_server_body & pcre /Q option. 752 | # double-decode-path: Double decode path section of the URI 753 | # double-decode-query: Double decode query section of the URI 754 | # 755 | # uri-include-all: Include all parts of the URI. By default the 756 | # 'scheme', username/password, hostname and port 757 | # are excluded. Setting this option to true adds 758 | # all of them to the normalized uri as inspected 759 | # by http_uri, urilen, pcre with /U and the other 760 | # keywords that inspect the normalized uri. 761 | # Note that this does not affect http_raw_uri. 762 | # Also, note that including all was the default in 763 | # 1.4 and 2.0beta1. 
764 | # 765 | # meta-field-limit: Hard size limit for request and response size 766 | # limits. Applies to request line and headers, 767 | # response line and headers. Does not apply to 768 | # request or response bodies. Default is 18k. 769 | # If this limit is reached an event is raised. 770 | # 771 | # Currently Available Personalities: 772 | # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0, 773 | # IIS_7_0, IIS_7_5, Apache_2 774 | libhtp: 775 | default-config: 776 | personality: IDS 777 | 778 | # Can be specified in kb, mb, gb. Just a number indicates 779 | # it's in bytes. 780 | request-body-limit: 100kb 781 | response-body-limit: 100kb 782 | 783 | # inspection limits 784 | request-body-minimal-inspect-size: 32kb 785 | request-body-inspect-window: 4kb 786 | response-body-minimal-inspect-size: 40kb 787 | response-body-inspect-window: 16kb 788 | 789 | # response body decompression (0 disables) 790 | response-body-decompress-layer-limit: 2 791 | 792 | # auto will use http-body-inline mode in IPS mode, yes or no set it statically 793 | http-body-inline: auto 794 | 795 | # Take a random value for inspection sizes around the specified value. 796 | # This lower the risk of some evasion technics but could lead 797 | # detection change between runs. It is set to 'yes' by default. 798 | #randomize-inspection-sizes: yes 799 | # If randomize-inspection-sizes is active, the value of various 800 | # inspection size will be choosen in the [1 - range%, 1 + range%] 801 | # range 802 | # Default value of randomize-inspection-range is 10. 803 | #randomize-inspection-range: 10 804 | 805 | # decoding 806 | double-decode-path: no 807 | double-decode-query: no 808 | 809 | server-config: 810 | 811 | #- apache: 812 | # address: [192.168.1.0/24, 127.0.0.0/8, "::1"] 813 | # personality: Apache_2 814 | # # Can be specified in kb, mb, gb. Just a number indicates 815 | # # it's in bytes. 816 | # request-body-limit: 4096 817 | # response-body-limit: 4096 818 | # double-decode-path: no 819 | # double-decode-query: no 820 | 821 | #- iis7: 822 | # address: 823 | # - 192.168.0.0/24 824 | # - 192.168.10.0/24 825 | # personality: IIS_7_0 826 | # # Can be specified in kb, mb, gb. Just a number indicates 827 | # # it's in bytes. 828 | # request-body-limit: 4096 829 | # response-body-limit: 4096 830 | # double-decode-path: no 831 | # double-decode-query: no 832 | 833 | # Note: Modbus probe parser is minimalist due to the poor significant field 834 | # Only Modbus message length (greater than Modbus header length) 835 | # And Protocol ID (equal to 0) are checked in probing parser 836 | # It is important to enable detection port and define Modbus port 837 | # to avoid false positive 838 | modbus: 839 | # How many unreplied Modbus requests are considered a flood. 840 | # If the limit is reached, app-layer-event:modbus.flooded; will match. 841 | #request-flood: 500 842 | 843 | enabled: no 844 | detection-ports: 845 | dp: 502 846 | # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it 847 | # is recommended to keep the TCP connection opened with a remote device 848 | # and not to open and close it for each MODBUS/TCP transaction. In that 849 | # case, it is important to set the depth of the stream reassembling as 850 | # unlimited (stream.reassembly.depth: 0) 851 | 852 | # Stream reassembly size for modbus. By default track it completely. 
853 | stream-depth: 0 854 | 855 | # DNP3 856 | dnp3: 857 | enabled: no 858 | detection-ports: 859 | dp: 20000 860 | 861 | # SCADA EtherNet/IP and CIP protocol support 862 | enip: 863 | enabled: no 864 | detection-ports: 865 | dp: 44818 866 | sp: 44818 867 | 868 | # Limit for the maximum number of asn1 frames to decode (default 256) 869 | asn1-max-frames: 256 870 | 871 | 872 | ############################################################################## 873 | ## 874 | ## Advanced settings below 875 | ## 876 | ############################################################################## 877 | 878 | ## 879 | ## Run Options 880 | ## 881 | 882 | # Run suricata as user and group. 883 | #run-as: 884 | # user: suri 885 | # group: suri 886 | 887 | # Some logging module will use that name in event as identifier. The default 888 | # value is the hostname 889 | #sensor-name: suricata 890 | 891 | # Default pid file. 892 | # Will use this file if no --pidfile in command options. 893 | #pid-file: /var/run/suricata.pid 894 | 895 | # Daemon working directory 896 | # Suricata will change directory to this one if provided 897 | # Default: "/" 898 | #daemon-directory: "/" 899 | 900 | # Suricata core dump configuration. Limits the size of the core dump file to 901 | # approximately max-dump. The actual core dump size will be a multiple of the 902 | # page size. Core dumps that would be larger than max-dump are truncated. On 903 | # Linux, the actual core dump size may be a few pages larger than max-dump. 904 | # Setting max-dump to 0 disables core dumping. 905 | # Setting max-dump to 'unlimited' will give the full core dump file. 906 | # On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size 907 | # to be 'unlimited'. 908 | 909 | coredump: 910 | max-dump: unlimited 911 | 912 | # If suricata box is a router for the sniffed networks, set it to 'router'. If 913 | # it is a pure sniffing setup, set it to 'sniffer-only'. 914 | # If set to auto, the variable is internally switch to 'router' in IPS mode 915 | # and 'sniffer-only' in IDS mode. 916 | # This feature is currently only used by the reject* keywords. 917 | host-mode: auto 918 | 919 | # Number of packets preallocated per thread. The default is 1024. A higher number 920 | # will make sure each CPU will be more easily kept busy, but may negatively 921 | # impact caching. 922 | # 923 | # If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules 924 | # apply. In that case try something like 60000 or more. This is because the CUDA 925 | # pattern matcher buffers and scans as many packets as possible in parallel. 926 | #max-pending-packets: 1024 927 | 928 | # Runmode the engine should use. Please check --list-runmodes to get the available 929 | # runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned 930 | # load balancing). 931 | #runmode: autofp 932 | 933 | # Specifies the kind of flow load balancer used by the flow pinned autofp mode. 934 | # 935 | # Supported schedulers are: 936 | # 937 | # round-robin - Flows assigned to threads in a round robin fashion. 938 | # active-packets - Flows assigned to threads that have the lowest number of 939 | # unprocessed packets (default). 940 | # hash - Flow alloted usihng the address hash. More of a random 941 | # technique. Was the default in Suricata 1.2.1 and older. 942 | # 943 | #autofp-scheduler: active-packets 944 | 945 | # Preallocated size for packet. Default is 1514 which is the classical 946 | # size for pcap on ethernet. 
You should adjust this value to the highest 947 | # packet size (MTU + hardware header) on your system. 948 | #default-packet-size: 1514 949 | 950 | # Unix command socket can be used to pass commands to suricata. 951 | # An external tool can then connect to get information from suricata 952 | # or trigger some modifications of the engine. Set enabled to yes 953 | # to activate the feature. In auto mode, the feature will only be 954 | # activated in live capture mode. You can use the filename variable to set 955 | # the file name of the socket. 956 | unix-command: 957 | enabled: auto 958 | #filename: custom.socket 959 | 960 | # Magic file. The extension .mgc is added to the value here. 961 | #magic-file: /usr/share/file/magic 962 | #magic-file: 963 | 964 | legacy: 965 | uricontent: enabled 966 | 967 | ## 968 | ## Detection settings 969 | ## 970 | 971 | # Set the order of alerts bassed on actions 972 | # The default order is pass, drop, reject, alert 973 | # action-order: 974 | # - pass 975 | # - drop 976 | # - reject 977 | # - alert 978 | 979 | # IP Reputation 980 | #reputation-categories-file: /etc/suricata/iprep/categories.txt 981 | #default-reputation-path: /etc/suricata/iprep 982 | #reputation-files: 983 | # - reputation.list 984 | 985 | # When run with the option --engine-analysis, the engine will read each of 986 | # the parameters below, and print reports for each of the enabled sections 987 | # and exit. The reports are printed to a file in the default log dir 988 | # given by the parameter "default-log-dir", with engine reporting 989 | # subsection below printing reports in its own report file. 990 | engine-analysis: 991 | # enables printing reports for fast-pattern for every rule. 992 | rules-fast-pattern: yes 993 | # enables printing reports for each rule 994 | rules: yes 995 | 996 | #recursion and match limits for PCRE where supported 997 | pcre: 998 | match-limit: 3500 999 | match-limit-recursion: 1500 1000 | 1001 | ## 1002 | ## Advanced Traffic Tracking and Reconstruction Settings 1003 | ## 1004 | 1005 | # Host specific policies for defragmentation and TCP stream 1006 | # reassembly. The host OS lookup is done using a radix tree, just 1007 | # like a routing table so the most specific entry matches. 1008 | host-os-policy: 1009 | # Make the default policy windows. 1010 | windows: [0.0.0.0/0] 1011 | bsd: [] 1012 | bsd-right: [] 1013 | old-linux: [] 1014 | linux: [] 1015 | old-solaris: [] 1016 | solaris: [] 1017 | hpux10: [] 1018 | hpux11: [] 1019 | irix: [] 1020 | macos: [] 1021 | vista: [] 1022 | windows2k3: [] 1023 | 1024 | # Defrag settings: 1025 | 1026 | defrag: 1027 | memcap: 32mb 1028 | hash-size: 65536 1029 | trackers: 65535 # number of defragmented flows to follow 1030 | max-frags: 65535 # number of fragments to keep (higher than trackers) 1031 | prealloc: yes 1032 | timeout: 60 1033 | 1034 | # Enable defrag per host settings 1035 | # host-config: 1036 | # 1037 | # - dmz: 1038 | # timeout: 30 1039 | # address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"] 1040 | # 1041 | # - lan: 1042 | # timeout: 45 1043 | # address: 1044 | # - 192.168.0.0/24 1045 | # - 192.168.10.0/24 1046 | # - 172.16.14.0/24 1047 | 1048 | # Flow settings: 1049 | # By default, the reserved memory (memcap) for flows is 32MB. This is the limit 1050 | # for flow allocation inside the engine. You can change this value to allow 1051 | # more memory usage for flows. 
1052 | # The hash-size determine the size of the hash used to identify flows inside 1053 | # the engine, and by default the value is 65536. 1054 | # At the startup, the engine can preallocate a number of flows, to get a better 1055 | # performance. The number of flows preallocated is 10000 by default. 1056 | # emergency-recovery is the percentage of flows that the engine need to 1057 | # prune before unsetting the emergency state. The emergency state is activated 1058 | # when the memcap limit is reached, allowing to create new flows, but 1059 | # prunning them with the emergency timeouts (they are defined below). 1060 | # If the memcap is reached, the engine will try to prune flows 1061 | # with the default timeouts. If it doens't find a flow to prune, it will set 1062 | # the emergency bit and it will try again with more agressive timeouts. 1063 | # If that doesn't work, then it will try to kill the last time seen flows 1064 | # not in use. 1065 | # The memcap can be specified in kb, mb, gb. Just a number indicates it's 1066 | # in bytes. 1067 | 1068 | flow: 1069 | memcap: 128mb 1070 | hash-size: 65536 1071 | prealloc: 10000 1072 | emergency-recovery: 30 1073 | #managers: 1 # default to one flow manager 1074 | #recyclers: 1 # default to one flow recycler thread 1075 | 1076 | # This option controls the use of vlan ids in the flow (and defrag) 1077 | # hashing. Normally this should be enabled, but in some (broken) 1078 | # setups where both sides of a flow are not tagged with the same vlan 1079 | # tag, we can ignore the vlan id's in the flow hashing. 1080 | vlan: 1081 | use-for-tracking: true 1082 | 1083 | # Specific timeouts for flows. Here you can specify the timeouts that the 1084 | # active flows will wait to transit from the current state to another, on each 1085 | # protocol. The value of "new" determine the seconds to wait after a hanshake or 1086 | # stream startup before the engine free the data of that flow it doesn't 1087 | # change the state to established (usually if we don't receive more packets 1088 | # of that flow). The value of "established" is the amount of 1089 | # seconds that the engine will wait to free the flow if it spend that amount 1090 | # without receiving new packets or closing the connection. "closed" is the 1091 | # amount of time to wait after a flow is closed (usually zero). "bypassed" 1092 | # timeout controls locally bypassed flows. For these flows we don't do any other 1093 | # tracking. If no packets have been seen after this timeout, the flow is discarded. 1094 | # 1095 | # There's an emergency mode that will become active under attack circumstances, 1096 | # making the engine to check flow status faster. This configuration variables 1097 | # use the prefix "emergency-" and work similar as the normal ones. 1098 | # Some timeouts doesn't apply to all the protocols, like "closed", for udp and 1099 | # icmp. 
1100 | 1101 | flow-timeouts: 1102 | 1103 | default: 1104 | new: 30 1105 | established: 300 1106 | closed: 0 1107 | bypassed: 100 1108 | emergency-new: 10 1109 | emergency-established: 100 1110 | emergency-closed: 0 1111 | emergency-bypassed: 50 1112 | tcp: 1113 | new: 60 1114 | established: 600 1115 | closed: 60 1116 | bypassed: 100 1117 | emergency-new: 5 1118 | emergency-established: 100 1119 | emergency-closed: 10 1120 | emergency-bypassed: 50 1121 | udp: 1122 | new: 30 1123 | established: 300 1124 | bypassed: 100 1125 | emergency-new: 10 1126 | emergency-established: 100 1127 | emergency-bypassed: 50 1128 | icmp: 1129 | new: 30 1130 | established: 300 1131 | bypassed: 100 1132 | emergency-new: 10 1133 | emergency-established: 100 1134 | emergency-bypassed: 50 1135 | 1136 | # Stream engine settings. Here the TCP stream tracking and reassembly 1137 | # engine is configured. 1138 | # 1139 | # stream: 1140 | # memcap: 32mb # Can be specified in kb, mb, gb. Just a 1141 | # # number indicates it's in bytes. 1142 | # checksum-validation: yes # To validate the checksum of received 1143 | # # packet. If csum validation is specified as 1144 | # # "yes", then packet with invalid csum will not 1145 | # # be processed by the engine stream/app layer. 1146 | # # Warning: locally generated trafic can be 1147 | # # generated without checksum due to hardware offload 1148 | # # of checksum. You can control the handling of checksum 1149 | # # on a per-interface basis via the 'checksum-checks' 1150 | # # option 1151 | # prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread 1152 | # midstream: false # don't allow midstream session pickups 1153 | # async-oneside: false # don't enable async stream handling 1154 | # inline: no # stream inline mode 1155 | # max-synack-queued: 5 # Max different SYN/ACKs to queue 1156 | # bypass: no # Bypass packets when stream.depth is reached 1157 | # 1158 | # reassembly: 1159 | # memcap: 64mb # Can be specified in kb, mb, gb. Just a number 1160 | # # indicates it's in bytes. 1161 | # depth: 1mb # Can be specified in kb, mb, gb. Just a number 1162 | # # indicates it's in bytes. 1163 | # toserver-chunk-size: 2560 # inspect raw stream in chunks of at least 1164 | # # this size. Can be specified in kb, mb, 1165 | # # gb. Just a number indicates it's in bytes. 1166 | # # The max acceptable size is 4024 bytes. 1167 | # toclient-chunk-size: 2560 # inspect raw stream in chunks of at least 1168 | # # this size. Can be specified in kb, mb, 1169 | # # gb. Just a number indicates it's in bytes. 1170 | # # The max acceptable size is 4024 bytes. 1171 | # randomize-chunk-size: yes # Take a random value for chunk size around the specified value. 1172 | # # This lower the risk of some evasion technics but could lead 1173 | # # detection change between runs. It is set to 'yes' by default. 1174 | # randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is 1175 | # # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size 1176 | # # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same 1177 | # # calculation for toclient-chunk-size. 1178 | # # Default value of randomize-chunk-range is 10. 1179 | # 1180 | # raw: yes # 'Raw' reassembly enabled or disabled. 1181 | # # raw is for content inspection by detection 1182 | # # engine. 1183 | # 1184 | # chunk-prealloc: 250 # Number of preallocated stream chunks. These 1185 | # # are used during stream inspection (raw). 1186 | # segments: # Settings for reassembly segment pool. 
1187 | # - size: 4 # Size of the (data)segment for a pool 1188 | # prealloc: 256 # Number of segments to prealloc and keep 1189 | # # in the pool. 1190 | # zero-copy-size: 128 # This option sets in bytes the value at 1191 | # # which segment data is passed to the app 1192 | # # layer API directly. Data sizes equal to 1193 | # # and higher than the value set are passed 1194 | # # on directly. 1195 | # 1196 | stream: 1197 | memcap: 64mb 1198 | checksum-validation: yes # reject wrong csums 1199 | inline: auto # auto will use inline mode in IPS mode, yes or no set it statically 1200 | reassembly: 1201 | memcap: 256mb 1202 | depth: 1mb # reassemble 1mb into a stream 1203 | toserver-chunk-size: 2560 1204 | toclient-chunk-size: 2560 1205 | randomize-chunk-size: yes 1206 | #randomize-chunk-range: 10 1207 | #raw: yes 1208 | #chunk-prealloc: 250 1209 | #segments: 1210 | # - size: 4 1211 | # prealloc: 256 1212 | # - size: 16 1213 | # prealloc: 512 1214 | # - size: 112 1215 | # prealloc: 512 1216 | # - size: 248 1217 | # prealloc: 512 1218 | # - size: 512 1219 | # prealloc: 512 1220 | # - size: 768 1221 | # prealloc: 1024 1222 | # 'from_mtu' means that the size is mtu - 40, 1223 | # or 1460 if mtu couldn't be determined. 1224 | # - size: from_mtu 1225 | # prealloc: 1024 1226 | # - size: 65535 1227 | # prealloc: 128 1228 | #zero-copy-size: 128 1229 | 1230 | # Host table: 1231 | # 1232 | # Host table is used by tagging and per host thresholding subsystems. 1233 | # 1234 | host: 1235 | hash-size: 4096 1236 | prealloc: 1000 1237 | memcap: 32mb 1238 | 1239 | # IP Pair table: 1240 | # 1241 | # Used by xbits 'ippair' tracking. 1242 | # 1243 | #ippair: 1244 | # hash-size: 4096 1245 | # prealloc: 1000 1246 | # memcap: 32mb 1247 | 1248 | 1249 | ## 1250 | ## Performance tuning and profiling 1251 | ## 1252 | 1253 | # The detection engine builds internal groups of signatures. The engine 1254 | # allow us to specify the profile to use for them, to manage memory on an 1255 | # efficient way keeping a good performance. For the profile keyword you 1256 | # can use the words "low", "medium", "high" or "custom". If you use custom 1257 | # make sure to define the values at "- custom-values" as your convenience. 1258 | # Usually you would prefer medium/high/low. 1259 | # 1260 | # "sgh mpm-context", indicates how the staging should allot mpm contexts for 1261 | # the signature groups. "single" indicates the use of a single context for 1262 | # all the signature group heads. "full" indicates a mpm-context for each 1263 | # group head. "auto" lets the engine decide the distribution of contexts 1264 | # based on the information the engine gathers on the patterns from each 1265 | # group head. 1266 | # 1267 | # The option inspection-recursion-limit is used to limit the recursive calls 1268 | # in the content inspection code. For certain payload-sig combinations, we 1269 | # might end up taking too much time in the content inspection code. 1270 | # If the argument specified is 0, the engine uses an internally defined 1271 | # default limit. On not specifying a value, we use no limits on the recursion. 1272 | detect: 1273 | profile: medium 1274 | custom-values: 1275 | toclient-groups: 3 1276 | toserver-groups: 25 1277 | sgh-mpm-context: auto 1278 | inspection-recursion-limit: 3000 1279 | # If set to yes, the loading of signatures will be made after the capture 1280 | # is started. This will limit the downtime in IPS mode. 1281 | #delayed-detect: yes 1282 | 1283 | prefilter: 1284 | # default prefiltering setting. 
"mpm" only creates MPM/fast_pattern 1285 | # engines. "auto" also sets up prefilter engines for other keywords. 1286 | # Use --list-keywords=all to see which keywords support prefiltering. 1287 | default: mpm 1288 | 1289 | # the grouping values above control how many groups are created per 1290 | # direction. Port whitelisting forces that port to get it's own group. 1291 | # Very common ports will benefit, as well as ports with many expensive 1292 | # rules. 1293 | grouping: 1294 | #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080 1295 | #udp-whitelist: 53, 135, 5060 1296 | 1297 | profiling: 1298 | # Log the rules that made it past the prefilter stage, per packet 1299 | # default is off. The threshold setting determines how many rules 1300 | # must have made it past pre-filter for that rule to trigger the 1301 | # logging. 1302 | #inspect-logging-threshold: 200 1303 | grouping: 1304 | dump-to-disk: false 1305 | include-rules: false # very verbose 1306 | include-mpm-stats: false 1307 | 1308 | # Select the multi pattern algorithm you want to run for scan/search the 1309 | # in the engine. 1310 | # 1311 | # The supported algorithms are: 1312 | # "ac" - Aho-Corasick, default implementation 1313 | # "ac-bs" - Aho-Corasick, reduced memory implementation 1314 | # "ac-cuda" - Aho-Corasick, CUDA implementation 1315 | # "ac-ks" - Aho-Corasick, "Ken Steele" variant 1316 | # "hs" - Hyperscan, available when built with Hyperscan support 1317 | # 1318 | # The default mpm-algo value of "auto" will use "hs" if Hyperscan is 1319 | # available, "ac" otherwise. 1320 | # 1321 | # The mpm you choose also decides the distribution of mpm contexts for 1322 | # signature groups, specified by the conf - "detect.sgh-mpm-context". 1323 | # Selecting "ac" as the mpm would require "detect.sgh-mpm-context" 1324 | # to be set to "single", because of ac's memory requirements, unless the 1325 | # ruleset is small enough to fit in one's memory, in which case one can 1326 | # use "full" with "ac". Rest of the mpms can be run in "full" mode. 1327 | # 1328 | # There is also a CUDA pattern matcher (only available if Suricata was 1329 | # compiled with --enable-cuda: b2g_cuda. Make sure to update your 1330 | # max-pending-packets setting above as well if you use b2g_cuda. 1331 | 1332 | mpm-algo: auto 1333 | 1334 | # Select the matching algorithm you want to use for single-pattern searches. 1335 | # 1336 | # Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only 1337 | # available if Suricata has been built with Hyperscan support). 1338 | # 1339 | # The default of "auto" will use "hs" if available, otherwise "bm". 1340 | 1341 | spm-algo: auto 1342 | 1343 | # Suricata is multi-threaded. Here the threading can be influenced. 1344 | threading: 1345 | set-cpu-affinity: no 1346 | # Tune cpu affinity of threads. Each family of threads can be bound 1347 | # on specific CPUs. 
1348 | # 1349 | # These 2 apply to the all runmodes: 1350 | # management-cpu-set is used for flow timeout handling, counters 1351 | # worker-cpu-set is used for 'worker' threads 1352 | # 1353 | # Additionally, for autofp these apply: 1354 | # receive-cpu-set is used for capture threads 1355 | # verdict-cpu-set is used for IPS verdict threads 1356 | # 1357 | cpu-affinity: 1358 | - management-cpu-set: 1359 | cpu: [ 0 ] # include only these cpus in affinity settings 1360 | - receive-cpu-set: 1361 | cpu: [ 0 ] # include only these cpus in affinity settings 1362 | - worker-cpu-set: 1363 | cpu: [ "all" ] 1364 | mode: "exclusive" 1365 | # Use explicitely 3 threads and don't compute number by using 1366 | # detect-thread-ratio variable: 1367 | # threads: 3 1368 | prio: 1369 | low: [ 0 ] 1370 | medium: [ "1-2" ] 1371 | high: [ 3 ] 1372 | default: "medium" 1373 | #- verdict-cpu-set: 1374 | # cpu: [ 0 ] 1375 | # prio: 1376 | # default: "high" 1377 | # 1378 | # By default Suricata creates one "detect" thread per available CPU/CPU core. 1379 | # This setting allows controlling this behaviour. A ratio setting of 2 will 1380 | # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this 1381 | # will result in 4 detect threads. If values below 1 are used, less threads 1382 | # are created. So on a dual core CPU a setting of 0.5 results in 1 detect 1383 | # thread being created. Regardless of the setting at a minimum 1 detect 1384 | # thread will always be created. 1385 | # 1386 | detect-thread-ratio: 1.0 1387 | 1388 | # Luajit has a strange memory requirement, it's 'states' need to be in the 1389 | # first 2G of the process' memory. 1390 | # 1391 | # 'luajit.states' is used to control how many states are preallocated. 1392 | # State use: per detect script: 1 per detect thread. Per output script: 1 per 1393 | # script. 1394 | luajit: 1395 | states: 128 1396 | 1397 | # Profiling settings. Only effective if Suricata has been built with the 1398 | # the --enable-profiling configure flag. 1399 | # 1400 | profiling: 1401 | # Run profiling for every xth packet. The default is 1, which means we 1402 | # profile every packet. If set to 1000, one packet is profiled for every 1403 | # 1000 received. 1404 | #sample-rate: 1000 1405 | 1406 | # rule profiling 1407 | rules: 1408 | 1409 | # Profiling can be disabled here, but it will still have a 1410 | # performance impact if compiled in. 1411 | enabled: yes 1412 | filename: rule_perf.log 1413 | append: yes 1414 | 1415 | # Sort options: ticks, avgticks, checks, matches, maxticks 1416 | sort: avgticks 1417 | 1418 | # Limit the number of items printed at exit (ignored for json). 1419 | limit: 100 1420 | 1421 | # output to json 1422 | json: yes 1423 | 1424 | # per keyword profiling 1425 | keywords: 1426 | enabled: yes 1427 | filename: keyword_perf.log 1428 | append: yes 1429 | 1430 | # per rulegroup profiling 1431 | rulegroups: 1432 | enabled: yes 1433 | filename: rule_group_perf.log 1434 | append: yes 1435 | 1436 | # packet profiling 1437 | packets: 1438 | 1439 | # Profiling can be disabled here, but it will still have a 1440 | # performance impact if compiled in. 1441 | enabled: yes 1442 | filename: packet_stats.log 1443 | append: yes 1444 | 1445 | # per packet csv output 1446 | csv: 1447 | 1448 | # Output can be disabled here, but it will still have a 1449 | # performance impact if compiled in. 1450 | enabled: yes 1451 | filename: packet_stats.csv 1452 | 1453 | # profiling of locking. 
Only available when Suricata was built with 1454 | # --enable-profiling-locks. 1455 | locks: 1456 | enabled: no 1457 | filename: lock_stats.log 1458 | append: yes 1459 | 1460 | pcap-log: 1461 | enabled: yes 1462 | filename: pcaplog_stats.log 1463 | append: yes 1464 | 1465 | ## 1466 | ## Netfilter integration 1467 | ## 1468 | 1469 | # When running in NFQ inline mode, it is possible to use a simulated 1470 | # non-terminal NFQUEUE verdict. 1471 | # This permit to do send all needed packet to suricata via this a rule: 1472 | # iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE 1473 | # And below, you can have your standard filtering ruleset. To activate 1474 | # this mode, you need to set mode to 'repeat' 1475 | # If you want packet to be sent to another queue after an ACCEPT decision 1476 | # set mode to 'route' and set next-queue value. 1477 | # On linux >= 3.1, you can set batchcount to a value > 1 to improve performance 1478 | # by processing several packets before sending a verdict (worker runmode only). 1479 | # On linux >= 3.6, you can set the fail-open option to yes to have the kernel 1480 | # accept the packet if suricata is not able to keep pace. 1481 | # bypass mark and mask can be used to implement NFQ bypass. If bypass mark is 1482 | # set then the NFQ bypass is activated. Suricata will set the bypass mark/mask 1483 | # on packet of a flow that need to be bypassed. The Nefilter ruleset has to 1484 | # directly accept all packets of a flow once a packet has been marked. 1485 | nfq: 1486 | # mode: accept 1487 | # repeat-mark: 1 1488 | # repeat-mask: 1 1489 | # bypass-mark: 1 1490 | # bypass-mask: 1 1491 | # route-queue: 2 1492 | # batchcount: 20 1493 | # fail-open: yes 1494 | 1495 | #nflog support 1496 | nflog: 1497 | # netlink multicast group 1498 | # (the same as the iptables --nflog-group param) 1499 | # Group 0 is used by the kernel, so you can't use it 1500 | - group: 2 1501 | # netlink buffer size 1502 | buffer-size: 18432 1503 | # put default value here 1504 | - group: default 1505 | # set number of packet to queue inside kernel 1506 | qthreshold: 1 1507 | # set the delay before flushing packet in the queue inside kernel 1508 | qtimeout: 100 1509 | # netlink max buffer size 1510 | max-size: 20000 1511 | 1512 | ## 1513 | ## Advanced Capture Options 1514 | ## 1515 | 1516 | # general settings affecting packet capture 1517 | capture: 1518 | # disable NIC offloading. It's restored when Suricata exists. 1519 | # Enabled by default 1520 | #disable-offloading: false 1521 | # 1522 | # disable checksum validation. Same as setting '-k none' on the 1523 | # commandline 1524 | #checksum-validation: none 1525 | 1526 | # Netmap support 1527 | # 1528 | # Netmap operates with NIC directly in driver, so you need FreeBSD wich have 1529 | # built-in netmap support or compile and install netmap module and appropriate 1530 | # NIC driver on your Linux system. 1531 | # To reach maximum throughput disable all receive-, segmentation-, 1532 | # checksum- offloadings on NIC. 1533 | # Disabling Tx checksum offloading is *required* for connecting OS endpoint 1534 | # with NIC endpoint. 1535 | # You can find more information at https://github.com/luigirizzo/netmap 1536 | # 1537 | netmap: 1538 | # To specify OS endpoint add plus sign at the end (e.g. "eth0+") 1539 | - interface: eth2 1540 | # Number of receive threads. "auto" uses number of RSS queues on interface. 1541 | #threads: auto 1542 | # You can use the following variables to activate netmap tap or IPS mode. 
1543 | # If copy-mode is set to ips or tap, the traffic coming to the current 1544 | # interface will be copied to the copy-iface interface. If 'tap' is set, the 1545 | # copy is complete. If 'ips' is set, the packet matching a 'drop' action 1546 | # will not be copied. 1547 | # To specify the OS as the copy-iface (so the OS can route packets, or forward 1548 | # to a service running on the same machine) add a plus sign at the end 1549 | # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0 1550 | # for return packets. Hardware checksumming must be *off* on the interface if 1551 | # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD 1552 | # or 'ethtool -K eth0 tx off rx off' for Linux). 1553 | #copy-mode: tap 1554 | #copy-iface: eth3 1555 | # Set to yes to disable promiscuous mode 1556 | # disable-promisc: no 1557 | # Choose checksum verification mode for the interface. At the moment 1558 | # of the capture, some packets may be with an invalid checksum due to 1559 | # offloading to the network card of the checksum computation. 1560 | # Possible values are: 1561 | # - yes: checksum validation is forced 1562 | # - no: checksum validation is disabled 1563 | # - auto: suricata uses a statistical approach to detect when 1564 | # checksum off-loading is used. 1565 | # Warning: 'checksum-validation' must be set to yes to have any validation 1566 | #checksum-checks: auto 1567 | # BPF filter to apply to this interface. The pcap filter syntax apply here. 1568 | #bpf-filter: port 80 or udp 1569 | #- interface: eth3 1570 | #threads: auto 1571 | #copy-mode: tap 1572 | #copy-iface: eth2 1573 | # Put default values here 1574 | - interface: default 1575 | 1576 | # PF_RING configuration. for use with native PF_RING support 1577 | # for more info see http://www.ntop.org/products/pf_ring/ 1578 | pfring: 1579 | - interface: eth0 1580 | # Number of receive threads (>1 will enable experimental flow pinned 1581 | # runmode) 1582 | threads: 1 1583 | 1584 | # Default clusterid. PF_RING will load balance packets based on flow. 1585 | # All threads/processes that will participate need to have the same 1586 | # clusterid. 1587 | cluster-id: 99 1588 | 1589 | # Default PF_RING cluster type. PF_RING can load balance per flow. 1590 | # Possible values are cluster_flow or cluster_round_robin. 1591 | cluster-type: cluster_flow 1592 | # bpf filter for this interface 1593 | #bpf-filter: tcp 1594 | # Choose checksum verification mode for the interface. At the moment 1595 | # of the capture, some packets may be with an invalid checksum due to 1596 | # offloading to the network card of the checksum computation. 1597 | # Possible values are: 1598 | # - rxonly: only compute checksum for packets received by network card. 1599 | # - yes: checksum validation is forced 1600 | # - no: checksum validation is disabled 1601 | # - auto: suricata uses a statistical approach to detect when 1602 | # checksum off-loading is used. (default) 1603 | # Warning: 'checksum-validation' must be set to yes to have any validation 1604 | #checksum-checks: auto 1605 | # Second interface 1606 | #- interface: eth1 1607 | # threads: 3 1608 | # cluster-id: 93 1609 | # cluster-type: cluster_flow 1610 | # Put default values here 1611 | - interface: default 1612 | #threads: 2 1613 | 1614 | # For FreeBSD ipfw(8) divert(4) support. 1615 | # Please make sure you have ipfw_load="YES" and ipdivert_load="YES" 1616 | # in /etc/loader.conf or kldload'ing the appropriate kernel modules. 
1617 | # Additionally, you need to have an ipfw rule for the engine to see 1618 | # the packets from ipfw. For Example: 1619 | # 1620 | # ipfw add 100 divert 8000 ip from any to any 1621 | # 1622 | # The 8000 above should be the same number you passed on the command 1623 | # line, i.e. -d 8000 1624 | # 1625 | ipfw: 1626 | 1627 | # Reinject packets at the specified ipfw rule number. This config 1628 | # option is the ipfw rule number AT WHICH rule processing continues 1629 | # in the ipfw processing system after the engine has finished 1630 | # inspecting the packet for acceptance. If no rule number is specified, 1631 | # accepted packets are reinjected at the divert rule which they entered 1632 | # and IPFW rule processing continues. No check is done to verify 1633 | # this will rule makes sense so care must be taken to avoid loops in ipfw. 1634 | # 1635 | ## The following example tells the engine to reinject packets 1636 | # back into the ipfw firewall AT rule number 5500: 1637 | # 1638 | # ipfw-reinjection-rule-number: 5500 1639 | 1640 | 1641 | napatech: 1642 | # The Host Buffer Allowance for all streams 1643 | # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back) 1644 | hba: -1 1645 | 1646 | # use_all_streams set to "yes" will query the Napatech service for all configured 1647 | # streams and listen on all of them. When set to "no" the streams config array 1648 | # will be used. 1649 | use-all-streams: yes 1650 | 1651 | # The streams to listen on 1652 | streams: [1, 2, 3] 1653 | 1654 | # Tilera mpipe configuration. for use on Tilera TILE-Gx. 1655 | mpipe: 1656 | 1657 | # Load balancing modes: "static", "dynamic", "sticky", or "round-robin". 1658 | load-balance: dynamic 1659 | 1660 | # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536 1661 | iqueue-packets: 2048 1662 | 1663 | # List of interfaces we will listen on. 1664 | inputs: 1665 | - interface: xgbe2 1666 | - interface: xgbe3 1667 | - interface: xgbe4 1668 | 1669 | 1670 | # Relative weight of memory for packets of each mPipe buffer size. 1671 | stack: 1672 | size128: 0 1673 | size256: 9 1674 | size512: 0 1675 | size1024: 0 1676 | size1664: 7 1677 | size4096: 0 1678 | size10386: 0 1679 | size16384: 0 1680 | 1681 | ## 1682 | ## Hardware accelaration 1683 | ## 1684 | 1685 | # Cuda configuration. 1686 | cuda: 1687 | # The "mpm" profile. On not specifying any of these parameters, the engine's 1688 | # internal default values are used, which are same as the ones specified in 1689 | # in the default conf file. 1690 | mpm: 1691 | # The minimum length required to buffer data to the gpu. 1692 | # Anything below this is MPM'ed on the CPU. 1693 | # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. 1694 | # A value of 0 indicates there's no limit. 1695 | data-buffer-size-min-limit: 0 1696 | # The maximum length for data that we would buffer to the gpu. 1697 | # Anything over this is MPM'ed on the CPU. 1698 | # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. 1699 | data-buffer-size-max-limit: 1500 1700 | # The ring buffer size used by the CudaBuffer API to buffer data. 1701 | cudabuffer-buffer-size: 500mb 1702 | # The max chunk size that can be sent to the gpu in a single go. 1703 | gpu-transfer-size: 50mb 1704 | # The timeout limit for batching of packets in microseconds. 1705 | batching-timeout: 2000 1706 | # The device to use for the mpm. Currently we don't support load balancing 1707 | # on multiple gpus. 
In case you have multiple devices on your system, you 1708 | # can specify the device to use, using this conf. By default we hold 0, to 1709 | # specify the first device cuda sees. To find out device-id associated with 1710 | # the card(s) on the system run "suricata --list-cuda-cards". 1711 | device-id: 0 1712 | # No of Cuda streams used for asynchronous processing. All values > 0 are valid. 1713 | # For this option you need a device with Compute Capability > 1.0. 1714 | cuda-streams: 2 1715 | 1716 | ## 1717 | ## Include other configs 1718 | ## 1719 | 1720 | # Includes. Files included here will be handled as if they were 1721 | # inlined in this configuration file. 1722 | #include: include1.yaml 1723 | #include: include2.yaml 1724 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Forensics 2 | Open Source forensic scripts and code produced by the CrowdStrike Services team. 3 | -------------------------------------------------------------------------------- /forensic-docs/X-Ways_Cheat_Sheet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CrowdStrike/Forensics/6d6a2aa2b9d023667d45cdaaa822e408ac058e06/forensic-docs/X-Ways_Cheat_Sheet.pdf -------------------------------------------------------------------------------- /sysmon/sysmon_keywords.txt: -------------------------------------------------------------------------------- 1 | \|whoami\> 2 | \|ping\> 3 | \|net\> 4 | \|net1\> 5 | \|dsquery\> 6 | \|dsget\> 7 | \|tasklist\> 8 | \|quser\> 9 | \|cacls\> 10 | \|powershell\> 11 | \|wsmprovhost\> 12 | .*\.ps1\> 13 | .*\.cmd\> 14 | .*\.bat\> 15 | .*\.rar\> -------------------------------------------------------------------------------- /sysmon/sysmon_parse.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | CLS 3 | REM ------------------------------------------------------------------------------------------------------------------------ 4 | REM Sysmon_Parse v0.1 5 | REM by Matt Churchill (matt.churchill@crowdstrike.com) 6 | REM 7 | REM About: 8 | REM Script to automate parsing of the event log for Microsoft's Sysinternal tool Sysmon. 9 | REM Sysmon Download and Info: http://technet.microsoft.com/en-us/sysinternals/dn798348 10 | REM 11 | REM System Requirements: Python 2.7, .NET 4.5 12 | REM 13 | REM Additional Tools Needed: 14 | REM Microsoft Log Parser, http://www.microsoft.com/en-us/download/details.aspx?id=24659 15 | REM TekCollect.py, http://www.tekdefense.com/tekcollect/ 16 | REM IPNetInfo, http://www.nirsoft.net/utils/ipnetinfo.html 17 | REM virustotalchecker, http://www.woanware.co.uk/forensics/virustotalchecker.html 18 | REM 19 | REM Folder Structure: 20 | REM - Main Directory containing sysmon_parse.cmd and sysmon_keywords.txt 21 | REM - tools 22 | REM - ipnetinfo 23 | REM - logparser 24 | REM - tekcollect 25 | REM - virustotalchecker 26 | REM ------------------------------------------------------------------------------------------------------------------------ 27 | REM Syntax: 28 | REM sysmon_parse.cmd (file name optional) 29 | REM 30 | REM If no file name is supplied, the script will copy the Sysmon Event Log from the current running system. 
31 | REM The Sysmon Event Log is stored here: C:\Windows\System32\winevt\Logs\Microsoft-Windows-Sysmon%4Operational.evtx 32 | REM The option to supply an event log is given to parse logs from other machines. 33 | REM 34 | REM ------------------------------------------------------------------------------------------------------------------------ 35 | REM Setup: 36 | REM Set base folder, set input source file if needed, and make Results directory. 37 | SET scriptlocation=%~dp0 38 | SET src=%1 39 | SET dtstamp=%date:~-4%%date:~4,2%%date:~7,2% 40 | mkdir Results_%dtstamp% 41 | IF EXIST %src% GOTO parse 42 | REM 43 | REM ------------------------------------------------------------------------------------------------------------------------ 44 | REM Copy Sysmon Event Log from running system. 45 | robocopy C:\Windows\system32\winevt\Logs %scriptlocation%Results_%dtstamp% *sysmon*.evtx 46 | SET src=%scriptlocation%Results_%dtstamp%\*.evtx 47 | REM 48 | REM ------------------------------------------------------------------------------------------------------------------------ 49 | REM Parse Sysmon Event Log to flat text file. 50 | CLS 51 | :parse 52 | REM Parse Sysmon Event Log using Microsoft's Logparser 53 | REM http://www.microsoft.com/en-us/download/details.aspx?id=24659 54 | ECHO Sysmon Log Copied, Now Parsing 55 | tools\logparser\logparser -i:evt -o:csv "Select RecordNumber,TO_UTCTIME(TimeGenerated),EventID,SourceName,ComputerName,SID,Strings from %src% WHERE EventID in ('1';'2';'3';'4';'5';'6';'7';'8')" > Results_%dtstamp%\sysmon_parsed.txt 56 | REM 57 | REM ------------------------------------------------------------------------------------------------------------------------ 58 | REM Remove copied Sysmon Event Log as it is no longer needed. 59 | del %scriptlocation%Results_%dtstamp%\*.evtx 60 | REM 61 | REM ------------------------------------------------------------------------------------------------------------------------ 62 | REM Extract Information from text file using TekDefense's tekcollect.py tool. 
63 | REM http://www.tekdefense.com/tekcollect/ 64 | ECHO ..Log Parsed, Now Extracting Data 65 | cd tools\tekcollect 66 | python tekcollect.py -f %scriptlocation%Results_%dtstamp%\sysmon_parsed.txt -t MD5 > %scriptlocation%Results_%dtstamp%\MD5Hashes.txt 67 | python tekcollect.py -f %scriptlocation%Results_%dtstamp%\sysmon_parsed.txt -t SHA1 > %scriptlocation%Results_%dtstamp%\Sha1Hashes.txt 68 | python tekcollect.py -f %scriptlocation%Results_%dtstamp%\sysmon_parsed.txt -t SHA256 > %scriptlocation%Results_%dtstamp%\SHA256Hashes.txt 69 | python tekcollect.py -f %scriptlocation%Results_%dtstamp%\sysmon_parsed.txt -t domain > %scriptlocation%Results_%dtstamp%\Domains.txt 70 | python tekcollect.py -f %scriptlocation%Results_%dtstamp%\sysmon_parsed.txt -t url > %scriptlocation%Results_%dtstamp%\URL.txt 71 | ECHO begin > %scriptlocation%Results_%dtstamp%\IPs.txt 72 | ECHO verbose >> %scriptlocation%Results_%dtstamp%\IPs.txt 73 | python tekcollect.py -f %scriptlocation%Results_%dtstamp%\sysmon_parsed.txt -t ip4 >> %scriptlocation%Results_%dtstamp%\IPs.txt 74 | ECHO end >> %scriptlocation%Results_%dtstamp%\IPs.txt 75 | python tekcollect.py -f %scriptlocation%Results_%dtstamp%\sysmon_parsed.txt -t exe > %scriptlocation%Results_%dtstamp%\Executables.txt 76 | REM 77 | REM ------------------------------------------------------------------------------------------------------------------------ 78 | REM This section uses a list of terms contained in "sysmon_keywords.txt" to search the parsed Sysmon results 79 | REM and add them to a separate CSV file. Add any extra terms to sysmon_keywords.txt in order to search for them here. 80 | cd %scriptlocation% 81 | Echo ....Searching for Keywords 82 | findstr /G:sysmon_keywords.txt %scriptlocation%Results_%dtstamp%\sysmon_parsed.txt >> %scriptlocation%Results_%dtstamp%\Keywords.csv 83 | REM 84 | REM ------------------------------------------------------------------------------------------------------------------------ 85 | REM Begin some automated analysis on extracted data 86 | ECHO ......Launching Analysis Tools 87 | REM ------------------------------------------------------------------------------------------------------------------------ 88 | REM This section left in script for those who want to send lookups through Cymru. The results will be faster, stored in a text file, but not as robust as with IPNetInfo. 89 | REM https://www.team-cymru.org/Services/ip-to-asn.html#whois 90 | REM Results are saved to delimited file and can be sorted by Country, Owner Name, Resolved Name, etc. 91 | REM cd %scriptlocation%tools\nc 92 | REM ncat.exe whois.cymru.com 43 < %scriptlocation%Results_%dtstamp%\IPs.txt > %scriptlocation%Results_%dtstamp%\Whois_Results.txt 93 | REM 94 | REM ------------------------------------------------------------------------------------------------------------------------ 95 | REM Using NirSoft's IPNetInfo GUI Tool for bulk Whois 96 | REM http://www.nirsoft.net/utils/ipnetinfo.html 97 | REM A large number of IPs on intial import may cause the tool to abort and lookups need to be completed manually by breaking into sections or reverting to using the above WhoIs lookup tool. 98 | REM Results can be saved to CSV and sorted by Country, Owner Name, Resolved Name, etc. 
99 | START %scriptlocation%tools\ipnetinfo\ipnetinfo.exe /ipfile %scriptlocation%Results_%dtstamp%\IPs.txt 100 | REM 101 | REM ------------------------------------------------------------------------------------------------------------------------ 102 | REM Using woanware's virustotalchecker 103 | REM http://www.woanware.co.uk/forensics/virustotalchecker.html 104 | REM Remember to add your VirusTotal API Key to the Settings.xml file. 105 | REM Sysmon can track MD5, SHA1, or SHA256 hashes. Be sure to pick the hash file Sysmon is using. In this example it is SHA256. 106 | cd %scriptlocation%tools\virustotalchecker 107 | START virustotalchecker.exe -m c -f %scriptlocation%Results_%dtstamp%\MD5Hashes.txt -o %scriptlocation%Results_%dtstamp% 108 | REM 109 | REM ------------------------------------------------------------------------------------------------------------------------ 110 | cd %scriptlocation% 111 | REM End script 112 | ECHO ........Check Parsing Output! 113 | ECHO Analysis Tip: Open "sysmon_parsed.txt" with Excel, delimited file by "," and "|". 114 | ECHO For Event Type 1 (New Process Created), check Column K for Command Line used. 115 | -------------------------------------------------------------------------------- /ual-analyzer/README.md: -------------------------------------------------------------------------------- 1 | # Unified Audit Log (UAL) Analyzer 2 | 3 | ## Description 4 | 5 | This tool parses, normalizes, and enriches event data from the Office 365 Unified Audit Log. 6 | 7 | ### Compatibility 8 | 9 | This tool requires Python 3.6 or later. 10 | 11 | ### Requirements 12 | 13 | - geoip2 14 | 15 | 16 | ### Usage 17 | 18 | ``` 19 | usage: ual-analyzer.py --input --output 20 | 21 | --input CSV input file (UAL) 22 | --output CSV output file 23 | ``` 24 | -------------------------------------------------------------------------------- /ual-analyzer/databases/geoip/COPYRIGHT.txt: -------------------------------------------------------------------------------- 1 | Database and Contents Copyright (c) 2018 MaxMind, Inc. 2 | -------------------------------------------------------------------------------- /ual-analyzer/databases/geoip/GeoLite2-City.mmdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CrowdStrike/Forensics/6d6a2aa2b9d023667d45cdaaa822e408ac058e06/ual-analyzer/databases/geoip/GeoLite2-City.mmdb -------------------------------------------------------------------------------- /ual-analyzer/databases/geoip/LICENSE.txt: -------------------------------------------------------------------------------- 1 | This work is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License. To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/. 2 | 3 | This database incorporates GeoNames [http://www.geonames.org] geographical data, which is made available under the Creative Commons Attribution 3.0 License. To view a copy of this license, visit http://www.creativecommons.org/licenses/by/3.0/us/. 4 | -------------------------------------------------------------------------------- /ual-analyzer/databases/geoip/README.txt: -------------------------------------------------------------------------------- 1 | Latitude and longitude are not precise and should not be used to identify a particular street address or household. 
2 | -------------------------------------------------------------------------------- /ual-analyzer/plugins/enrichers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import geoip2.database 4 | 5 | 6 | class Enricher(object): 7 | 8 | enrichers = [] 9 | 10 | def __init_subclass__(cls, **kwargs): 11 | if cls.enabled: 12 | super().__init_subclass__(**kwargs) 13 | cls.enrichers.append(cls()) 14 | 15 | 16 | class Geolocation(Enricher): 17 | 18 | enabled = True 19 | db_path = 'databases/geoip/GeoLite2-City.mmdb' 20 | 21 | def __init__(self): 22 | self.reader = geoip2.database.Reader(self.db_path) 23 | 24 | def check(self, event): 25 | return True if event.get('Client_IP') else False 26 | 27 | def run(self, event): 28 | enriched_event = event 29 | try: 30 | enriched_event['Country'] = self.reader.city(enriched_event['Client_IP']).country.name 31 | enriched_event['Region'] = self.reader.city(enriched_event['Client_IP']).subdivisions.most_specific.name 32 | enriched_event['City'] = self.reader.city(enriched_event['Client_IP']).city.name 33 | except (ValueError, geoip2.errors.AddressNotFoundError): 34 | pass 35 | return enriched_event 36 | -------------------------------------------------------------------------------- /ual-analyzer/plugins/parsers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import ipaddress 4 | import json 5 | import sys 6 | 7 | 8 | class Parser(object): 9 | 10 | parsers = [] 11 | 12 | def __init_subclass__(cls, **kwargs): 13 | super().__init_subclass__(**kwargs) 14 | cls.parsers.append(cls()) 15 | 16 | 17 | class MailboxActions(Parser): 18 | 19 | operations = ['Copy', 'Create', 'FolderBind', 'HardDelete', 'MailboxLogin', 20 | 'MessageBind', 'Move', 'MoveToDeletedItems', 'SendAs', 21 | 'SendOnBehalf', 'SoftDelete', 'Update', 22 | 'UpdateCalendarDelegation', 'UpdateFolderPermissions', 23 | 'UpdateInboxRules', 'New-InboxRule'] 24 | 25 | def check(self, event): 26 | return True if event['Operation'] in self.operations else False 27 | 28 | def run(self, event): 29 | if event.get('ClientInfoString'): 30 | client_type, user_agent = self._get_client_info(event['ClientInfoString']) 31 | else: 32 | client_type, user_agent = None, None 33 | if event.get('ClientIPAddress'): 34 | ip_address = event['ClientIPAddress'] 35 | else: 36 | ip_address = None 37 | parsed_event = {'Time': event['CreationTime'], 38 | 'Action': event['Operation'], 39 | 'Workload': event['Workload'], 40 | 'User': event['UserId'], 41 | 'Status': event['ResultStatus'], 42 | 'Client_IP': ip_address, 43 | 'Client_Type': client_type, 44 | 'User_Agent': user_agent, 45 | 'Data': event} 46 | return parsed_event 47 | 48 | def _get_client_info(self, clientinfostring): 49 | strings = clientinfostring.split(';') 50 | if any(element in strings[0] for element in ['/owa/', '/ecp/']): 51 | client_type = 'Web' 52 | elif strings[0].startswith('Client'): 53 | client_type = strings[0].split('=')[1] 54 | else: 55 | client_type = strings[0] 56 | user_agent = ';'.join(strings[1:]).strip() 57 | return client_type, user_agent 58 | 59 | 60 | class ForwardingRule(Parser): 61 | 62 | def check(self, event): 63 | if event.get('ExtendedProperties', {}).get('ForwardingSmtpAddress'): 64 | return True 65 | else: 66 | return False 67 | 68 | def run(self, event): 69 | client_ip = event['ClientIP'].split(':')[0] 70 | parsed_event = {'Time': event['CreationTime'], 71 | 'Action': 'ForwardingRule', 72 | 'Workload': 
event['Workload'], 73 | 'User': event['UserId'], 74 | 'Status': event['ResultStatus'], 75 | 'Address': event['ExtendedProperties']['ForwardingSmtpAddress'].split(':')[1], 76 | 'Client_IP': client_ip, 77 | 'Data': event} 78 | return parsed_event 79 | 80 | 81 | class Default(Parser): 82 | 83 | def check(self, event): 84 | return True 85 | 86 | def run(self, event): 87 | parsed_event = {'Time': event['CreationTime'], 88 | 'Action': event['Operation'], 89 | 'Workload': event['Workload'], 90 | 'User': event['UserId'], 91 | 'Data': event} 92 | 93 | if event.get('ClientIP'): 94 | try: 95 | ipaddress.ip_address(event.get('ClientIP')) 96 | client_ip = event['ClientIP'] 97 | except ValueError: 98 | if event.get('ClientIP').startswith('['): 99 | client_ip = event.get('ClientIP').split(']')[0][1:] 100 | else: 101 | client_ip = event['ClientIP'].split(':')[0] 102 | parsed_event['Client_IP'] = client_ip 103 | 104 | if event.get('ResultStatus'): 105 | parsed_event['Status'] = event.get('ResultStatus') 106 | 107 | if event.get('ExtendedProperties', {}).get('UserAgent'): 108 | parsed_event['User_Agent'] = event['ExtendedProperties']['UserAgent'] 109 | 110 | return parsed_event 111 | -------------------------------------------------------------------------------- /ual-analyzer/ual-analyzer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import csv 5 | import json 6 | 7 | from plugins.enrichers import Enricher 8 | from plugins.parsers import Parser 9 | 10 | 11 | WORKLOADS = ['Exchange', 'AzureActiveDirectory'] 12 | 13 | EXCLUDED_USERS = ['S-1-5-18', 'NT AUTHORITY\SYSTEM', 'DevilFish', 14 | 'Microsoft Operator'] 15 | 16 | OUTPUT_FIELDS = ['Time', 'Action', 'Workload', 'User', 'Status', 'Client_IP', 17 | 'Client_Type', 'User_Agent', 'Address', 'Country', 'Region', 18 | 'City', 'Data'] 19 | 20 | arg_parser = argparse.ArgumentParser() 21 | arg_parser.add_argument('--input', required=True) 22 | arg_parser.add_argument('--output', required=True) 23 | args = arg_parser.parse_args() 24 | 25 | events = [] 26 | 27 | # Open input file 28 | with open(args.input) as input_file: 29 | reader = csv.DictReader(input_file) 30 | for row in reader: 31 | event = {} 32 | 33 | try: 34 | audit_data = json.loads(row['AuditData']) 35 | except json.decoder.JSONDecodeError: 36 | continue 37 | 38 | if WORKLOADS and not audit_data.get('Workload') in WORKLOADS: 39 | continue 40 | 41 | if any(user in row['UserIds'] for user in EXCLUDED_USERS): 42 | continue 43 | 44 | # Normalize event 45 | for prop, value in audit_data.items(): 46 | 47 | if prop in ['Parameters', 'ExtendedProperties'] \ 48 | and type(value) is list: 49 | for extended_prop in value: 50 | event.setdefault('ExtendedProperties', {})[ 51 | extended_prop['Name']] = extended_prop['Value'] 52 | else: 53 | event[prop] = value 54 | 55 | # Parse event 56 | for parser in Parser.parsers: 57 | if parser.check(event): 58 | event = parser.run(event) 59 | break 60 | 61 | # Enrich event 62 | for enricher in Enricher.enrichers: 63 | if enricher.check(event): 64 | event = enricher.run(event) 65 | 66 | # Add event to list 67 | events.append(event) 68 | 69 | # Sort events 70 | events = sorted(events, key=lambda k: k['Time']) 71 | 72 | # Open output file 73 | with open(args.output, 'w+', encoding='utf-8-sig') as output_file: 74 | writer = csv.DictWriter(output_file, extrasaction='ignore', 75 | fieldnames=OUTPUT_FIELDS, lineterminator='\n') 76 | writer.writeheader() 77 | 78 | # Write events to output file 
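    # Note: extrasaction='ignore' makes DictWriter drop any event keys that are not
    # listed in OUTPUT_FIELDS; the full raw record is still preserved in the 'Data' column.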
79 | for event in events: 80 | writer.writerow(event) 81 | -------------------------------------------------------------------------------- /vshot/vshot: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # vshot - The Volatile Shotgun 3 | # by Devin Gergen (devin.gergen@gmail.com) 4 | version="4.01" 5 | 6 | #updates to come 7 | # 1) extended volatility tools 8 | # 2) profile specific tools 9 | # 3) -oshm option to use /dev/shm for output only (do not copy the memory image file to /dev/shm) 10 | # 4) timeline support 11 | # 5) resume interrupted image processing 12 | 13 | ### CONFIGURATION ########################################################################################## 14 | ## Path to volatility ## 15 | volpath="/usr/bin/volatility" 16 | ## Path to bulk_extractor ## 17 | ## If you do not wish to have bulk_extractor on your system, replace the path with an application that exists 18 | ## - bulkpath="/bin/echo" 19 | bulkpath="/usr/local/bin/bulk_extractor" 20 | ############################################################################################################ 21 | 22 | ## Global Vars ## 23 | img_locat= 24 | dst_fldr= 25 | img_info= 26 | preprnd=$RANDOM$RANDOM$RANDOM 27 | showtime=0 28 | runstrings=0 29 | runbulk=0 30 | pcount= 31 | kdbg= 32 | nokgdb= 33 | 34 | ## errors ## 35 | err(){ 36 | err_ary=( "File not found... $2" "Folder not found... $2" "Unrecognized option... use \"-h\" for more details" "Argument option must not start with a \"-\" ... " "Path to $2 not found... Please check script configuration." "Single Processor Detected ~ \"-mt\" option unavailable" "KDBG option invalid format (-h for details): $2" ) 37 | echo Error: ${err_ary[$1]};exit 38 | } 39 | 40 | #checks for tool existence 41 | varary=( "$volpath" "$bulkpath" ) 42 | for v in ${varary[@]};do if [[ $(which "$v" ) ]]; then : ;else err 4 $v ; fi ;done 43 | 44 | 45 | ## default array of volatility modules to be run 46 | toolarray=( pslist psscan psxview pstree dlllist filescan handles hivelist connections connscan clipboard consoles cmdscan cmdline hashdump getsids mutantscan modules shimcache malfind ldrmodules modscan driverscan driverirp sockets sockscan shellbags netscan threads thrdscan ssdt userassist mbrparser mftparser svcscan atoms atomscan callbacks deskscan devicetree envars userhandles privs ) 47 | 48 | #GENERAL TOOLS Not in toolarray ## 49 | #bioskbd crashinfo eventhooks evtlogs gahti gditimers gdt getservicesids hibinfo hivedump hivescan hpakextract hpakinfo idt iehistory imagecopy impscan kdbgscan kpcrscan ldrmodules lsadump machoinfo memdump memmap messagehooks moddump patcher printkey procexedump procmemdump raw2dmp screenshot sessions strings svcscan symlinkscan timeliner timers unloadedmodules vaddump vadinfo vadtree vadwalk vboxinfo vmwareinfo volshell windows wintree wndscan yarascan ) 50 | 51 | ##dumpdir required ## 52 | # dlldump dumpcerts dumpfiles 53 | 54 | ## WIN7/VISTA ONLY ## 55 | # netscan 56 | 57 | #add these back to the list later 58 | # apihooks 59 | # pstree -v 60 | # threads -L 61 | 62 | 63 | kdbgset(){ 64 | kregchk='^.*[L]$' #regex to check default KDBG string (volatility doesn't process it with the L at the end) 65 | kregchk_two='^0x[0-9a-fA-F]*[^L]$' #regex to check a correct format for KDBG 66 | if [[ $1 =~ $kregchk ]];then 67 | err 6 $1 68 | else 69 | if [[ $1 =~ $kregchk_two ]];then 70 | kdbg=$1 71 | else 72 | err 6 $1 73 | fi 74 | fi 75 | } 76 | 77 | get_imginfo(){ 78 | echo Scanning image for profile id... 
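# Run Volatility's imageinfo to list candidate profiles, then prompt the analyst for the
# profile to use (and for a KDBG value, unless one was already supplied with -g).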
79 | $volpath -f $img_locat imageinfo | tee $dst_fldr/imageinfo.txt 80 | echo -n Please enter your image profile:\ ;read img_info 81 | if [[ $nokgdb -eq 1 ]];then :;else echo -n Please enter your KDBG value:\ ;read kdbgr; kdbgset $kdbgr;fi 82 | } 83 | 84 | get_imglocat(){ 85 | echo -n Please enter the full path to your memory image:\ ;read img_locat 86 | if [ -f $img_locat ];then 87 | : 88 | else 89 | err 0 $img_locat 90 | fi 91 | } 92 | 93 | get_dstfldr(){ 94 | echo -n Please enter the full path to your destination directory:\ ;read dst_fldr 95 | if [ -d $dst_fldr ];then 96 | : 97 | else 98 | err 1 $dst_fldr 99 | fi 100 | } 101 | 102 | get_loc(){ 103 | get_imglocat 104 | get_dstfldr 105 | get_imginfo 106 | process_image $img_locat $dst_fldr 107 | } 108 | 109 | strngsrchz(){ 110 | strng_arry=( l b L B ) 111 | echo -n Running: strings... 112 | strings -t d $1 | awk '{printf $1":";for (i = 2; i <= NF; i++) printf "%s ",$i;print " "}' >> $2/strings.txt 2> /dev/null 113 | for i in ${strng_arry[@]};do 114 | strings -e $i -t d $1 | awk '{printf $1":";for (i = 2; i <= NF; i++) printf "%s ",$i;print " "}' >> $2/strings.txt 2> /dev/null 115 | done 116 | $volpath -f $1 --profile=$img_info strings -s $2/strings.txt > $2/vol_strings.txt 2> /dev/null 117 | echo DONE 118 | } 119 | 120 | #call: process_image /path/to/image dst_fldr 121 | process_image(){ 122 | 123 | if [ $showtime -eq 1 ];then date;fi #shows time if -time argument is used 124 | 125 | cdbg= 126 | if [ $kdbg ];then cdbg="-g $kdbg";fi #if $kdbg has content, the cdbg variable will be set for use in command execution 127 | 128 | if [ $runbulk -eq 1 ];then #runs bulk_extractor if -b argument is used 129 | $bulkpath -e wordlist -m 5 -o $2/bulk_ex_results $1 &> /dev/null & 130 | echo Started bulk_extractor... 131 | fi 132 | 133 | if [[ $mthreads -eq 1 ]];then #multithreading check 134 | 135 | trackfile=/dev/shm/$RANDOM$RANDOM$RANDOM.trackfile 136 | #========================== 137 | processor_array=( $(for i in $(seq 1 $pcount);do echo -n "0 ";done)) 138 | acount=0 139 | 140 | while [ $acount -lt ${#toolarray[@]} ];do 141 | for i in $(seq 1 ${#processor_array[@]});do 142 | if [ -f $trackfile.$i ];then 143 | processor_array[$(($i-1))]=$(cat $trackfile.$i) 144 | fi 145 | if [[ "${processor_array[$(($i-1))]}" == "0" ]];then 146 | #The "taskset" tool assigns the volatility process to a specific CPU 147 | #taskset -c $i volatility... 148 | # $i is the CPU 149 | echo "taskset -c $i $volpath -f $1 --profile=$img_info ${toolarray[$acount]} $cdbg > $2/${toolarray[$acount]}.txt 2> /dev/null" > $trackfile.$i.command 150 | echo "echo -n ${toolarray[$acount]} completed at: " >> $trackfile.$i.command 151 | echo "date" >> $trackfile.$i.command 152 | echo "echo 0 > $trackfile.$i" >> $trackfile.$i.command 153 | source $trackfile.$i.command & 154 | acount=$(( $acount + 1 )) 155 | echo 1 > $trackfile.$i 156 | fi 157 | done 158 | sleep 0.3 159 | done 160 | 161 | vchk=$(ps -ef | grep $volpath| wc -l) 162 | while [ $vchk -gt 1 ];do 163 | echo Waiting for volatility to finish... 
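# The grep command itself appears in the ps output, so a count greater than 1 means volatility jobs are still running.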
164 | sleep 10 165 | vchk=$(ps -ef | grep $volpath| wc -l) 166 | done 167 | 168 | rm $trackfile.* 169 | 170 | #========================== 171 | else #non multithreading processing 172 | for i in $(seq 0 $(echo ${#toolarray[@]}-1|bc));do 173 | echo -n Running: ${toolarray[$i]}...\ 174 | $volpath -f $1 --profile=$img_info ${toolarray[$i]} $cdbg > $2/${toolarray[$i]}.txt 2> /dev/null 175 | echo DONE 176 | done 177 | fi 178 | 179 | if [ $runstrings -eq 1 ];then strngsrchz $1 $2; fi 180 | 181 | bschk=$(ps -ef | grep bulk_extractor| wc -l) #bschk variable initialized for first check in "while" loop 182 | while [ $bschk -gt 1 ];do 183 | echo Waiting for bulk_extractor to finish... 184 | sleep 10 185 | bschk=$(ps -ef | grep bulk_extractor| wc -l) 186 | done 187 | 188 | if [ $showtime -eq 1 ];then date;fi 189 | 190 | } 191 | 192 | chk_shm(){ 193 | avail_shm=$(df -B 1 /dev/shm | tail -1 |awk '{print $4}') 194 | img_size=$(echo 2*$(ls -l $img_locat | awk '{print $5}')|bc) 195 | if [[ $1 == "ns" ]];then 196 | process_image $img_locat $dst_fldr 197 | else 198 | if [ $avail_shm -gt $img_size ];then 199 | echo shm space available... processing in shm... 200 | mkdir /dev/shm/$preprnd 201 | cp $img_locat /dev/shm/$preprnd 202 | img_locat=/dev/shm/$preprnd/$(ls /dev/shm/$preprnd/) 203 | mkdir /dev/shm/$preprnd/results 204 | process_image $img_locat /dev/shm/$preprnd/results 205 | 206 | #move results to destination dir 207 | mv /dev/shm/$preprnd/results/* $dst_fldr 208 | #cleanup 209 | rm -rf /dev/shm/$preprnd/ 210 | else 211 | echo Not enough space to use shm... 212 | echo You can resize shm by: sudo mount -o remount,size=4g /dev/shm 213 | echo note: The size is adjustable... ctrl+c to exit or enter to continue with disk-based processing...;read pause 214 | process_image $img_locat $dst_fldr 215 | fi 216 | fi 217 | } 218 | 219 | usage(){ 220 | echo -e " 221 | vshot v$version - The Volatile Shotgun by Devin Gergen 222 | 223 | vshot usage: 224 | -h help dialogue 225 | -shm attempt to use /dev/shm for processing 226 | -p volatility profile to use: -p Win7SP1x64 227 | - If no profile is given, vshot will run imageinfo 228 | and prompt the user for a profile and KDBG. 229 | -i memory image location: -i /cases/case/mem.img 230 | - If not provided, vshot will prompt for user input. 231 | -d destination directory: -d /cases/case/memory_analysis 232 | - If not provided, vshot will prompt for user input. 233 | -time display the start and end times for the entire run 234 | -str run strings & volstrings 235 | -mt enables multithreading with volatility 236 | -b run bulk_extractor 237 | -g volatility KDBG location: -g 0x82968c28 238 | - KDBG can be found by running imageinfo on the memory image 239 | - Provides a speed boost 240 | - If the -g option is used with -p, vshot will not prompt the user for KDBG. 
241 | " 242 | } 243 | 244 | hand_args(){ 245 | 246 | #i d p time shm str mt b g 247 | testarray=( 0 0 0 0 0 0 0 0 0 ) 248 | 249 | argarry=( $@ ) 250 | testreg="^\-" 251 | for i in $( seq 0 $(echo ${#argarry[@]}-1|bc) );do 252 | case ${argarry[$i]} in 253 | -h) 254 | usage;exit 255 | ;; 256 | --help) 257 | usage;exit 258 | ;; 259 | -shm) 260 | testarray[5]=1 261 | ;; 262 | -p) 263 | #uses argument after -p for the $volpath profile 264 | pfsel=$(echo $i+1|bc) 265 | if [[ ${argarry[$pfsel]} =~ $testreg ]];then 266 | err 3 267 | else 268 | img_info=${argarry[$pfsel]} 269 | testarray[2]=1 270 | fi 271 | ;; 272 | -i) 273 | imgsel=$(echo $i+1|bc) 274 | if [[ ${argarry[$imgsel]} =~ $testreg ]];then 275 | err 3 276 | else 277 | img_locat=${argarry[$imgsel]} 278 | if [ -f $img_locat ];then 279 | testarray[0]=1 280 | else 281 | err 0 $img_locat 282 | fi 283 | fi 284 | ;; 285 | -d) 286 | #uses argument after -d for the destination folder 287 | dstsel=$(echo $i+1|bc) 288 | if [[ ${argarry[$dstsel]} =~ $testreg ]];then 289 | err 3 290 | else 291 | dst_fldr=${argarry[$dstsel]} 292 | if [ -d $dst_fldr ];then 293 | testarray[1]=1 294 | else 295 | err 1 $dst_fldr 296 | fi 297 | fi 298 | ;; 299 | -time) 300 | testarray[3]=1 301 | ;; 302 | -str) 303 | testarray[4]=1 304 | ;; 305 | -mt) 306 | pcount=$(echo $(cat /proc/cpuinfo | grep processor | wc -l)-1|bc) 307 | if [ $pcount -eq 0 ];then 308 | err 5 309 | else 310 | testarray[6]=1 311 | fi 312 | ;; 313 | -b) testarray[7]=1 314 | ;; 315 | -g) 316 | kgdbchk=$(echo $i+1|bc) 317 | if [[ ${argarry[$kgdbchk]} =~ $testreg ]];then 318 | err 3 319 | else 320 | testarray[8]=1 321 | kdbgset ${argarry[$kgdbchk]} 322 | fi 323 | ;; 324 | *) 325 | ochkn=$(echo $i-1|bc) 326 | optnchk=${argarry[$ochkn]} 327 | if [[ "$optnchk" == "-p" || "$optnchk" == "-d" || "$optnchk" == "-i" || "$optnchk" == "-g" ]];then 328 | : #goodcheck 329 | else 330 | err 2 331 | fi 332 | ;; 333 | esac 334 | done 335 | 336 | #i d p time str shm g 337 | for z in $(seq 0 $(echo ${#testarray[@]}-1|bc));do 338 | case $z in 339 | 0) if [ ${testarray[$z]} -eq 0 ];then get_imglocat;fi #sets up image location variable 340 | ;; 341 | 1) if [ ${testarray[$z]} -eq 0 ];then get_dstfldr;fi #sets up destination folder variable 342 | ;; 343 | 2) if [ ${testarray[$z]} -eq 0 ];then get_imginfo;fi #sets up imageinfo variable 344 | ;; 345 | 3) if [ ${testarray[$z]} -eq 1 ];then showtime=1;fi #flags "date" commands in process_image function 346 | ;; 347 | 4) if [ ${testarray[$z]} -eq 1 ];then runstrings=1;fi #flags strings to run in process_image function 348 | ;; 349 | 5) if [ ${testarray[$z]} -eq 1 ];then chk_shm;exit;fi #if shm option is used, executes processing for shm then exits 350 | ;; 351 | 6) if [ ${testarray[$z]} -eq 1 ];then mthreads=1;fi #sets condition to use multiprocessor logic 352 | ;; 353 | 7) if [ ${testarray[$z]} -eq 1 ];then runbulk=1;fi #sets condition to run bulk_extractor 354 | ;; 355 | 8) if [ ${testarray[$z]} -eq 1 ];then nokgdb=1;fi #sets condition to skip prompt for kdbg 356 | ;; 357 | esac 358 | done 359 | process_image $img_locat $dst_fldr 360 | } 361 | 362 | 363 | #### MAIN #### 364 | 365 | if [ -z "$*" ]; then 366 | get_loc 367 | else 368 | hand_args $@ 369 | fi 370 | --------------------------------------------------------------------------------